hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c4943683afb62f8fb1168cb730218c3287099c4
| 5,313
|
py
|
Python
|
app/api/v2/tests/test_candidate.py
|
softMaina/political-v2
|
985e96ec0ff6cc866a26538ef7a69436de7e17d0
|
[
"MIT"
] | 2
|
2019-03-17T08:11:13.000Z
|
2019-11-14T06:08:50.000Z
|
app/api/v2/tests/test_candidate.py
|
softMaina/political-v2
|
985e96ec0ff6cc866a26538ef7a69436de7e17d0
|
[
"MIT"
] | null | null | null |
app/api/v2/tests/test_candidate.py
|
softMaina/political-v2
|
985e96ec0ff6cc866a26538ef7a69436de7e17d0
|
[
"MIT"
] | null | null | null |
import json
from flask import current_app
from app.api.v2.tests import base_tests
from . import helper_functions
from app.api.v2.tests.helper_functions import convert_response_to_json
class TestCandidates(base_tests.TestBaseClass):
    """
    Tests for the candidate endpoints (api/v2/candidates).

    Inherits the configured test client and fixture payloads
    (Admin, admin_login, PARTY, OFFICE, Candidate) from TestBaseClass.
    """
    def register_user(self):
        """
        Register the admin fixture user.

        :return: the HTTP status code of the register request
        """
        response = self.app_test_client.post('api/v2/auth/register', json=self.Admin)
        return response.status_code
    def log_user(self):
        """
        Sign up (best effort) and log in the admin fixture user.

        :return: the auth token string extracted from the login response
        """
        # NOTE(review): register_user() posts to auth/register while this
        # method posts to auth/signup -- confirm which route the API exposes.
        response1 = self.app_test_client.post('/api/v2/auth/signup',json=self.Admin)
        response = self.app_test_client.post('/api/v2/auth/login',json=self.admin_login)
        token = convert_response_to_json(response)['token']
        return token
    def test_candidate(self):
        """
        A candidate is created (201) when a party and an office exist and a
        valid auth token is supplied.
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 201)
    def test_candidate_no_token(self):
        """
        Candidate registration without an auth token is unauthorized (401).
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = ""), content_type='application/json')
        self.assertEqual(response.status_code,401)
    def test_candidate_invalid_token(self):
        """
        Candidate registration with an invalid token is forbidden (403).
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = "invalid_token"), content_type='application/json')
        self.assertEqual(response.status_code,403)
    def test_candidate_with_no_office(self):
        """
        Candidate registration fails (400) when no office exists.
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_no_party(self):
        """
        Candidate registration fails (400) when no party exists.
        """
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_missing_keys(self):
        """
        Candidate registration fails (400) when required keys are missing
        from the payload.
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_string(self):
        """
        Candidate registration fails (400) when "office" is a string rather
        than an integer id.
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":"one",
            "party":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_party_string(self):
        """
        Candidate registration fails (400) when "party" is a string rather
        than an integer id.
        """
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1,
            "party":"one"
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
| 45.410256
| 165
| 0.680783
|
import json
from flask import current_app
from app.api.v2.tests import base_tests
from . import helper_functions
from app.api.v2.tests.helper_functions import convert_response_to_json
class TestCandidates(base_tests.TestBaseClass):
    """Tests for the candidate endpoints (api/v2/candidates)."""
    def register_user(self):
        """Register the admin fixture user; return the HTTP status code."""
        response = self.app_test_client.post('api/v2/auth/register', json=self.Admin)
        return response.status_code
    def log_user(self):
        """Sign up (best effort) and log in; return the auth token string."""
        response1 = self.app_test_client.post('/api/v2/auth/signup',json=self.Admin)
        response = self.app_test_client.post('/api/v2/auth/login',json=self.admin_login)
        token = convert_response_to_json(response)['token']
        return token
    def test_candidate(self):
        """Candidate is created (201) given party, office and valid token."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 201)
    def test_candidate_no_token(self):
        """Registration without an auth token is unauthorized (401)."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = ""), content_type='application/json')
        self.assertEqual(response.status_code,401)
    def test_candidate_invalid_token(self):
        """Registration with an invalid token is forbidden (403)."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = "invalid_token"), content_type='application/json')
        self.assertEqual(response.status_code,403)
    def test_candidate_with_no_office(self):
        """Registration fails (400) when no office exists."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_no_party(self):
        """Registration fails (400) when no party exists."""
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE)
        response = self.app_test_client.post('api/v2/candidates',json=self.Candidate, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code,400)
    def test_candidate_with_missing_keys(self):
        """Registration fails (400) when required payload keys are missing."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_string(self):
        """Registration fails (400) when "office" is a string id."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":"one",
            "party":1
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
    def test_candidate_with_party_string(self):
        """Registration fails (400) when "party" is a string id."""
        add_party = self.app_test_client.post('api/v2/parties',json=self.PARTY, headers=dict(Authorization = self.log_user()))
        add_office = self.app_test_client.post('api/v2/offices',json=self.OFFICE, headers=dict(Authorization = self.log_user()))
        response = self.app_test_client.post('api/v2/candidates',json={
            "office":1,
            "party":"one"
        }, headers=dict(Authorization = self.log_user()), content_type='application/json')
        self.assertEqual(response.status_code, 400)
| true
| true
|
1c49452cccd050c0ae0b8b5468b700cbb6d115c9
| 1,059
|
py
|
Python
|
feature_importance_v2.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
feature_importance_v2.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
feature_importance_v2.py
|
terryli710/MPS_regression
|
d8f9c94ad315734ff9376a53e6be3f508b4da742
|
[
"MIT"
] | null | null | null |
## Without filtering results with VIF, calculate the importance for all the features.
## Works for "first" and "structcoef"
from util_relaimpo import *
from util import loadNpy, loadCsv
def main(x_name, y_name, method, feature_names=None):
    """Compute and print bootstrapped feature importance for one dataset.

    Args:
        x_name: file name of the X ``.npy`` array under ``data/X``.
        y_name: file name of the Y ``.npy`` array under ``data/Y``.
        method: importance function (e.g. ``structcoef``) forwarded to
            ``bootstrapping``.
        feature_names: optional column names for the design matrix; when
            omitted/empty, default integer column labels are used.
            (Was a mutable ``[]`` default -- replaced with ``None``;
            both are falsy, so behavior is unchanged.)
    """
    # INFO header: dataset prefix and the importance method's name.
    print("Dataset", x_name.split('_')[0])
    # __name__ is sturdier than parsing str(method); for a plain function
    # both yield e.g. "structcoef".
    print("Method", getattr(method, '__name__', str(method)))
    # load data
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    # make dataframe, with named columns when feature names were provided
    if feature_names:
        xdf = pd.DataFrame(data=X, columns=feature_names)
    else:
        xdf = pd.DataFrame(data=X)
    print("bootstrapping ...")
    coef_boot = bootstrapping(xdf, Y, method)
    print(printBootResult(coef_boot, list(xdf.columns), list(xdf.columns)))
# NOTE(review): runs at import time as a side effect; kept as-is since other
# modules may import ``feature_names`` from here.
feature_names = getFeatureNames(loadCsv(['data', 'X', 'feature_descriptions.csv']))
if __name__ == '__main__':
    main('HM_X_ang_vel.npy','HM_MPSCC95.npy', structcoef, feature_names)
    main('AF_X_ang_vel.npy', 'AF_MPSCC95.npy', structcoef, feature_names)
    main('NFL53_X_ang_vel.npy', 'NFL53_MPSCC95.npy', structcoef, feature_names)
| 40.730769
| 85
| 0.693107
|
from util_relaimpo import *
from util import loadNpy, loadCsv
def main(x_name, y_name, method, feature_names=None):
    """Compute and print bootstrapped feature importance for one dataset.

    ``feature_names`` was a mutable ``[]`` default; ``None`` is the safe
    equivalent (both are falsy). ``method.__name__`` replaces the fragile
    ``str(method).split(' ')[1]`` parse -- identical output for functions.
    """
    print("Dataset", x_name.split('_')[0])
    print("Method", getattr(method, '__name__', str(method)))
    X = loadNpy(['data', 'X', x_name])
    Y = loadNpy(['data', 'Y', y_name])
    if feature_names:
        xdf = pd.DataFrame(data=X, columns=feature_names)
    else:
        xdf = pd.DataFrame(data=X)
    print("bootstrapping ...")
    coef_boot = bootstrapping(xdf, Y, method)
    print(printBootResult(coef_boot, list(xdf.columns), list(xdf.columns)))
feature_names = getFeatureNames(loadCsv(['data', 'X', 'feature_descriptions.csv']))
if __name__ == '__main__':
    main('HM_X_ang_vel.npy','HM_MPSCC95.npy', structcoef, feature_names)
    main('AF_X_ang_vel.npy', 'AF_MPSCC95.npy', structcoef, feature_names)
    main('NFL53_X_ang_vel.npy', 'NFL53_MPSCC95.npy', structcoef, feature_names)
| true
| true
|
1c494530de31aff8b8204b0ef28d50b5b3cad91c
| 113
|
py
|
Python
|
tcex/sessions/__init__.py
|
brikardtc/tcex
|
78680f055f4259e31f0b4989a5695604108d9fdd
|
[
"Apache-2.0"
] | null | null | null |
tcex/sessions/__init__.py
|
brikardtc/tcex
|
78680f055f4259e31f0b4989a5695604108d9fdd
|
[
"Apache-2.0"
] | null | null | null |
tcex/sessions/__init__.py
|
brikardtc/tcex
|
78680f055f4259e31f0b4989a5695604108d9fdd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Session module for TcEx Framework"""
# flake8: noqa
from .tc_session import TcSession
| 22.6
| 39
| 0.690265
|
from .tc_session import TcSession
| true
| true
|
1c49454c8e0883c7cc820aae3666d057cd052c30
| 3,723
|
py
|
Python
|
monai/data/synthetic.py
|
loftwah/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2020-04-23T13:05:29.000Z
|
2020-04-23T13:05:29.000Z
|
monai/data/synthetic.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | null | null | null |
monai/data/synthetic.py
|
tranduyquockhanh/MONAI
|
37fb3e779121e6dc74127993df102fc91d9065f8
|
[
"Apache-2.0"
] | 1
|
2021-09-20T12:10:01.000Z
|
2021-09-20T12:10:01.000Z
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from monai.transforms.utils import rescale_array
def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """
    Return a noisy 2D image with `num_obj` circles and a 2D mask image. The maximum radius of the circles is given as
    `rad_max`. The mask will have `num_seg_classes` number of classes for segmentations labeled sequentially from 1, plus a
    background class represented as 0. If `noise_max` is greater than 0 then noise will be added to the image taken from
    the uniform distribution on range `[0,noise_max)`. If `channel_dim` is None, will create an image without channel
    dimension, otherwise create an image with channel dimension as first dim or last dim.
    """
    image = np.zeros((width, height))
    for i in range(num_objs):
        # random circle center, kept rad_max away from the borders so every
        # circle fits entirely inside the image
        x = np.random.randint(rad_max, width - rad_max)
        y = np.random.randint(rad_max, height - rad_max)
        rad = np.random.randint(5, rad_max)
        spy, spx = np.ogrid[-x:width - x, -y:height - y]
        circle = (spx * spx + spy * spy) <= rad * rad
        if num_seg_classes > 1:
            # integer class value in 1..num_seg_classes
            image[circle] = np.ceil(np.random.random() * num_seg_classes)
        else:
            # single foreground class: intensity in [0.5, 1.0)
            image[circle] = np.random.random() * 0.5 + 0.5
    labels = np.ceil(image).astype(np.int32)
    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, norm))
    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.'
        # BUG FIX: the first pair must be parenthesized (as in the 3D version).
        # Without parentheses the conditional bound only to ``labels[None]``,
        # so for channel_dim != 0 ``noisyimage`` got its channel axis at the
        # wrong end and ``labels`` was assigned a tuple.
        noisyimage, labels = (noisyimage[None], labels[None]) \
            if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
    return noisyimage, labels
def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30,
                         noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """
    Return a noisy 3D image and segmentation.

    Same construction as the 2D variant, with random spheres instead of
    circles; `channel_dim` may be None (no channel axis), 0 (channel first)
    or -1/3 (channel last).

    See also:
        :py:meth:`~create_test_image_2d`
    """
    # NOTE(review): allocated as (width, height, depth) although the signature
    # order is (height, width, depth) -- confirm the swap is intended.
    image = np.zeros((width, height, depth))
    for i in range(num_objs):
        # random sphere center, kept rad_max from every border so the sphere fits
        x = np.random.randint(rad_max, width - rad_max)
        y = np.random.randint(rad_max, height - rad_max)
        z = np.random.randint(rad_max, depth - rad_max)
        rad = np.random.randint(5, rad_max)
        spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z]
        circle = (spx * spx + spy * spy + spz * spz) <= rad * rad
        if num_seg_classes > 1:
            # integer class value in 1..num_seg_classes
            image[circle] = np.ceil(np.random.random() * num_seg_classes)
        else:
            # single foreground class: intensity in [0.5, 1.0)
            image[circle] = np.random.random() * 0.5 + 0.5
    labels = np.ceil(image).astype(np.int32)
    # additive uniform noise, then rescale the max of image and noise to [0, 1]
    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, norm))
    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.'
        noisyimage, labels = (noisyimage[None], labels[None]) \
            if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
    return noisyimage, labels
| 43.290698
| 123
| 0.665055
|
import numpy as np
from monai.transforms.utils import rescale_array
def create_test_image_2d(width, height, num_objs=12, rad_max=30, noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """Return a noisy 2D test image with random circles plus its label mask.

    `channel_dim` may be None (no channel axis), 0 (channel first) or
    -1/2 (channel last).
    """
    image = np.zeros((width, height))
    for i in range(num_objs):
        x = np.random.randint(rad_max, width - rad_max)
        y = np.random.randint(rad_max, height - rad_max)
        rad = np.random.randint(5, rad_max)
        spy, spx = np.ogrid[-x:width - x, -y:height - y]
        circle = (spx * spx + spy * spy) <= rad * rad
        if num_seg_classes > 1:
            image[circle] = np.ceil(np.random.random() * num_seg_classes)
        else:
            image[circle] = np.random.random() * 0.5 + 0.5
    labels = np.ceil(image).astype(np.int32)
    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, norm))
    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 2), 'invalid channel dim.'
        # BUG FIX: parenthesize the first pair; unparenthesized, the
        # conditional bound only to ``labels[None]``, so for channel_dim != 0
        # the channel axis of ``noisyimage`` landed at the wrong end and
        # ``labels`` became a tuple.
        noisyimage, labels = (noisyimage[None], labels[None]) \
            if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
    return noisyimage, labels
def create_test_image_3d(height, width, depth, num_objs=12, rad_max=30,
                         noise_max=0.0, num_seg_classes=5, channel_dim=None):
    """Return a noisy 3D test image with random spheres plus its label mask.

    `channel_dim` may be None (no channel axis), 0 (channel first) or
    -1/3 (channel last).
    """
    # NOTE(review): allocated as (width, height, depth) although the signature
    # order is (height, width, depth) -- confirm the swap is intended.
    image = np.zeros((width, height, depth))
    for i in range(num_objs):
        # random sphere center/radius, kept away from the borders
        x = np.random.randint(rad_max, width - rad_max)
        y = np.random.randint(rad_max, height - rad_max)
        z = np.random.randint(rad_max, depth - rad_max)
        rad = np.random.randint(5, rad_max)
        spy, spx, spz = np.ogrid[-x:width - x, -y:height - y, -z:depth - z]
        circle = (spx * spx + spy * spy + spz * spz) <= rad * rad
        if num_seg_classes > 1:
            image[circle] = np.ceil(np.random.random() * num_seg_classes)
        else:
            image[circle] = np.random.random() * 0.5 + 0.5
    labels = np.ceil(image).astype(np.int32)
    # additive uniform noise, then rescale to [0, 1]
    norm = np.random.uniform(0, num_seg_classes * noise_max, size=image.shape)
    noisyimage = rescale_array(np.maximum(image, norm))
    if channel_dim is not None:
        assert isinstance(channel_dim, int) and channel_dim in (-1, 0, 3), 'invalid channel dim.'
        noisyimage, labels = (noisyimage[None], labels[None]) \
            if channel_dim == 0 else (noisyimage[..., None], labels[..., None])
    return noisyimage, labels
| true
| true
|
1c49454d298b0470f6d86b30368b4e5d57afc0e8
| 1,953
|
py
|
Python
|
DataSource/TickData.py
|
dukechain2333/BossaNova
|
af9fa7abf060b2e070aa6469afa44fd2861d5a22
|
[
"MIT"
] | 2
|
2020-10-15T12:48:01.000Z
|
2021-09-11T01:44:28.000Z
|
DataSource/TickData.py
|
dukechain2333/BossaNova
|
af9fa7abf060b2e070aa6469afa44fd2861d5a22
|
[
"MIT"
] | null | null | null |
DataSource/TickData.py
|
dukechain2333/BossaNova
|
af9fa7abf060b2e070aa6469afa44fd2861d5a22
|
[
"MIT"
] | null | null | null |
# @author Duke Chain
# @File:TickData.py
# @createTime 2020/12/08 15:25:08
import threading
from DBOperate.CreateStockInfo import CreateStockInfo
from DBOperate.AddStockInfo import AddStockInfo
import akshare as ak
class TickData(threading.Thread):
    """
    Fetch per-tick trade data and write it to the database.

    Args:
        ak: the akshare interface module
        stockID: stock code (e.g. "sh601808")
        dateList: list of trade dates to fetch ("YYYYMMDD" strings)
    """
    def __init__(self, ak, stockID, dateList):
        super().__init__()
        self.ak = ak
        self.stockID = stockID
        self.dateList = dateList
    def run(self):
        # Ensure the per-stock tick table exists before inserting rows.
        createInfo = CreateStockInfo(self.stockID, 'stock_info_tick', 't')
        createInfo.createTable()
        for date in self.dateList:
            data = self.ak.stock_zh_a_tick_tx(code=self.stockID, trade_date=date)
            for i in range(data.shape[0]):
                try:
                    # Columns are akshare's Chinese field names:
                    # trade time, trade price, price change, volume (lots).
                    print(date + ' ' + data['成交时间'][i])
                    trade_date = date + ' ' + data['成交时间'][i]
                    stock_price = data['成交价格'][i]
                    chg = data['价格变动'][i]
                    volume = data['成交量(手)'][i]
                except IndexError:
                    # No data for this date (message: "data missing, adjust the date")
                    print(date, '数据不存在,请修改时间!')
                else:
                    # Only insert when every field was read successfully.
                    addInfo = AddStockInfo(self.stockID, trade_date=trade_date, close_price=stock_price, chg=chg,
                                           volume=volume)
                    addInfo.addInfoTick()
# if __name__ == '__main__':
# dateList = ['20200907', '20200908', '20200909', '20200910', '20200911']
# thread1 = TickData(ak, "sh601808", dateList)
# thread2 = TickData(ak, "sh601811", dateList)
# thread3 = TickData(ak, "sh601858", dateList)
# thread4 = TickData(ak, "sh601878", dateList)
#
# thread1.start()
# thread2.start()
# thread3.start()
# thread4.start()
#
# thread1.join()
# thread2.join()
# thread3.join()
# thread4.join()
#
# print("ALL DONE!")
| 29.590909
| 113
| 0.550947
|
import threading
from DBOperate.CreateStockInfo import CreateStockInfo
from DBOperate.AddStockInfo import AddStockInfo
import akshare as ak
class TickData(threading.Thread):
    """Thread that fetches per-tick trade data via akshare and stores it.

    Args:
        ak: the akshare interface module
        stockID: stock code (e.g. "sh601808")
        dateList: list of trade dates ("YYYYMMDD" strings)
    """
    def __init__(self, ak, stockID, dateList):
        super().__init__()
        self.ak = ak
        self.stockID = stockID
        self.dateList = dateList
    def run(self):
        # Ensure the per-stock tick table exists before inserting rows.
        createInfo = CreateStockInfo(self.stockID, 'stock_info_tick', 't')
        createInfo.createTable()
        for date in self.dateList:
            data = self.ak.stock_zh_a_tick_tx(code=self.stockID, trade_date=date)
            for i in range(data.shape[0]):
                try:
                    # akshare Chinese column names: trade time, price,
                    # price change, volume (lots).
                    print(date + ' ' + data['成交时间'][i])
                    trade_date = date + ' ' + data['成交时间'][i]
                    stock_price = data['成交价格'][i]
                    chg = data['价格变动'][i]
                    volume = data['成交量(手)'][i]
                except IndexError:
                    # No data for this date; message says "data missing, adjust the date"
                    print(date, '数据不存在,请修改时间!')
                else:
                    # Insert only when every field was read successfully.
                    addInfo = AddStockInfo(self.stockID, trade_date=trade_date, close_price=stock_price, chg=chg,
                                           volume=volume)
                    addInfo.addInfoTick()
| true
| true
|
1c49456a4e965385fb2cd2b8f180a1dcc77558ad
| 7,238
|
py
|
Python
|
tools/train.py
|
tszssong/HRNet-Image-Classification
|
6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b
|
[
"MIT"
] | null | null | null |
tools/train.py
|
tszssong/HRNet-Image-Classification
|
6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b
|
[
"MIT"
] | null | null | null |
tools/train.py
|
tszssong/HRNet-Image-Classification
|
6d8ee24aedf2e0b3134102c221a29fb9b0ce2e1b
|
[
"MIT"
] | null | null | null |
# ------------------------------------------------------------------------------
# Copyright (c) Microsoft
# Licensed under the MIT License.
# Written by Bin Xiao (Bin.Xiao@microsoft.com)
# Modified by Ke Sun (sunk@mail.ustc.edu.cn)
# ------------------------------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import sys
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
import models
from config import config
from config import update_config
from core.function import train
from core.function import validate
from utils.modelsummary import get_model_summary
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
def parse_args():
    """Parse command-line options for training and sync them into ``config``.

    Returns:
        argparse.Namespace: parsed arguments; ``--cfg`` is required, the
        remaining string options default to ''.
    """
    arg_parser = argparse.ArgumentParser(description='Train classification network')
    arg_parser.add_argument('--cfg',
                            type=str,
                            required=True,
                            help='experiment configure file name')
    # Optional string options, all defaulting to the empty string.
    for flag, description in (('--modelDir', 'model directory'),
                              ('--logDir', 'log directory'),
                              ('--dataDir', 'data directory'),
                              ('--testModel', 'testModel')):
        arg_parser.add_argument(flag, help=description, type=str, default='')
    parsed = arg_parser.parse_args()
    # Merge CLI overrides into the global experiment config.
    update_config(config, parsed)
    return parsed
def main():
    """Train an HRNet image classifier end to end.

    Builds the model from the experiment config, optionally resumes from a
    checkpoint, trains for the configured epochs with periodic validation,
    and saves per-epoch checkpoints plus the final model state.
    """
    args = parse_args()
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))
    # cudnn related setting
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    # NOTE(review): eval() on a config-derived string -- fine for trusted
    # configs, but getattr(models, config.MODEL.NAME) would be safer.
    model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
        config)
    # Dummy input used only to log the model summary (params/FLOPs).
    dump_input = torch.rand(
        (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model, dump_input))
    # Snapshot the model source code alongside the experiment outputs.
    this_dir = os.path.dirname(__file__)
    models_dst_dir = os.path.join(final_output_dir, 'models')
    if os.path.exists(models_dst_dir):
        shutil.rmtree(models_dst_dir)
    shutil.copytree(os.path.join(this_dir, '../lib/models'), models_dst_dir)
    # TensorBoard writer plus global step counters shared with train/validate.
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }
    gpus = list(config.GPUS)
    print("gpus:",gpus,type(gpus))
    DEVICE = torch.device("cuda:%d"%config.GPUS[0] if torch.cuda.is_available() else "cpu")
    # NOTE(review): .cuda() followed by .to(DEVICE) is redundant when CUDA is
    # available, and .cuda() will fail on a CPU-only machine -- confirm intent.
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
    model = model.to(DEVICE)
    # define loss function (criterion) and optimizer
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = get_optimizer(config, model)
    best_perf = 0.0
    best_model = False
    last_epoch = config.TRAIN.BEGIN_EPOCH
    # Optionally resume epoch counter, best metric, weights and optimizer
    # state from a previous checkpoint in the output dir.
    if config.TRAIN.RESUME:
        model_state_file = os.path.join(final_output_dir,
                                        'checkpoint.pth.tar')
        if os.path.isfile(model_state_file):
            checkpoint = torch.load(model_state_file)
            last_epoch = checkpoint['epoch']
            best_perf = checkpoint['perf']
            model.module.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint (epoch {})"
                        .format(checkpoint['epoch']))
            best_model = True
    # LR_STEP as a list selects milestone decay, a scalar selects fixed-period
    # decay; last_epoch-1 realigns the schedule after a resume.
    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )
    # Data loading code
    traindir = os.path.join(config.DATASET.ROOT, config.DATASET.TRAIN_SET)
    valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)
    # Standard ImageNet normalization constants.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=True,
        num_workers=config.WORKERS,
        pin_memory=True
    )
    # Validation: resize so the centered crop matches the train resolution
    # (0.875 is the conventional crop ratio), no augmentation.
    valid_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
            transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True
    )
    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
        # NOTE(review): scheduler stepped before training; recent PyTorch
        # expects optimizer.step() before lr_scheduler.step() -- confirm the
        # intended schedule alignment.
        lr_scheduler.step()
        # train for one epoch
        train(config, train_loader, model, DEVICE, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # evaluate on validation set
        perf_indicator = validate(config, valid_loader, model, criterion,
                                  final_output_dir, tb_log_dir, writer_dict)
        # Track whether this epoch produced the best validation metric so far.
        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': config.MODEL.NAME,
            'state_dict': model.module.state_dict(),
            'perf': perf_indicator,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir, filename='checkpoint.pth.tar')
    # Persist the last-epoch weights (not necessarily the best ones).
    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth.tar')
    logger.info('saving final model state to {}'.format(
        final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
if __name__ == '__main__':
    main()
| 33.665116
| 91
| 0.613844
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import pprint
import shutil
import sys
import torch
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from tensorboardX import SummaryWriter
import _init_paths
import models
from config import config
from config import update_config
from core.function import train
from core.function import validate
from utils.modelsummary import get_model_summary
from utils.utils import get_optimizer
from utils.utils import save_checkpoint
from utils.utils import create_logger
def parse_args():
    """Parse training CLI options and merge them into the global config."""
    cli = argparse.ArgumentParser(description='Train classification network')
    cli.add_argument('--cfg',
                     required=True,
                     type=str,
                     help='experiment configure file name')
    # The remaining options are optional strings defaulting to ''.
    optional_flags = [('--modelDir', 'model directory'),
                      ('--logDir', 'log directory'),
                      ('--dataDir', 'data directory'),
                      ('--testModel', 'testModel')]
    for flag, text in optional_flags:
        cli.add_argument(flag, help=text, type=str, default='')
    namespace = cli.parse_args()
    update_config(config, namespace)
    return namespace
def main():
    """Train a classification network end-to-end from a YAML experiment config."""
    args = parse_args()
    # Logger plus per-experiment output and tensorboard directories.
    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(pprint.pformat(config))
    # cudnn settings from config.
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    # Resolve the model constructor by name: models.<NAME>.get_cls_net.
    # NOTE(review): eval() on a config-supplied string is an injection risk
    # if the config is untrusted; getattr(models, ...) would be safer.
    model = eval('models.'+config.MODEL.NAME+'.get_cls_net')(
        config)
    # Log a parameter/FLOPs summary using a dummy NCHW input (W, H from config).
    dump_input = torch.rand(
        (1, 3, config.MODEL.IMAGE_SIZE[1], config.MODEL.IMAGE_SIZE[0])
    )
    logger.info(get_model_summary(model, dump_input))
    # Snapshot the model source files next to the experiment outputs.
    this_dir = os.path.dirname(__file__)
    models_dst_dir = os.path.join(final_output_dir, 'models')
    if os.path.exists(models_dst_dir):
        shutil.rmtree(models_dst_dir)
    shutil.copytree(os.path.join(this_dir, '../lib/models'), models_dst_dir)
    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }
    gpus = list(config.GPUS)
    print("gpus:",gpus,type(gpus))
    # Primary device is the first configured GPU; fall back to CPU.
    DEVICE = torch.device("cuda:%d"%config.GPUS[0] if torch.cuda.is_available() else "cpu")
    model = torch.nn.DataParallel(model, device_ids=gpus).cuda()
    model = model.to(DEVICE)
    # Loss function (criterion) and optimizer.
    criterion = torch.nn.CrossEntropyLoss().cuda()
    optimizer = get_optimizer(config, model)
    best_perf = 0.0
    best_model = False
    last_epoch = config.TRAIN.BEGIN_EPOCH
    if config.TRAIN.RESUME:
        # Resume training state (epoch, weights, optimizer) from checkpoint.
        model_state_file = os.path.join(final_output_dir,
                                        'checkpoint.pth.tar')
        if os.path.isfile(model_state_file):
            checkpoint = torch.load(model_state_file)
            last_epoch = checkpoint['epoch']
            best_perf = checkpoint['perf']
            model.module.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            logger.info("=> loaded checkpoint (epoch {})"
                        .format(checkpoint['epoch']))
            best_model = True
    # LR_STEP may be a milestone list (MultiStepLR) or a single step (StepLR).
    if isinstance(config.TRAIN.LR_STEP, list):
        lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )
    else:
        lr_scheduler = torch.optim.lr_scheduler.StepLR(
            optimizer, config.TRAIN.LR_STEP, config.TRAIN.LR_FACTOR,
            last_epoch-1
        )
    # Data loading: ImageFolder layout with ImageNet normalization stats.
    traindir = os.path.join(config.DATASET.ROOT, config.DATASET.TRAIN_SET)
    valdir = os.path.join(config.DATASET.ROOT, config.DATASET.TEST_SET)
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])
    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ])
    )
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.TRAIN.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=True,
        num_workers=config.WORKERS,
        pin_memory=True
    )
    # Validation: resize (1/0.875 of crop) then center-crop to the same size.
    valid_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(int(config.MODEL.IMAGE_SIZE[0] / 0.875)),
            transforms.CenterCrop(config.MODEL.IMAGE_SIZE[0]),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=config.TEST.BATCH_SIZE_PER_GPU*len(gpus),
        shuffle=False,
        num_workers=config.WORKERS,
        pin_memory=True
    )
    for epoch in range(last_epoch, config.TRAIN.END_EPOCH):
        # NOTE(review): scheduler is stepped before train(); newer PyTorch
        # expects optimizer.step() before lr_scheduler.step() — confirm the
        # pinned torch version before reordering.
        lr_scheduler.step()
        # Train for one epoch.
        train(config, train_loader, model, DEVICE, criterion, optimizer, epoch,
              final_output_dir, tb_log_dir, writer_dict)
        # Evaluate on the validation set; perf_indicator drives best-model tracking.
        perf_indicator = validate(config, valid_loader, model, criterion,
                                  final_output_dir, tb_log_dir, writer_dict)
        if perf_indicator > best_perf:
            best_perf = perf_indicator
            best_model = True
        else:
            best_model = False
        logger.info('=> saving checkpoint to {}'.format(final_output_dir))
        save_checkpoint({
            'epoch': epoch + 1,
            'model': config.MODEL.NAME,
            'state_dict': model.module.state_dict(),
            'perf': perf_indicator,
            'optimizer': optimizer.state_dict(),
        }, best_model, final_output_dir, filename='checkpoint.pth.tar')
    # Persist final weights (unwrapped from DataParallel) and flush tensorboard.
    final_model_state_file = os.path.join(final_output_dir,
                                          'final_state.pth.tar')
    logger.info('saving final model state to {}'.format(
        final_model_state_file))
    torch.save(model.module.state_dict(), final_model_state_file)
    writer_dict['writer'].close()
if __name__ == '__main__':
main()
| true
| true
|
1c4945b72a6e9e9e1a10dfad2632125e558e165c
| 860
|
py
|
Python
|
migrations/0066_auto_20190820_1448.py
|
audaciouscode/PassiveDataKit-Django
|
ed1e00c436801b9f49a3e0e6657c2adb6b2ba3d4
|
[
"Apache-2.0"
] | 5
|
2016-01-26T19:19:44.000Z
|
2018-12-12T18:04:04.000Z
|
migrations/0066_auto_20190820_1448.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 6
|
2020-02-17T20:16:28.000Z
|
2021-12-13T21:51:20.000Z
|
migrations/0066_auto_20190820_1448.py
|
audacious-software/PassiveDataKit-Django
|
da91a375c075ceec938f2c9bb6b011f9f019b024
|
[
"Apache-2.0"
] | 4
|
2020-01-29T15:36:58.000Z
|
2021-06-01T18:55:26.000Z
|
# pylint: skip-file
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2019-08-20 19:48
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11) schema migration: adds three nullable
    # free-text client-metadata columns to the DeviceIssue model.
    dependencies = [
        ('passive_data_kit', '0065_devicemodel_reference'),
    ]
    operations = [
        # max_length=1048576 (1 MiB) — presumably chosen to make these
        # effectively unbounded text fields while staying CharField; confirm
        # against the project's convention before changing.
        migrations.AddField(
            model_name='deviceissue',
            name='platform',
            field=models.CharField(blank=True, max_length=1048576, null=True),
        ),
        migrations.AddField(
            model_name='deviceissue',
            name='platform_version',
            field=models.CharField(blank=True, max_length=1048576, null=True),
        ),
        migrations.AddField(
            model_name='deviceissue',
            name='user_agent',
            field=models.CharField(blank=True, max_length=1048576, null=True),
        ),
    ]
| 26.875
| 78
| 0.6
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('passive_data_kit', '0065_devicemodel_reference'),
]
operations = [
migrations.AddField(
model_name='deviceissue',
name='platform',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
migrations.AddField(
model_name='deviceissue',
name='platform_version',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
migrations.AddField(
model_name='deviceissue',
name='user_agent',
field=models.CharField(blank=True, max_length=1048576, null=True),
),
]
| true
| true
|
1c4946a18d3acce164e58e7d5d801355d9aea016
| 3,169
|
py
|
Python
|
tensorboard/tools/whitespace_hygiene_test.py
|
isabella232/tensorboard
|
77cf61f74dd57e4f3a6256e3972335bbd82feb51
|
[
"Apache-2.0"
] | null | null | null |
tensorboard/tools/whitespace_hygiene_test.py
|
isabella232/tensorboard
|
77cf61f74dd57e4f3a6256e3972335bbd82feb51
|
[
"Apache-2.0"
] | 1
|
2021-02-24T00:55:12.000Z
|
2021-02-24T00:55:12.000Z
|
tensorboard/tools/whitespace_hygiene_test.py
|
isabella232/tensorboard
|
77cf61f74dd57e4f3a6256e3972335bbd82feb51
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Check for superfluous whitespace at ends of lines.
Keeps diffs clean and persnickety developers happy.
"""
import collections
import os
import subprocess
import sys
# Files exempted from the trailing-whitespace check.
exceptions = frozenset(
    [
        # End-of-line whitespace is semantic in patch files when a line
        # contains a single space.
        "third_party/mock_call_assertions.patch",
    ]
)
# One `git grep` hit: file path, 1-based line number, and the line text.
Match = collections.namedtuple("Match", ("filename", "line_number", "line"))
def main():
    """Report trailing whitespace and stale exception entries; exit nonzero on either."""
    chdir_to_repo_root()
    # NOTE(review): as written, " *$" matches zero-or-more trailing spaces
    # (i.e. every line under BRE semantics) — confirm the intended pattern.
    matches = git_grep(" *$")
    okay = True
    errors = [hit for hit in matches if hit.filename not in exceptions]
    if errors:
        print("Superfluous trailing whitespace:")
        for hit in errors:
            print("%s:%d:%s$" % (hit.filename, hit.line_number, hit.line))
        print()
        okay = False
    # Exceptions whose files no longer trip the check should be pruned.
    matched_files = frozenset(hit.filename for hit in matches)
    stale_exceptions = exceptions - matched_files
    if stale_exceptions:
        print(
            "Stale exceptions (no whitespace problems; prune exceptions list):"
        )
        for filename in stale_exceptions:
            print(filename)
        print()
        okay = False
    sys.exit(0 if okay else 1)
def git_grep(pattern):
    """Collect `git grep` hits for *pattern* as a list of Match values.

    Exits the process if `git grep` writes any stderr: for instance,
    if the provided pattern is an invalid regular expression.

    Args:
      pattern: `str`; a pattern argument to `git grep`.

    Returns:
      A list of `Match` values.
    """
    cmd = ["git", "grep", "-Izn", "--", pattern]
    proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()
    if err:
        getattr(sys.stderr, "buffer", sys.stderr).write(err)  # Python 2 compat
        sys.exit(1)
    matches = []
    # `-z` NUL-separates the filename/line-number/line fields; this parse
    # assumes no newline characters in filenames.
    for raw in out.splitlines():
        name_raw, lineno_raw, text_raw = raw.split(b"\0", 2)
        matches.append(Match(
            filename=name_raw.decode("utf-8", errors="replace"),
            line_number=int(lineno_raw),
            line=text_raw.decode("utf-8", errors="replace"),
        ))
    return matches
def chdir_to_repo_root():
    """chdir into the root of the enclosing Git repository."""
    root = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
    os.chdir(root[:-1])  # drop the trailing LF that git prints
if __name__ == "__main__":
main()
| 29.342593
| 80
| 0.634269
|
import collections
import os
import subprocess
import sys
exceptions = frozenset(
[
"third_party/mock_call_assertions.patch",
]
)
Match = collections.namedtuple("Match", ("filename", "line_number", "line"))
def main():
chdir_to_repo_root()
matches = git_grep(" *$")
errors = [m for m in matches if m.filename not in exceptions]
okay = True
if errors:
print("Superfluous trailing whitespace:")
for error in errors:
print("%s:%d:%s$" % (error.filename, error.line_number, error.line))
print()
okay = False
stale_exceptions = exceptions - frozenset(m.filename for m in matches)
if stale_exceptions:
print(
"Stale exceptions (no whitespace problems; prune exceptions list):"
)
for filename in stale_exceptions:
print(filename)
print()
okay = False
sys.exit(0 if okay else 1)
def git_grep(pattern):
cmd = ["git", "grep", "-Izn", "--", pattern]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(stdout, stderr) = p.communicate()
if stderr:
getattr(sys.stderr, "buffer", sys.stderr).write(
stderr
) sys.exit(1)
result = []
for (
line
) in stdout.splitlines(): (filename_raw, line_number_raw, line_raw) = line.split(b"\0", 2)
match = Match(
filename=filename_raw.decode("utf-8", errors="replace"),
line_number=int(line_number_raw),
line=line_raw.decode("utf-8", errors="replace"),
)
result.append(match)
return result
def chdir_to_repo_root():
toplevel = subprocess.check_output(["git", "rev-parse", "--show-toplevel"])
toplevel = toplevel[:-1] os.chdir(toplevel)
if __name__ == "__main__":
main()
| true
| true
|
1c49472d0f5e80c89a16bc24af6c12fc4c561fcb
| 2,362
|
py
|
Python
|
src/third_party/beaengine/tests/0f3850.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | 1
|
2022-01-17T17:40:29.000Z
|
2022-01-17T17:40:29.000Z
|
src/third_party/beaengine/tests/0f3850.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | null | null | null |
src/third_party/beaengine/tests/0f3850.py
|
CrackerCat/rp
|
5fe693c26d76b514efaedb4084f6e37d820db023
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
# @author : beaengine@gmail.com
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
    """Disassembly checks for VPDPBUSD (EVEX-encoded, opcode 0F38 50)."""

    def test(self):
        # EVEX.{128,256,512}.66.0F38.W0 50 /r
        # VPDPBUSD {x,y,z}mm1{k1}{z}, {x,y,z}mm2, {x,y,z}mm3/m{128,256,512}/m32bcst
        cases = (
            ('EVEX.128.66.0F38.W0', 'vpdpbusd xmm25, xmm16, xmmword ptr [r14]'),
            ('EVEX.256.66.0F38.W0', 'vpdpbusd ymm25, ymm16, ymmword ptr [r14]'),
            ('EVEX.512.66.0F38.W0', 'vpdpbusd zmm25, zmm16, zmmword ptr [r14]'),
        )
        for evex_spec, expected_repr in cases:
            myEVEX = EVEX(evex_spec)
            myEVEX.vvvv = 0b1111
            Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
            myDisasm = Disasm(Buffer)
            myDisasm.read()
            assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
            assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
            assert_equal(myDisasm.repr(), expected_repr)
| 40.033898
| 81
| 0.662151
|
from headers.BeaEnginePython import *
from nose.tools import *
class TestSuite:
def test(self):
myEVEX = EVEX('EVEX.128.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd xmm25, xmm16, xmmword ptr [r14]')
myEVEX = EVEX('EVEX.256.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd ymm25, ymm16, ymmword ptr [r14]')
myEVEX = EVEX('EVEX.512.66.0F38.W0')
myEVEX.vvvv = 0b1111
Buffer = bytes.fromhex('{}500e'.format(myEVEX.prefix()))
myDisasm = Disasm(Buffer)
myDisasm.read()
assert_equal(myDisasm.infos.Instruction.Opcode, 0x50)
assert_equal(myDisasm.infos.Instruction.Mnemonic, b'vpdpbusd')
assert_equal(myDisasm.repr(), 'vpdpbusd zmm25, zmm16, zmmword ptr [r14]')
| true
| true
|
1c49473fd7bb5ce515eff66c03f9cbc72d5e5171
| 719
|
py
|
Python
|
assistant/configurations/theme.py
|
AmulyaParitosh/Virtual-Assistant
|
b1a0e6d8569a481558bd04c2d9295a6933536ed4
|
[
"MIT"
] | null | null | null |
assistant/configurations/theme.py
|
AmulyaParitosh/Virtual-Assistant
|
b1a0e6d8569a481558bd04c2d9295a6933536ed4
|
[
"MIT"
] | null | null | null |
assistant/configurations/theme.py
|
AmulyaParitosh/Virtual-Assistant
|
b1a0e6d8569a481558bd04c2d9295a6933536ed4
|
[
"MIT"
] | null | null | null |
import json
# Load the theme catalogue once at import time. Use a context manager so the
# file handle is closed (the original open(...).read() leaked it) and parse
# directly from the file object; JSON is specified as UTF-8.
with open('assistant/configurations/themes.json', encoding='utf-8') as _theme_file:
    information = json.load(_theme_file)

# Name of the active theme; must match a "name" entry in themes.json.
Theme = "Shizuka"

# Publish the selected theme's settings as module-level attributes.
# NOTE(review): if `Theme` matches no entry these names are never bound and
# importers will hit a NameError — confirm themes.json always contains it.
for theme in information["Themes"]:
    if theme["name"] == Theme:
        name = theme["name"]
        voice = theme["voice"]
        art = theme["ascii"]
        bg_image = theme["bg_image"]
        label_bg_colour = theme["label_bg_colour"]
        scrolltext_bg_colour = theme["scrolltext_bg_colour"]
        button_colour = theme["button_colour"]
        fg_colour = theme["fg_colour"]
        base_font = theme["base_font"]
        title_font = theme["title_font"]
def get_themes():
    """Print the name of every theme declared in themes.json."""
    theme_names = [entry["name"] for entry in information["Themes"]]
    for theme_name in theme_names:
        print(theme_name)
if __name__ == "__main__":
get_themes()
| 23.966667
| 77
| 0.628651
|
import json
information = json.loads(open('assistant/configurations/themes.json').read())
Theme = "Shizuka"
for theme in information["Themes"]:
if theme["name"] == Theme:
name = theme["name"]
voice = theme["voice"]
art = theme["ascii"]
bg_image = theme["bg_image"]
label_bg_colour = theme["label_bg_colour"]
scrolltext_bg_colour = theme["scrolltext_bg_colour"]
button_colour = theme["button_colour"]
fg_colour = theme["fg_colour"]
base_font = theme["base_font"]
title_font = theme["title_font"]
def get_themes():
for theme in information["Themes"]:
print(theme["name"])
if __name__ == "__main__":
get_themes()
| true
| true
|
1c4947a8a1f80457570c9ffe5b8f4037ae19954e
| 943
|
py
|
Python
|
submission/damagereport/api/1/urls.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
submission/damagereport/api/1/urls.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
submission/damagereport/api/1/urls.py
|
simonprast/wopi-engine
|
b3f59782659c8be42f4064bce5281afd391833be
|
[
"BSD-Source-Code"
] | null | null | null |
#
# Created on Wed Nov 18 2020
#
# Copyright (c) 2020 - Simon Prast
#
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import api_views
urlpatterns = [
    # POST - create new damage report (customer)
    path('submit/', api_views.SubmitDamageReport.as_view()),
    # POST - send message to a damage report (customer/admin)
    path('submit/<int:report>/', api_views.SendMessage.as_view()),
    # GET - show all own damage reports o/w/c (customer)
    # GET - show damage reports of related users (admin)
    path('show/', api_views.GetDamageReports.as_view()),
    # GET - show all damage reports o/w/c + denied (admin)
    path('show/all/', api_views.GetAllDamageReports.as_view()),
    # GET - show all messages of a specific damage report (customer/admin)
    path('show/<int:pk>/', api_views.GetDamageReportDetails.as_view()),
]
# Allow clients to request explicit response formats via URL suffix (e.g. .json).
urlpatterns = format_suffix_patterns(urlpatterns)
| 29.46875
| 75
| 0.711559
|
from django.urls import path
from rest_framework.urlpatterns import format_suffix_patterns
from . import api_views
urlpatterns = [
path('submit/', api_views.SubmitDamageReport.as_view()),
path('submit/<int:report>/', api_views.SendMessage.as_view()),
path('show/', api_views.GetDamageReports.as_view()),
path('show/all/', api_views.GetAllDamageReports.as_view()),
path('show/<int:pk>/', api_views.GetDamageReportDetails.as_view()),
]
urlpatterns = format_suffix_patterns(urlpatterns)
| true
| true
|
1c494833f793e0560e6b2f5a6c672a8f1d65c98c
| 2,574
|
py
|
Python
|
odd_tableau_adapter/mappers/sheets.py
|
opendatadiscovery/odd-tableau-adapter
|
dee69398ccdbed6acbc02a13c188f5ec1f26a7e1
|
[
"Apache-2.0"
] | null | null | null |
odd_tableau_adapter/mappers/sheets.py
|
opendatadiscovery/odd-tableau-adapter
|
dee69398ccdbed6acbc02a13c188f5ec1f26a7e1
|
[
"Apache-2.0"
] | 1
|
2021-11-01T18:00:00.000Z
|
2021-11-01T18:00:00.000Z
|
odd_tableau_adapter/mappers/sheets.py
|
opendatadiscovery/odd-tableau-adapter
|
dee69398ccdbed6acbc02a13c188f5ec1f26a7e1
|
[
"Apache-2.0"
] | null | null | null |
from copy import deepcopy
from datetime import datetime
import pytz
from odd_models.models import DataEntity, DataConsumer, DataEntityType
from oddrn_generator import TableauGenerator
from . import _TABLEAU_DATETIME_FORMAT, _data_consumer_metadata_schema_url, _data_consumer_metadata_excluded_keys
from .metadata import _append_metadata_extension
def map_sheet(oddrn_generator: TableauGenerator, sheets: list[dict]) -> list[DataEntity]:
    """Map raw Tableau sheet dicts onto ODD DataEntity objects.

    Each sheet becomes a DASHBOARD entity whose DataConsumer inputs are the
    oddrns of the upstream tables referenced by the sheet's datasource fields.
    """
    data_entities: list[DataEntity] = []
    for sheet in sheets:
        oddrn_generator.set_oddrn_paths(workbooks=sheet['workbook']['name'], sheets=sheet['name'])

        data_entity: DataEntity = DataEntity(
            oddrn=oddrn_generator.get_oddrn_by_path("sheets"),
            name=sheet['name'],
            owner=sheet['workbook'].get('owner', {}).get('name'),
            metadata=[],
            type=DataEntityType.DASHBOARD,
        )
        data_entities.append(data_entity)

        _append_metadata_extension(data_entity.metadata, _data_consumer_metadata_schema_url, sheet,
                                   _data_consumer_metadata_excluded_keys)

        # Tableau timestamps are naive; normalize to UTC ISO-8601 strings.
        if sheet['createdAt'] is not None:
            data_entity.created_at = datetime.strptime(sheet['createdAt'], _TABLEAU_DATETIME_FORMAT) \
                .replace(tzinfo=pytz.utc) \
                .isoformat()
        if sheet['updatedAt'] is not None:
            data_entity.updated_at = datetime.strptime(sheet['updatedAt'], _TABLEAU_DATETIME_FORMAT) \
                .replace(tzinfo=pytz.utc) \
                .isoformat()
        else:
            # Fall back to the creation time when no update time is recorded.
            if sheet['createdAt'] is not None:
                data_entity.updated_at = data_entity.created_at

        # Fix: 'datasourceFields' holds a list of field dicts, so the default
        # for a missing key is an empty list (was {}, a dict).
        data_entity.data_consumer = DataConsumer(
            inputs=_map_datasource_fields_to_oddrns(
                oddrn_generator, sheet.get('datasourceFields', [])
            ),
            outputs=[],
        )
    return data_entities
def _map_datasource_fields_to_oddrns(oddrn_generator: TableauGenerator, datasource_fields: dict) -> list[str]:
oddrn_gen = deepcopy(oddrn_generator) # do not change previous oddrn
inputs_oddrns: set = set()
for field in datasource_fields:
for table in field['upstreamTables']:
oddrn_gen.set_oddrn_paths(
databases=table.get('database', {}).get('name', ''),
schemas=table['schema'] or None,
tables=table['name']
)
inputs_oddrns.add(oddrn_gen.get_oddrn_by_path("tables"))
return list(inputs_oddrns)
| 37.852941
| 113
| 0.653458
|
from copy import deepcopy
from datetime import datetime
import pytz
from odd_models.models import DataEntity, DataConsumer, DataEntityType
from oddrn_generator import TableauGenerator
from . import _TABLEAU_DATETIME_FORMAT, _data_consumer_metadata_schema_url, _data_consumer_metadata_excluded_keys
from .metadata import _append_metadata_extension
def map_sheet(oddrn_generator: TableauGenerator, sheets: list[dict]) -> list[DataEntity]:
data_entities: list[DataEntity] = []
for sheet in sheets:
oddrn_generator.set_oddrn_paths(workbooks=sheet['workbook']['name'], sheets=sheet['name'])
data_entity: DataEntity = DataEntity(
oddrn=oddrn_generator.get_oddrn_by_path("sheets"),
name=sheet['name'],
owner=sheet['workbook'].get('owner', {}).get('name'),
metadata=[],
type=DataEntityType.DASHBOARD,
)
data_entities.append(data_entity)
_append_metadata_extension(data_entity.metadata, _data_consumer_metadata_schema_url, sheet,
_data_consumer_metadata_excluded_keys)
if sheet['createdAt'] is not None:
data_entity.created_at = datetime.strptime(sheet['createdAt'], _TABLEAU_DATETIME_FORMAT) \
.replace(tzinfo=pytz.utc) \
.isoformat()
if sheet['updatedAt'] is not None:
data_entity.updated_at = datetime.strptime(sheet['updatedAt'], _TABLEAU_DATETIME_FORMAT) \
.replace(tzinfo=pytz.utc) \
.isoformat()
else:
if sheet['createdAt'] is not None:
data_entity.updated_at = data_entity.created_at
data_entity.data_consumer = DataConsumer(
inputs=_map_datasource_fields_to_oddrns(
oddrn_generator, sheet.get('datasourceFields', {})
),
outputs=[],
)
return data_entities
def _map_datasource_fields_to_oddrns(oddrn_generator: TableauGenerator, datasource_fields: dict) -> list[str]:
oddrn_gen = deepcopy(oddrn_generator)
inputs_oddrns: set = set()
for field in datasource_fields:
for table in field['upstreamTables']:
oddrn_gen.set_oddrn_paths(
databases=table.get('database', {}).get('name', ''),
schemas=table['schema'] or None,
tables=table['name']
)
inputs_oddrns.add(oddrn_gen.get_oddrn_by_path("tables"))
return list(inputs_oddrns)
| true
| true
|
1c4949bbb3f9fca427ac14839fdc5b4b1b8faa8f
| 7,434
|
py
|
Python
|
tests/test_metadata.py
|
tskisner/sotodlib
|
9b80171129ea312bc7a61ce5c37d6abfbb3d5be9
|
[
"MIT"
] | null | null | null |
tests/test_metadata.py
|
tskisner/sotodlib
|
9b80171129ea312bc7a61ce5c37d6abfbb3d5be9
|
[
"MIT"
] | null | null | null |
tests/test_metadata.py
|
tskisner/sotodlib
|
9b80171129ea312bc7a61ce5c37d6abfbb3d5be9
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2020 Simons Observatory.
# Full license can be found in the top level "LICENSE" file.
"""Demonstrate construction of some simple metadata structures. This
includes HDF5 IO helper routines, and the ObsDb/DetDb resolution and
association system used in Context/SuperLoader.
"""
import unittest
import tempfile
from sotodlib.core import metadata
from sotodlib.io.metadata import ResultSetHdfLoader, write_dataset, _decode_array
import os
import h5py
class MetadataTest(unittest.TestCase):
    """Round-trip and resolution tests for the metadata layer:
    ResultSet <-> HDF5 datasets, and ManifestDb/DetDb/ObsDb lookup
    as used by SuperLoader.
    """
    def setUp(self):
        # Fresh scratch directory for the HDF5/sqlite files each test writes.
        self.tempdir = tempfile.TemporaryDirectory()
    def tearDown(self):
        self.tempdir.cleanup()
    def test_000_support(self):
        """Test some numpy-HDF5 conversion support functions.
        """
        rs = metadata.ResultSet(keys=['a_string', 'a_float', 'a_bad_string', 'a_bad_float'])
        rs.rows.append(('hello', 1.2, 'yuck', 1.3))
        # hdf_compat coerces strings to fixed-width bytes ('S'), as HDF5 needs.
        aru = rs.asarray(hdf_compat=True)
        self.assertTrue(aru.dtype['a_string'].char == 'S')
        # Conversion code: mapped keys are renamed, None entries dropped.
        arx = _decode_array(aru, key_map={
            'a_string': 'another_string',
            'a_float': 'another_float',
            'a_bad_string': None,
            'a_bad_float': None,
        })
        self.assertCountEqual(arx.dtype.names, ['another_string', 'another_float'])
        self.assertEqual(arx['another_string'].dtype.char, 'U')
    def test_001_hdf(self):
        """Test metadata write/read to HDF5 datasets
        """
        hdf_fn = os.path.join(self.tempdir.name, '_test_000_hdf.h5')
        # The reason we're here today is that this thing works but is
        # going to be removed.
        loader = ResultSetHdfLoader()
        test_obs_id = 'testobs_1234'
        # Create an hdf5 dataset which is a structured array with only the
        # 'timeconst' column, containing the single fixed value. Since there
        # are no columns with names prefixed by 'dets:' or 'obs:', this value
        # will be broadcast to all observations and detectors that access it.
        TGOOD = 1e-3
        rs = metadata.ResultSet(keys=['timeconst'])
        rs.append({'timeconst': TGOOD})
        with h5py.File(hdf_fn, 'a') as fout:
            # Simple one...
            write_dataset(rs, fout, 'timeconst_1ms', overwrite=True)
        # Simple look-up:
        req = {'filename': hdf_fn,
               'obs:obs_id': test_obs_id,
               'dataset': 'timeconst_1ms'}
        data = loader.from_loadspec(req)
        self.assertCountEqual(data['timeconst'], [TGOOD])
    def test_010_dbs(self):
        """Test metadata detdb/obsdb resolution system
        This tests one of the more complicated cases:
        - The ManifestDb includes restrictions on dets:band, so f090
          is to be loaded from one dataset and f150 is to be loaded
          from another.
        - The two datasets both provide values for f090 and f150, so
          the code has to know to ignore the ones that weren't asked
          for.
        """
        hdf_fn = os.path.join(self.tempdir.name, '_test_010_dbs.h5')
        mandb_fn = os.path.join(self.tempdir.name, '_test_010_dbs.sqlite')
        # Add two datasets to the HDF file. They are called
        # "timeconst_early" and "timeconst_late" but there is no
        # specific time range associated with each. Each dataset
        # contains a value for bands f090 and f150. The "early" set
        # has TBAD for f150 and the "late" set has TBAD for f090.
        T090, T150, TBAD = 90e-3, 150e-3, 1e0
        with h5py.File(hdf_fn, 'a') as fout:
            # First test.
            for label, tau1, tau2 in [('early', T090, TBAD),
                                      ('late', TBAD, T150)]:
                rs = metadata.ResultSet(keys=['dets:band', 'timeconst'])
                rs.append({'dets:band': 'f090', 'timeconst': tau1})
                rs.append({'dets:band': 'f150', 'timeconst': tau2})
                write_dataset(rs, fout, 'timeconst_%s' % label, overwrite=True)
        # To match the early/late example we need DetDb and ObsDb.
        detdb = metadata.DetDb()
        detdb.create_table('base', ["`band` str", "`polcode` str"])
        detdb.add_props('base', 'det1', band='f090', polcode='A')
        detdb.add_props('base', 'det2', band='f090', polcode='B')
        detdb.add_props('base', 'det3', band='f150', polcode='A')
        detdb.add_props('base', 'det4', band='f150', polcode='B')
        obsdb = metadata.ObsDb()
        t_pivot = 2000010000
        obsdb.add_obs_columns(['timestamp float'])
        obsdb.update_obs('obs_00', {'timestamp': t_pivot - 10000})
        obsdb.update_obs('obs_01', {'timestamp': t_pivot + 10000})
        # Test 1 -- ManifestDb and Stored datasets both have "band" rules.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:band') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        for band, this_pivot in [('f090', t_pivot + 1e6),
                                 ('f150', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:band': band,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:band': band,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)
        # The SuperLoader is where the logic lives to combine multiple
        # results and pull out the right information in the right
        # order. It should leave us with no TBAD values.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        spec_list = [
            {'db': mandb_fn,
             'name': 'tau&timeconst'}
        ]
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, T090, T150, T150])
        # Test 2: ManifestDb specifies polcode, which crosses with
        # dataset band.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:polcode') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        for polcode, this_pivot in [('A', t_pivot + 1e6),
                                    ('B', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:polcode': polcode,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:polcode': polcode,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)
        # Now we expect only f090 A and f150 B to resolve to non-bad vals.
        # Make sure you reinit the loader, to avoid cached dbs.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, TBAD, TBAD, T150])
if __name__ == '__main__':
unittest.main()
| 41.764045
| 92
| 0.576944
|
import unittest
import tempfile
from sotodlib.core import metadata
from sotodlib.io.metadata import ResultSetHdfLoader, write_dataset, _decode_array
import os
import h5py
class MetadataTest(unittest.TestCase):
    """Tests for sotodlib metadata support: ResultSet/HDF5 round-trips and
    ManifestDb + SuperLoader result resolution."""

    def setUp(self):
        # Fresh scratch directory per test; removed in tearDown.
        self.tempdir = tempfile.TemporaryDirectory()

    def tearDown(self):
        self.tempdir.cleanup()

    def test_000_support(self):
        """Check hdf-compatible encoding/decoding of a ResultSet array."""
        rs = metadata.ResultSet(keys=['a_string', 'a_float', 'a_bad_string', 'a_bad_float'])
        rs.rows.append(('hello', 1.2, 'yuck', 1.3))
        aru = rs.asarray(hdf_compat=True)
        # hdf_compat must store strings as fixed-width bytes ('S'), not unicode.
        self.assertTrue(aru.dtype['a_string'].char == 'S')

        # key_map entries mapped to None are dropped; others are renamed.
        arx = _decode_array(aru, key_map={
            'a_string': 'another_string',
            'a_float': 'another_float',
            'a_bad_string': None,
            'a_bad_float': None,
        })
        self.assertCountEqual(arx.dtype.names, ['another_string', 'another_float'])
        # Decoding restores unicode ('U') strings.
        self.assertEqual(arx['another_string'].dtype.char, 'U')

    def test_001_hdf(self):
        """Write a ResultSet dataset to HDF5 and load it back by request."""
        hdf_fn = os.path.join(self.tempdir.name, '_test_000_hdf.h5')
        # going to be removed.
        loader = ResultSetHdfLoader()

        test_obs_id = 'testobs_1234'

        # Create an hdf5 dataset which is a structured array with only the
        # 'timeconst' column, containing the single fixed value.  Since there
        # are no columns with names prefixed by 'dets:' or 'obs:', this value
        # will be broadcast to all observations and detectors that access it.
        TGOOD = 1e-3
        rs = metadata.ResultSet(keys=['timeconst'])
        rs.append({'timeconst': TGOOD})
        with h5py.File(hdf_fn, 'a') as fout:
            # Simple one...
            write_dataset(rs, fout, 'timeconst_1ms', overwrite=True)

        # Simple look-up:
        req = {'filename': hdf_fn,
               'obs:obs_id': test_obs_id,
               'dataset': 'timeconst_1ms'}
        data = loader.from_loadspec(req)
        self.assertCountEqual(data['timeconst'], [TGOOD])

    def test_010_dbs(self):
        """Exercise ManifestDb + SuperLoader resolution of per-band and
        per-polcode time constant datasets."""
        hdf_fn = os.path.join(self.tempdir.name, '_test_010_dbs.h5')
        mandb_fn = os.path.join(self.tempdir.name, '_test_010_dbs.sqlite')

        # Add two datasets to the HDF file.  They are called
        # "timeconst_early" and "timeconst_late" but there is no
        # specific time range associated with each.  Each dataset
        # contains a value for bands f090 and f150.  The "early" set
        # has TBAD for f150 and the "late" set has TBAD for f090.
        T090, T150, TBAD = 90e-3, 150e-3, 1e0
        with h5py.File(hdf_fn, 'a') as fout:
            # First test.
            for label, tau1, tau2 in [('early', T090, TBAD),
                                      ('late', TBAD, T150)]:
                rs = metadata.ResultSet(keys=['dets:band', 'timeconst'])
                rs.append({'dets:band': 'f090', 'timeconst': tau1})
                rs.append({'dets:band': 'f150', 'timeconst': tau2})
                write_dataset(rs, fout, 'timeconst_%s' % label, overwrite=True)

        # To match the early/late example we need DetDb and ObsDb.
        detdb = metadata.DetDb()
        detdb.create_table('base', ["`band` str", "`polcode` str"])
        detdb.add_props('base', 'det1', band='f090', polcode='A')
        detdb.add_props('base', 'det2', band='f090', polcode='B')
        detdb.add_props('base', 'det3', band='f150', polcode='A')
        detdb.add_props('base', 'det4', band='f150', polcode='B')

        obsdb = metadata.ObsDb()
        t_pivot = 2000010000
        obsdb.add_obs_columns(['timestamp float'])
        obsdb.update_obs('obs_00', {'timestamp': t_pivot - 10000})
        obsdb.update_obs('obs_01', {'timestamp': t_pivot + 10000})

        # Test 1 -- ManifestDb and Stored datasets both have "band" rules.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:band') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        # Each band switches from "early" to "late" at a different pivot
        # time; obs_00 falls before both pivots.
        for band, this_pivot in [('f090', t_pivot + 1e6),
                                 ('f150', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:band': band,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:band': band,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)

        # The SuperLoader is where the logic lives to combine multiple
        # results and pull out the right information in the right
        # order.  It should leave us with no TBAD values.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        spec_list = [
            {'db': mandb_fn,
             'name': 'tau&timeconst'}
        ]
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, T090, T150, T150])

        # Test 2: ManifestDb specifies polcode, which crosses with
        # dataset band.
        scheme = metadata.ManifestScheme() \
            .add_range_match('obs:timestamp') \
            .add_data_field('dets:polcode') \
            .add_data_field('dataset')
        mandb = metadata.ManifestDb(scheme=scheme)
        for polcode, this_pivot in [('A', t_pivot + 1e6),
                                    ('B', t_pivot - 1e6)]:
            mandb.add_entry({'dataset': 'timeconst_early',
                             'dets:polcode': polcode,
                             'obs:timestamp': (0, this_pivot)},
                            filename=hdf_fn)
            mandb.add_entry({'dataset': 'timeconst_late',
                             'dets:polcode': polcode,
                             'obs:timestamp': (this_pivot, 4e9)},
                            filename=hdf_fn)
        mandb.to_file(mandb_fn)

        # Now we expect only f090 A and f150 B to resolve to non-bad vals.
        # Make sure you reinit the loader, to avoid cached dbs.
        loader = metadata.SuperLoader(obsdb=obsdb, detdb=detdb)
        mtod = loader.load(spec_list, {'obs:obs_id': 'obs_00'})
        self.assertCountEqual(mtod['tau'], [T090, TBAD, TBAD, T150])
if __name__ == '__main__':
    # Support running this test module directly (outside a test runner).
    unittest.main()
| true
| true
|
1c4949e8507d2d5b90702103641c5f8095dbb773
| 2,557
|
py
|
Python
|
main.py
|
fsevenm/ulauncher-uuid
|
2fbb70fd2af246277b2baff03465bc8bd971c85f
|
[
"MIT"
] | 1
|
2022-01-29T16:30:00.000Z
|
2022-01-29T16:30:00.000Z
|
main.py
|
fsevenm/ulauncher-uuid
|
2fbb70fd2af246277b2baff03465bc8bd971c85f
|
[
"MIT"
] | null | null | null |
main.py
|
fsevenm/ulauncher-uuid
|
2fbb70fd2af246277b2baff03465bc8bd971c85f
|
[
"MIT"
] | null | null | null |
import logging
import uuid
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
logger = logging.getLogger(__name__)
class UuidExtension(Extension):
    """Ulauncher extension entry point: wires the keyword-query listener
    into the extension framework."""

    def __init__(self):
        logger.info('init UUID extension')
        super().__init__()
        # Route keyword queries to our UUID-generating listener.
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
    """Generates UUIDs in response to the extension's keyword query.

    The optional argument has the form "<version> <name>": the first token
    selects v1/v3/v4/v5 (default v4), the second is the name hashed by the
    v3/v5 variants (default "python.org").
    """

    def on_event(self, event, extension):
        accepted_versions = ["v1", "v3", "v4", "v5"]
        version = "v4"
        name = "python.org"

        raw = event.get_argument()
        tokens = raw.split(' ') if raw is not None else []
        if tokens and tokens[0] in accepted_versions:
            version = tokens[0]
        if len(tokens) > 1 and tokens[1] is not None:
            name = tokens[1]

        generated = []
        if version == "v1":
            generated.append(("UUID v1", str(uuid.uuid1())))
        elif version == "v4":
            generated.append(("UUID v4", str(uuid.uuid4())))
        elif version == "v3":
            # v3/v5 are deterministic hashes of the name in two namespaces.
            generated.append(("UUID v3 DNS", str(uuid.uuid3(uuid.NAMESPACE_DNS, name))))
            generated.append(("UUID v3 URL", str(uuid.uuid3(uuid.NAMESPACE_URL, name))))
        elif version == "v5":
            generated.append(("UUID v5 DNS", str(uuid.uuid5(uuid.NAMESPACE_DNS, name))))
            generated.append(("UUID v5 URL", str(uuid.uuid5(uuid.NAMESPACE_URL, name))))

        # One result row per UUID; Enter copies the value to the clipboard.
        items = [
            ExtensionResultItem(icon='images/icon.png',
                                name=value,
                                description=desc,
                                highlightable=False,
                                on_enter=CopyToClipboardAction(value))
            for desc, value in generated
        ]
        return RenderResultListAction(items)
if __name__ == '__main__':
    # Entry point when launched by Ulauncher.
    UuidExtension().run()
| 35.027397
| 94
| 0.596402
|
import logging
import uuid
from ulauncher.api.client.Extension import Extension
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.shared.event import KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.action.CopyToClipboardAction import CopyToClipboardAction
logger = logging.getLogger(__name__)
class UuidExtension(Extension):
    """Ulauncher extension entry point for UUID generation."""

    def __init__(self):
        # Log startup, initialize the framework base class, then register
        # the listener that handles keyword queries.
        logger.info('init UUID extension')
        super(UuidExtension, self).__init__()
        self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
class KeywordQueryEventListener(EventListener):
    """Generates UUIDs in response to the extension's keyword query."""

    def on_event(self, event, extension):
        """Parse the optional "<version> <name>" argument and return result
        rows with generated UUIDs (Enter copies the value to the clipboard)."""
        items = []
        generated_uuids = []
        accepted_versions = ["v1", "v3", "v4", "v5"]
        args = None
        v = "v4"  # default UUID version
        name = "python.org"  # default name for the hash-based v3/v5 variants

        args_string = event.get_argument()
        if args_string is not None:
            args = args_string.split(' ')  # [0] version, [1] name

        # First token selects the version, if it's one we support.
        try:
            if args is not None and args[0] in accepted_versions:
                v = args[0]
        except IndexError:
            pass

        # Second token (when present) is the name hashed by v3/v5.
        try:
            if args is not None and args[1] is not None:
                name = args[1]
        except IndexError:
            pass

        if v == "v1":
            generated_uuids.append(["UUID v1", str(uuid.uuid1())])
        elif v == "v4":
            generated_uuids.append(["UUID v4", str(uuid.uuid4())])
        elif v == "v3":
            # v3/v5 are deterministic hashes of the name in two namespaces.
            generated_uuids.append(["UUID v3 DNS", str(uuid.uuid3(uuid.NAMESPACE_DNS, name))])
            generated_uuids.append(["UUID v3 URL", str(uuid.uuid3(uuid.NAMESPACE_URL, name))])
        elif v == "v5":
            generated_uuids.append(["UUID v5 DNS", str(uuid.uuid5(uuid.NAMESPACE_DNS, name))])
            generated_uuids.append(["UUID v5 URL", str(uuid.uuid5(uuid.NAMESPACE_URL, name))])

        for desc, uuid_value in generated_uuids:
            items.append(ExtensionResultItem(icon='images/icon.png',
                                             name=uuid_value,
                                             description=desc,
                                             highlightable=False,
                                             on_enter=CopyToClipboardAction(uuid_value)
                                             ))

        return RenderResultListAction(items)
if __name__ == '__main__':
    # Entry point when launched by Ulauncher.
    UuidExtension().run()
| true
| true
|
1c494a3c50140aabc3b3a441f1a35000c0f75722
| 189
|
py
|
Python
|
pset_loops/loop_basics/p5.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 5
|
2019-04-08T20:05:37.000Z
|
2019-12-04T20:48:45.000Z
|
pset_loops/loop_basics/p5.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 8
|
2019-04-15T15:16:05.000Z
|
2022-02-12T10:33:32.000Z
|
pset_loops/loop_basics/p5.py
|
mottaquikarim/pydev-psets
|
9749e0d216ee0a5c586d0d3013ef481cc21dee27
|
[
"MIT"
] | 2
|
2019-04-10T00:14:42.000Z
|
2020-02-26T20:35:21.000Z
|
"""
Factors
"""
# Find all factors of a number that a user inputs and print out 'The factors of <the_user_input_number> are: '.
user_input = input('Enter a number to find its factors: ')
| 23.625
| 111
| 0.714286
|
# Exercise stub: prompt for the number whose factors should be computed.
# input() returns a string; convert with int() before computing factors.
user_input = input('Enter a number to find its factors: ')
| true
| true
|
1c494c0c55610349d045688af032b680b719446c
| 2,457
|
py
|
Python
|
pypeln/process/api/ordered.py
|
quarckster/pypeln
|
f4160d0f4d4718b67f79a0707d7261d249459a4b
|
[
"MIT"
] | 1,281
|
2018-09-20T05:35:27.000Z
|
2022-03-30T01:29:48.000Z
|
pypeln/process/api/ordered.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 78
|
2018-09-18T20:38:12.000Z
|
2022-03-30T20:16:02.000Z
|
pypeln/process/api/ordered.py
|
webclinic017/pypeln
|
5231806f2cac9d2019dacbbcf913484fd268b8c1
|
[
"MIT"
] | 88
|
2018-09-24T10:46:14.000Z
|
2022-03-28T09:34:50.000Z
|
import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
    """Worker process function: drains the input queue and re-emits every
    element in sorted order.

    Blocks until the upstream queue is exhausted, so all elements must fit
    in memory.
    """

    def __call__(self, worker: "Worker", **kwargs):
        # Collect everything and sort once -- O(n log n) -- instead of the
        # previous bisect.insort per element plus list.pop(0) per element,
        # each of which is O(n) and made the whole pass O(n^2).
        # sorted() is stable, so ties keep their arrival order, exactly as
        # insort (which inserts to the right of equals) did.
        for elem in sorted(worker.stage_params.input_queue):
            worker.stage_params.output_queues.put(elem)
@tp.overload
def ordered(
    stage: Stage[A],
    maxsize: int = 0,
) -> Stage[A]:
    ...


@tp.overload
def ordered(maxsize: int = 0) -> pypeln_utils.Partial[Stage[A]]:
    ...


def ordered(
    stage: tp.Union[
        Stage[A], tp.Iterable[A], pypeln_utils.Undefined
    ] = pypeln_utils.UNDEFINED,
    maxsize: int = 0,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
    """
    Creates a stage that sorts its elements based on their order of creation
    on the source iterable(s) of the pipeline.

    ```python
    import pypeln as pl
    import random
    import time

    def slow_squared(x):
        time.sleep(random.random())
        return x ** 2

    stage = range(5)
    stage = pl.process.map(slow_squared, stage, workers = 2)
    stage = pl.process.ordered(stage)

    print(list(stage)) # [0, 1, 4, 9, 16]
    ```

    !!! note
        `ordered` will work even if the previous stages are from different
        `pypeln` modules, but it may not work if you introduce an
        itermediate external iterable stage.

    !!! warning
        This stage will not yield util it accumulates all of the elements
        from the previous stage, use this only if all elements fit in memory.

    Arguments:
        stage: A Stage or Iterable.
        maxsize: The maximum number of objects the stage can hold
            simultaneously, if set to `0` (default) then the stage can grow
            unbounded.

    Returns:
        If the `stage` parameters is given then this function returns an
        iterable, else it returns a `Partial`.
    """

    if isinstance(stage, pypeln_utils.Undefined):
        # Bug fix: forward `maxsize` into the deferred call. Previously the
        # partial form `ordered(maxsize=n)(stage)` silently dropped maxsize.
        return pypeln_utils.Partial(
            lambda stage: ordered(stage, maxsize=maxsize))

    stage = to_stage(stage, maxsize=maxsize)

    return Stage(
        process_fn=Ordered(),
        workers=1,
        maxsize=maxsize,
        timeout=0,
        total_sources=stage.workers,
        dependencies=[stage],
        on_start=None,
        on_done=None,
        use_threads=False,
        f_args=[],
    )
| 26.138298
| 169
| 0.64998
|
import bisect
import typing as tp
from pypeln import utils as pypeln_utils
from pypeln.utils import A, B, T
from ..stage import Stage
from ..worker import ProcessFn, Worker
from .to_stage import to_stage
class Ordered(tp.NamedTuple):
    """Worker process function: drains the input queue and re-emits every
    element in sorted order (accumulates everything in memory first)."""

    def __call__(self, worker: "Worker", **kwargs):
        # One stable sort is O(n log n); the previous bisect.insort per
        # element plus list.pop(0) per element was O(n^2). Tie order
        # (arrival order for equal elements) is preserved either way.
        for elem in sorted(worker.stage_params.input_queue):
            worker.stage_params.output_queues.put(elem)
@tp.overload
def ordered(
    stage: Stage[A],
    maxsize: int = 0,
) -> Stage[A]:
    ...


@tp.overload
def ordered(maxsize: int = 0) -> pypeln_utils.Partial[Stage[A]]:
    ...


def ordered(
    stage: tp.Union[
        Stage[A], tp.Iterable[A], pypeln_utils.Undefined
    ] = pypeln_utils.UNDEFINED,
    maxsize: int = 0,
) -> tp.Union[Stage[A], pypeln_utils.Partial[Stage[A]]]:
    """Create a stage that re-emits elements in their order of creation on
    the pipeline's source iterable(s).

    Accumulates every upstream element before yielding, so all elements
    must fit in memory.

    Arguments:
        stage: A Stage or Iterable; when omitted a `Partial` is returned
            for deferred application.
        maxsize: Maximum number of objects the stage holds at once; `0`
            (default) means unbounded.

    Returns:
        A `Stage` when `stage` is given, otherwise a `Partial`.
    """

    if isinstance(stage, pypeln_utils.Undefined):
        # Bug fix: forward `maxsize` into the deferred call. Previously the
        # partial form `ordered(maxsize=n)(stage)` silently dropped maxsize.
        return pypeln_utils.Partial(
            lambda stage: ordered(stage, maxsize=maxsize))

    stage = to_stage(stage, maxsize=maxsize)

    return Stage(
        process_fn=Ordered(),
        workers=1,
        maxsize=maxsize,
        timeout=0,
        total_sources=stage.workers,
        dependencies=[stage],
        on_start=None,
        on_done=None,
        use_threads=False,
        f_args=[],
    )
| true
| true
|
1c494d4b0502d2f40178993523ea1cca94619b40
| 55
|
py
|
Python
|
lahman/__init__.py
|
PeterA182/liteSaber
|
6560feb70fd23916c0188ba98a751f8fee99a18b
|
[
"MIT"
] | null | null | null |
lahman/__init__.py
|
PeterA182/liteSaber
|
6560feb70fd23916c0188ba98a751f8fee99a18b
|
[
"MIT"
] | null | null | null |
lahman/__init__.py
|
PeterA182/liteSaber
|
6560feb70fd23916c0188ba98a751f8fee99a18b
|
[
"MIT"
] | 1
|
2019-06-28T01:19:38.000Z
|
2019-06-28T01:19:38.000Z
|
__author__ = 'Peter Altamura'
from lahman import Lahman
| 27.5
| 29
| 0.818182
|
__author__ = 'Peter Altamura'
from lahman import Lahman
| true
| true
|
1c494dfc5b9895a242fb3bc427570d5fcb2fd608
| 12,414
|
py
|
Python
|
lib/pavilion/expression_functions/base.py
|
ubccr/pavilion2
|
4c6d043b436761d9162d8824657f51cedc9907cc
|
[
"BSD-3-Clause"
] | null | null | null |
lib/pavilion/expression_functions/base.py
|
ubccr/pavilion2
|
4c6d043b436761d9162d8824657f51cedc9907cc
|
[
"BSD-3-Clause"
] | null | null | null |
lib/pavilion/expression_functions/base.py
|
ubccr/pavilion2
|
4c6d043b436761d9162d8824657f51cedc9907cc
|
[
"BSD-3-Clause"
] | null | null | null |
"""Contains the base Expression Function plugin class."""
import logging
import re
import inspect
from yapsy import IPlugin
LOGGER = logging.getLogger(__file__)

# The dictionary of available function plugins, keyed by plugin name.
# Populated/cleared via FunctionPlugin.activate()/deactivate().
_FUNCTIONS = {}  # type: {str,FunctionPlugin}
class FunctionPluginError(RuntimeError):
    """Error raised when there's a problem with a function plugin
    itself (bad name, bad arg specs, or a failure while running it)."""


class FunctionArgError(ValueError):
    """Error raised when a function plugin has a problem with the
    function arguments."""
def num(val):
    """Return val as an int, float, or bool, depending on what it most
    closely resembles.

    :param val: An int, float, or string. The strings 'True'/'False'
        become booleans; other strings become an int if possible, else
        a float.
    :raises ValueError: If a string can't be converted to int or float.
    :raises RuntimeError: If val is of an entirely unsupported type.
    """

    if isinstance(val, (float, int)):
        return val
    elif val in ('True', 'False'):
        return val == 'True'
    elif isinstance(val, str):
        try:
            return int(val)
        except ValueError:
            pass

        try:
            return float(val)
        except ValueError:
            # Bug fix: the message placeholder was never filled in -- the
            # original raised the literal "Could not convert '{}' ...".
            raise ValueError("Could not convert '{}' to either "
                             "int or float.".format(val))

    raise RuntimeError("Invalid value '{}' given to num.".format(val))
class FunctionPlugin(IPlugin.IPlugin):
    """Plugin base class for math functions.

    Child classes must override ``__init__`` (as is typical for Pavilion
    plugins), and must also provide a method to act as the function itself.
    This method must have the same name as the plugin (ie. The 'max' plugin
    must have a 'max' method), and take the arguments the function expects.
    """

    VALID_SPEC_TYPES = (
        int,
        float,
        str,
        bool,
        num,
    )

    NAME_RE = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*$')

    # Plugin priorities; on a name conflict the higher priority wins.
    PRIO_CORE = 0
    PRIO_COMMON = 10
    PRIO_USER = 20

    def __init__(self, name, description, arg_specs,
                 priority=PRIO_COMMON):
        """
        :param str name: The name of this function.
        :param str description: A short description of this function.
        :param int priority: The plugin priority.
        :param [type] arg_specs: A list of type specs for each function
            argument. The spec for each argument defines what structure
            and types the value will have, and the auto-conversions that
            will happen if possible. ``None`` denotes that arg_specs
            won't be used or validated, and requires that ``_validate_arg``
            (and ``signature``) be overridden.
        """

        if not self.NAME_RE.match(name):
            raise FunctionPluginError(
                "Invalid function name: '{}'".format(name))

        self.name = name
        self.description = description
        self.priority = priority

        sig = inspect.signature(getattr(self, self.name))

        if arg_specs is None:
            # Without specs the subclass must supply its own validation
            # and signature rendering.
            if self._validate_arg is FunctionPlugin._validate_arg:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override '_validate_arg'."
                    .format(self.name, self.path)
                )
            if self.__class__.signature is FunctionPlugin.signature:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override 'signature'."
                    .format(self.name, self.path)
                )
        else:
            if len(sig.parameters) != len(arg_specs):
                raise FunctionPluginError(
                    "Invalid arg specs. The function takes {} arguments, but"
                    "an arg_spec of length {} was provided."
                    .format(len(sig.parameters), len(arg_specs)))

            for arg_spec in arg_specs:
                self._validate_arg_spec(arg_spec)

        self.arg_specs = arg_specs

        super().__init__()

    def _validate_arg_spec(self, arg):
        """Recursively validate the argument spec, to make sure plugin
        creators are using this right.

        :param arg: A valid arg spec is a structure of lists and
            dicts, and types from self.VALID_SPEC_TYPES.

            - Lists should contain one representative containing type.
            - Dicts should have at least one key-value pair (with string
              keys).
            - Dict specs don't have to contain every key the dict might
              have, just those that will be used.
            - Specs may be any structure of these types, as long
              as they comply with the above rules.
            - The 'num' spec type will accept strings, floats, ints,
              or bool. ints and floats are left alone, bools become
              ints, and strings become an int or a float if they can.
        :raises FunctionPluginError: On a bad arg spec.
        """

        if isinstance(arg, list):
            if len(arg) != 1:
                raise FunctionPluginError(
                    "Invalid list spec argument. List arguments must contain "
                    "a single subtype. This had '{}'."
                    .format(arg)
                )
            self._validate_arg_spec(arg[0])

        elif isinstance(arg, dict):
            if len(arg) == 0:
                raise FunctionPluginError(
                    "Invalid dict spec argument. Dict arguments must contain "
                    "at least one key-value pair. This had '{}'."
                    .format(arg)
                )
            for key, sub_arg in arg.items():
                self._validate_arg_spec(sub_arg)

        elif arg not in self.VALID_SPEC_TYPES:
            raise FunctionPluginError(
                "Invalid spec type '{}'. Must be one of '{}'"
                .format(arg, self.VALID_SPEC_TYPES)
            )

    @property
    def path(self):
        """The path to the file containing this result parser plugin."""
        return inspect.getfile(self.__class__)

    def __call__(self, *args):
        """Validate/convert the arguments and call the function."""

        if self.arg_specs is not None:
            if len(args) != len(self.arg_specs):
                raise FunctionPluginError(
                    "Invalid number of arguments defined for function {}. Got "
                    "{}, but expected {}"
                    .format(self.name, len(args), len(self.arg_specs)))

            # Create the full list of validated arguments.
            val_args = []
            for arg, spec in zip(args, self.arg_specs):
                val_args.append(self._validate_arg(arg, spec))
        else:
            val_args = args

        try:
            func = getattr(self, self.name)
            return func(*val_args)
        except Exception as err:
            # Deliberately broad: any failure inside a plugin function is
            # reported uniformly. Chain the cause for debuggability.
            raise FunctionPluginError(
                "Error in function plugin {}: {}"
                .format(self.name, err)
            ) from err

    @property
    def signature(self):
        """Generate a function signature for this function, rendering each
        argument with a description of its spec."""

        sig = inspect.signature(getattr(self, self.name))
        arg_names = list(sig.parameters.keys())

        parts = [self.name + '(']
        arg_parts = []
        for i in range(len(arg_names)):
            arg_name = arg_names[i]
            spec = self.arg_specs[i]
            arg_parts.append(
                '{}: {}'.format(arg_name, self._spec_to_desc(spec)))
        parts.append(', '.join(arg_parts))
        parts.append(')')

        return ''.join(parts)

    @property
    def long_description(self):
        """Return the docstring for the function, whitespace-normalized."""

        func = getattr(self, self.name)
        desc = func.__doc__
        return ' '.join(desc.split())

    def _spec_to_desc(self, spec):
        """Convert an argument spec into a descriptive structure that
        can be reasonably printed."""

        if isinstance(spec, list):
            return [self._spec_to_desc(spec[0])]
        elif isinstance(spec, dict):
            return {k: self._spec_to_desc(v) for k, v in spec.items()}
        else:
            return spec.__name__

    def _validate_arg(self, arg, spec):
        """Ensure that the argument is of the structure specified by 'spec',
        and convert all contained values accordingly.

        :param arg: The argument to validate.
        :param Union[list,dict,int,bool,str,float] spec: The spec to apply to
            this argument.
        :return: The validated, auto-converted argument.
        """

        if isinstance(spec, list):
            if not isinstance(arg, list):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a list."
                    .format(arg)
                )

            val_args = []
            for arg_item in arg:
                try:
                    val_args.append(self._validate_arg(arg_item, spec[0]))
                except FunctionPluginError:
                    raise FunctionPluginError(
                        "Invalid list item argument '{}'. Expected a list of "
                        "'{}'."
                        .format(arg_item, spec[0]))
            return val_args

        if isinstance(spec, dict):
            if not isinstance(arg, dict):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a dict."
                    .format(arg))

            val_args = {}
            for key, sub_spec in spec.items():
                if key not in arg:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}'. Missing key '{}'"
                        .format(arg, key))

                try:
                    val_args[key] = self._validate_arg(arg[key], sub_spec)
                except FunctionPluginError as err:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}' for key '{}': {}"
                        .format(arg[key], key, err))

            return val_args

        try:
            # Boolean strings need a little conversion help when
            # converting to other types. (The num type takes care of this
            # internally.)
            # Bug fix: bool('False') is True, since any non-empty string
            # is truthy -- compare against 'True' instead so that
            # 'False' -> 0 for int/float specs.
            if spec in (int, float) and arg in ('True', 'False'):
                arg = (arg == 'True')
            return spec(arg)
        except ValueError:
            raise FunctionPluginError(
                "Invalid {} ({})"
                .format(spec.__name__, arg))

    def activate(self):
        """Yapsy runs this when adding the plugin. Add our plugin
        to the registry of function plugins, resolving name conflicts
        by priority."""

        if self.name in _FUNCTIONS:
            other = _FUNCTIONS[self.name]
            if self.priority > other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is superceded by plugin at %s",
                    self.name, other.path, self.path)
                _FUNCTIONS[self.name] = self
            elif self.priority < other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is ignored in lieu of "
                    "plugin at %s.",
                    self.name, self.path, other.path)
            else:
                raise RuntimeError(
                    "Function plugin conflict. Parser '{}' at '{}'"
                    "has the same priority as plugin at '{}'"
                    .format(self.name, self.path, other.path))
        else:
            _FUNCTIONS[self.name] = self

    def deactivate(self):
        """Yapsy runs this when removing the plugin. Plugins will
        only be removed by unit tests."""

        del _FUNCTIONS[self.name]
class CoreFunctionPlugin(FunctionPlugin):
    """A function plugin that sets defaults for core plugins. Use when adding
    additional function plugins to the core_functions module."""

    def __init__(self, name, description, arg_specs):
        # Core plugins always register at the lowest priority, so user
        # plugins can override them by name.
        super().__init__(name, description, arg_specs,
                         priority=self.PRIO_CORE)
def register_core_plugins():
    """Find all the core function plugins and activate them."""

    # Importing the module defines every CoreFunctionPlugin subclass as a
    # side effect; the module object itself is unused.
    from pavilion.expression_functions import core
    _ = core

    for plugin_cls in CoreFunctionPlugin.__subclasses__():
        plugin_cls().activate()
def __reset():
    """Reset all function plugins. For testing only."""

    # Snapshot the values first, since deactivate() mutates _FUNCTIONS.
    for plugin in tuple(_FUNCTIONS.values()):
        plugin.deactivate()
| 34.483333
| 79
| 0.556791
|
import logging
import re
import inspect
from yapsy import IPlugin
LOGGER = logging.getLogger(__file__)

# Registry of available function plugins, keyed by plugin name.
_FUNCTIONS = {}
class FunctionPluginError(RuntimeError):
    """Error raised when there's a problem with a function plugin itself."""


class FunctionArgError(ValueError):
    """Error raised when a function plugin has a problem with the
    function arguments."""
def num(val):
    """Return val as an int, float, or bool, depending on what it most
    closely resembles.

    :param val: An int, float, or string. The strings 'True'/'False'
        become booleans; other strings become an int if possible, else
        a float.
    :raises ValueError: If a string can't be converted to int or float.
    :raises RuntimeError: If val is of an entirely unsupported type.
    """

    if isinstance(val, (float, int)):
        return val
    elif val in ('True', 'False'):
        return val == 'True'
    elif isinstance(val, str):
        try:
            return int(val)
        except ValueError:
            pass

        try:
            return float(val)
        except ValueError:
            # Bug fix: the message placeholder was never filled in -- the
            # original raised the literal "Could not convert '{}' ...".
            raise ValueError("Could not convert '{}' to either "
                             "int or float.".format(val))

    raise RuntimeError("Invalid value '{}' given to num.".format(val))
class FunctionPlugin(IPlugin.IPlugin):
    """Plugin base class for math functions.

    Child classes must override ``__init__`` and provide a method with the
    same name as the plugin itself (ie. the 'max' plugin must have a 'max'
    method) that takes the arguments the function expects.
    """

    VALID_SPEC_TYPES = (
        int,
        float,
        str,
        bool,
        num,
    )

    NAME_RE = re.compile(r'[a-zA-Z][a-zA-Z0-9_]*$')

    # Plugin priorities; on a name conflict the higher priority wins.
    PRIO_CORE = 0
    PRIO_COMMON = 10
    PRIO_USER = 20

    def __init__(self, name, description, arg_specs,
                 priority=PRIO_COMMON):
        """
        :param str name: The name of this function.
        :param str description: A short description of this function.
        :param [type] arg_specs: A list of type specs for each function
            argument, or ``None`` (which requires overriding
            ``_validate_arg`` and ``signature``).
        :param int priority: The plugin priority.
        """

        if not self.NAME_RE.match(name):
            raise FunctionPluginError(
                "Invalid function name: '{}'".format(name))

        self.name = name
        self.description = description
        self.priority = priority

        sig = inspect.signature(getattr(self, self.name))

        if arg_specs is None:
            # Without specs the subclass must supply its own validation
            # and signature rendering.
            if self._validate_arg is FunctionPlugin._validate_arg:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override '_validate_arg'."
                    .format(self.name, self.path)
                )
            if self.__class__.signature is FunctionPlugin.signature:
                raise RuntimeError(
                    "Function plugin {} at {} was given an arg_spec of "
                    "'None', but did not override 'signature'."
                    .format(self.name, self.path)
                )
        else:
            if len(sig.parameters) != len(arg_specs):
                raise FunctionPluginError(
                    "Invalid arg specs. The function takes {} arguments, but"
                    "an arg_spec of length {} was provided."
                    .format(len(sig.parameters), len(arg_specs)))

            for arg_spec in arg_specs:
                self._validate_arg_spec(arg_spec)

        self.arg_specs = arg_specs

        super().__init__()

    def _validate_arg_spec(self, arg):
        """Recursively validate an argument spec: a structure of lists
        (single representative subtype) and dicts (at least one key), with
        leaves drawn from VALID_SPEC_TYPES.

        :raises FunctionPluginError: On a bad arg spec.
        """

        if isinstance(arg, list):
            if len(arg) != 1:
                raise FunctionPluginError(
                    "Invalid list spec argument. List arguments must contain "
                    "a single subtype. This had '{}'."
                    .format(arg)
                )
            self._validate_arg_spec(arg[0])

        elif isinstance(arg, dict):
            if len(arg) == 0:
                raise FunctionPluginError(
                    "Invalid dict spec argument. Dict arguments must contain "
                    "at least one key-value pair. This had '{}'."
                    .format(arg)
                )
            for key, sub_arg in arg.items():
                self._validate_arg_spec(sub_arg)

        elif arg not in self.VALID_SPEC_TYPES:
            raise FunctionPluginError(
                "Invalid spec type '{}'. Must be one of '{}'"
                .format(arg, self.VALID_SPEC_TYPES)
            )

    @property
    def path(self):
        """The path to the file containing this plugin."""
        return inspect.getfile(self.__class__)

    def __call__(self, *args):
        """Validate/convert the arguments and call the function."""

        if self.arg_specs is not None:
            if len(args) != len(self.arg_specs):
                raise FunctionPluginError(
                    "Invalid number of arguments defined for function {}. Got "
                    "{}, but expected {}"
                    .format(self.name, len(args), len(self.arg_specs)))

            # Build the full list of validated/converted arguments.
            val_args = []
            for arg, spec in zip(args, self.arg_specs):
                val_args.append(self._validate_arg(arg, spec))
        else:
            val_args = args

        try:
            func = getattr(self, self.name)
            return func(*val_args)
        except Exception as err:
            # Deliberately broad: any failure inside a plugin function is
            # reported uniformly. Chain the cause for debuggability.
            raise FunctionPluginError(
                "Error in function plugin {}: {}"
                .format(self.name, err)
            ) from err

    @property
    def signature(self):
        """Generate a printable signature for this function, rendering each
        argument with a description of its spec."""

        sig = inspect.signature(getattr(self, self.name))
        arg_names = list(sig.parameters.keys())

        parts = [self.name + '(']
        arg_parts = []
        for i in range(len(arg_names)):
            arg_name = arg_names[i]
            spec = self.arg_specs[i]
            arg_parts.append(
                '{}: {}'.format(arg_name, self._spec_to_desc(spec)))
        parts.append(', '.join(arg_parts))
        parts.append(')')

        return ''.join(parts)

    @property
    def long_description(self):
        """Return the docstring for the function, whitespace-normalized."""

        func = getattr(self, self.name)
        desc = func.__doc__
        return ' '.join(desc.split())

    def _spec_to_desc(self, spec):
        """Convert an argument spec into a descriptive structure that can
        be reasonably printed."""

        if isinstance(spec, list):
            return [self._spec_to_desc(spec[0])]
        elif isinstance(spec, dict):
            return {k: self._spec_to_desc(v) for k, v in spec.items()}
        else:
            return spec.__name__

    def _validate_arg(self, arg, spec):
        """Ensure that the argument matches 'spec' and convert all
        contained values accordingly.

        :param arg: The argument to validate.
        :param Union[list,dict,int,bool,str,float] spec: The spec to apply.
        :return: The validated, auto-converted argument.
        """

        if isinstance(spec, list):
            if not isinstance(arg, list):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a list."
                    .format(arg)
                )

            val_args = []
            for arg_item in arg:
                try:
                    val_args.append(self._validate_arg(arg_item, spec[0]))
                except FunctionPluginError:
                    raise FunctionPluginError(
                        "Invalid list item argument '{}'. Expected a list of "
                        "'{}'."
                        .format(arg_item, spec[0]))
            return val_args

        if isinstance(spec, dict):
            if not isinstance(arg, dict):
                raise FunctionPluginError(
                    "Invalid argument '{}'. Expected a dict."
                    .format(arg))

            val_args = {}
            for key, sub_spec in spec.items():
                if key not in arg:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}'. Missing key '{}'"
                        .format(arg, key))

                try:
                    val_args[key] = self._validate_arg(arg[key], sub_spec)
                except FunctionPluginError as err:
                    raise FunctionPluginError(
                        "Invalid dict argument '{}' for key '{}': {}"
                        .format(arg[key], key, err))

            return val_args

        try:
            # Bug fix: bool('False') is True, since any non-empty string
            # is truthy -- compare against 'True' instead so that
            # 'False' -> 0 for int/float specs.
            if spec in (int, float) and arg in ('True', 'False'):
                arg = (arg == 'True')
            return spec(arg)
        except ValueError:
            raise FunctionPluginError(
                "Invalid {} ({})"
                .format(spec.__name__, arg))

    def activate(self):
        """Yapsy runs this when adding the plugin. Register it in the
        function registry, resolving name conflicts by priority."""

        if self.name in _FUNCTIONS:
            other = _FUNCTIONS[self.name]
            if self.priority > other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is superceded by plugin at %s",
                    self.name, other.path, self.path)
                _FUNCTIONS[self.name] = self
            elif self.priority < other.priority:
                LOGGER.info(
                    "Function plugin '%s' at %s is ignored in lieu of "
                    "plugin at %s.",
                    self.name, self.path, other.path)
            else:
                raise RuntimeError(
                    "Function plugin conflict. Parser '{}' at '{}'"
                    "has the same priority as plugin at '{}'"
                    .format(self.name, self.path, other.path))
        else:
            _FUNCTIONS[self.name] = self

    def deactivate(self):
        """Yapsy runs this when removing the plugin (unit tests only)."""

        del _FUNCTIONS[self.name]
class CoreFunctionPlugin(FunctionPlugin):
    """A function plugin that sets defaults for core plugins. Use when
    adding additional function plugins to the core_functions module."""

    def __init__(self, name, description, arg_specs):
        # Core plugins always register at the lowest priority, so user
        # plugins can override them by name.
        super().__init__(name, description, arg_specs,
                         priority=self.PRIO_CORE)
def register_core_plugins():
    """Find all the core function plugins and activate them."""

    # Importing the module defines every CoreFunctionPlugin subclass as a
    # side effect; the module object itself is unused.
    from pavilion.expression_functions import core
    _ = core

    for plugin_cls in CoreFunctionPlugin.__subclasses__():
        plugin_cls().activate()
def __reset():
    """Reset all function plugins. For testing only."""

    # Snapshot the values first, since deactivate() mutates _FUNCTIONS.
    for plugin in tuple(_FUNCTIONS.values()):
        plugin.deactivate()
| true
| true
|
1c494f5029b56fe8b217e4d957810d9edc58d324
| 15,887
|
py
|
Python
|
model/model.py
|
eaidova/UNITER
|
5b4c9faf8ed922176b20d89ac56a3e0b39374a22
|
[
"MIT"
] | 612
|
2020-01-28T00:34:23.000Z
|
2022-03-31T00:40:06.000Z
|
model/model.py
|
eaidova/UNITER
|
5b4c9faf8ed922176b20d89ac56a3e0b39374a22
|
[
"MIT"
] | 90
|
2020-02-18T10:54:40.000Z
|
2022-03-17T07:36:35.000Z
|
model/model.py
|
eaidova/UNITER
|
5b4c9faf8ed922176b20d89ac56a3e0b39374a22
|
[
"MIT"
] | 114
|
2020-01-31T03:03:25.000Z
|
2022-03-17T15:53:51.000Z
|
"""
Copyright (c) Microsoft Corporation.
Licensed under the MIT license.
Pytorch modules
some classes are modified from HuggingFace
(https://github.com/huggingface/transformers)
"""
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
    """Configuration class to store the configuration of a `UniterModel`.
    """

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """Constructs UniterConfig.
        Args:
            vocab_size_or_config_json_file: Vocabulary size of `inputs_ids` in
                `UniterModel`, or the path of a JSON config file (in which
                case all other arguments are ignored).
            hidden_size: Size of the encoder layers and the pooler layer.
            num_hidden_layers: Number of hidden layers in the Transformer
                encoder.
            num_attention_heads: Number of attention heads for each attention
                layer in the Transformer encoder.
            intermediate_size: The size of the "intermediate" (i.e.
                feed-forward) layer in the Transformer encoder.
            hidden_act: The non-linear activation function (function or string)
                in the encoder and pooler. If string, "gelu", "relu" and
                "swish" are supported.
            hidden_dropout_prob: The dropout probabilitiy for all fully
                connected layers in the embeddings, encoder, and pooler.
            attention_probs_dropout_prob: The dropout ratio for the attention
                probabilities.
            max_position_embeddings: The maximum sequence length that this
                model might ever be used with. Typically set this to something
                large just in case (e.g., 512 or 1024 or 2048).
            type_vocab_size: The vocabulary size of the `token_type_ids` passed
                into `UniterModel`.
            initializer_range: The sttdev of the truncated_normal_initializer
                for initializing all weight matrices.
        """
        if isinstance(vocab_size_or_config_json_file, str):
            # Config-file path: every JSON key becomes an attribute.
            with open(vocab_size_or_config_json_file,
                      "r", encoding='utf-8') as reader:
                json_config = json.loads(reader.read())
            for key, value in json_config.items():
                self.__dict__[key] = value
        elif isinstance(vocab_size_or_config_json_file, int):
            # Explicit vocab size: keep the keyword-argument values.
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size "
                             "(int) or the path to a pretrained model config "
                             "file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Constructs a `UniterConfig` from a
        Python dictionary of parameters."""
        # The -1 sentinel takes the int branch; defaults are then
        # overwritten by the dictionary's entries.
        config = UniterConfig(vocab_size_or_config_json_file=-1)
        for key, value in json_object.items():
            config.__dict__[key] = value
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Constructs a `UniterConfig` from a json file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            text = reader.read()
        return cls.from_dict(json.loads(text))

    def __repr__(self):
        return str(self.to_json_string())

    def to_dict(self):
        """Serializes this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        return output

    def to_json_string(self):
        """Serializes this instance to a JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class UniterPreTrainedModel(nn.Module):
    """ An abstract class to handle weights initialization and
    a simple interface for downloading and loading pretrained models.
    """
    def __init__(self, config, *inputs, **kwargs):
        # config must already be a parsed UniterConfig; failing fast here
        # keeps subclasses from silently mis-loading checkpoints.
        super().__init__()
        if not isinstance(config, UniterConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of "
                "class `UniterConfig`. To create a model from a Google "
                "pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config

    def init_weights(self, module):
        """ Initialize the weights.

        Linear/Embedding weights: normal(0, initializer_range);
        FusedLayerNorm: weight=1, bias=0; Linear biases: 0.
        Intended to be used via `self.apply(self.init_weights)`.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version which uses
            # truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, FusedLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()

    @classmethod
    def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
        """
        Instantiate a UniterPreTrainedModel from a pre-trained model file or a
        pytorch state dict.
        Params:
            config_file: config json file
            state_dict: a state dictionary
            *inputs, **kwargs: additional input for the specific Uniter class
        """
        # Load config
        config = UniterConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        # Instantiate model.
        model = cls(config, *inputs, **kwargs)
        # Load from a PyTorch state_dict.
        # Rename legacy TF-style LayerNorm parameters:
        # gamma -> weight, beta -> bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # copy state_dict so _load_from_state_dict can modify it
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load each submodule, collecting problems into the
            # shared lists instead of raising immediately.
            local_metadata = ({} if metadata is None
                              else metadata.get(prefix[:-1], {}))
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys,
                unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Allow loading a bare BERT checkpoint into a model without a
        # `bert` attribute by stripping the 'bert.' key prefix.
        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.')
                                              for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from "
                        "pretrained model: {}".format(
                            model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in "
                        "{}: {}".format(
                            model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for '
                               '{}:\n\t{}'.format(
                                   model.__class__.__name__,
                                   "\n\t".join(error_msgs)))
        return model
class UniterTextEmbeddings(nn.Module):
    """BERT-style embeddings for the text stream.

    Word, position, and token-type embeddings are summed, layer-normalized,
    and passed through dropout.
    """

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size)
        # Name kept as `LayerNorm` (not snake_case) so TensorFlow
        # checkpoints can be loaded by variable name.
        self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, position_ids, token_type_ids=None):
        """Embed token ids; token types default to all zeros (segment 0)."""
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        summed = (self.word_embeddings(input_ids)
                  + self.position_embeddings(position_ids)
                  + self.token_type_embeddings(token_type_ids))
        return self.dropout(self.LayerNorm(summed))
class UniterImageEmbeddings(nn.Module):
def __init__(self, config, img_dim):
super().__init__()
self.img_linear = nn.Linear(img_dim, config.hidden_size)
self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.pos_linear = nn.Linear(7, config.hidden_size)
self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
# tf naming convention for layer norm
self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
if img_masks is not None:
self.mask_embedding.weight.data[0, :].fill_(0)
mask = self.mask_embedding(img_masks.long())
img_feat = img_feat + mask
transformed_im = self.img_layer_norm(self.img_linear(img_feat))
transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
embeddings = transformed_im + transformed_pos + type_embeddings
embeddings = self.LayerNorm(embeddings)
embeddings = self.dropout(embeddings)
return embeddings
class UniterEncoder(nn.Module):
    """Stack of `num_hidden_layers` identical transformer layers."""

    def __init__(self, config):
        super().__init__()
        # Deep-copy one prototype layer so weights are NOT shared.
        prototype = BertLayer(config)
        self.layer = nn.ModuleList(
            copy.deepcopy(prototype)
            for _ in range(config.num_hidden_layers))

    def forward(self, input_, attention_mask,
                output_all_encoded_layers=True):
        """Run all layers; return every layer's output, or only the last
        one, always wrapped in a list."""
        outputs = []
        hidden = input_
        for block in self.layer:
            hidden = block(hidden, attention_mask)
            if output_all_encoded_layers:
                outputs.append(hidden)
        if not output_all_encoded_layers:
            outputs.append(hidden)
        return outputs
class UniterModel(UniterPreTrainedModel):
    """ Modification for Joint Vision-Language Encoding
    """
    def __init__(self, config, img_dim):
        super().__init__(config)
        self.embeddings = UniterTextEmbeddings(config)
        self.img_embeddings = UniterImageEmbeddings(config, img_dim)
        self.encoder = UniterEncoder(config)
        self.pooler = BertPooler(config)
        # Recursively initialize all submodule weights.
        self.apply(self.init_weights)

    def _compute_txt_embeddings(self, input_ids, position_ids,
                                txt_type_ids=None):
        # Text-stream embedding lookup (word + position + token type).
        output = self.embeddings(input_ids, position_ids, txt_type_ids)
        return output

    def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
                                img_type_ids=None):
        # Image regions default to token-type id 1 (text uses 0).
        if img_type_ids is None:
            img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
        img_type_embeddings = self.embeddings.token_type_embeddings(
            img_type_ids)
        output = self.img_embeddings(img_feat, img_pos_feat,
                                     img_type_embeddings, img_masks)
        return output

    def _compute_img_txt_embeddings(self, input_ids, position_ids,
                                    img_feat, img_pos_feat,
                                    gather_index, img_masks=None,
                                    txt_type_ids=None, img_type_ids=None):
        txt_emb = self._compute_txt_embeddings(
            input_ids, position_ids, txt_type_ids)
        img_emb = self._compute_img_embeddings(
            img_feat, img_pos_feat, img_masks, img_type_ids)
        # align back to most compact input: gather_index re-packs the
        # concatenated [text; image] sequence along the sequence axis.
        gather_index = gather_index.unsqueeze(-1).expand(
            -1, -1, self.config.hidden_size)
        embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
                                        dim=1, index=gather_index)
        return embedding_output

    def forward(self, input_ids, position_ids,
                img_feat, img_pos_feat,
                attention_mask, gather_index=None, img_masks=None,
                output_all_encoded_layers=True,
                txt_type_ids=None, img_type_ids=None):
        """Encode text and/or image inputs.

        Pass input_ids=None for image-only or img_feat=None for text-only.
        Returns the encoder's list of layer outputs, or only the last
        layer's output when output_all_encoded_layers is False.
        """
        # compute self-attention mask: 0 where attention_mask==1,
        # -10000 where it is 0; passed alongside the embeddings to the
        # encoder layers.
        extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
        extended_attention_mask = extended_attention_mask.to(
            dtype=next(self.parameters()).dtype)  # fp16 compatibility
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
        # embedding layer
        if input_ids is None:
            # image only
            embedding_output = self._compute_img_embeddings(
                img_feat, img_pos_feat, img_masks, img_type_ids)
        elif img_feat is None:
            # text only
            embedding_output = self._compute_txt_embeddings(
                input_ids, position_ids, txt_type_ids)
        else:
            embedding_output = self._compute_img_txt_embeddings(
                input_ids, position_ids,
                img_feat, img_pos_feat,
                gather_index, img_masks, txt_type_ids, img_type_ids)
        encoded_layers = self.encoder(
            embedding_output, extended_attention_mask,
            output_all_encoded_layers=output_all_encoded_layers)
        if not output_all_encoded_layers:
            encoded_layers = encoded_layers[-1]
        return encoded_layers
| 43.171196
| 79
| 0.615031
|
import copy
import json
import logging
from io import open
import torch
from torch import nn
from apex.normalization.fused_layer_norm import FusedLayerNorm
from .layer import BertLayer, BertPooler
logger = logging.getLogger(__name__)
class UniterConfig(object):
    """Hyperparameter container for `UniterModel` (BERT-style)."""

    def __init__(self,
                 vocab_size_or_config_json_file,
                 hidden_size=768,
                 num_hidden_layers=12,
                 num_attention_heads=12,
                 intermediate_size=3072,
                 hidden_act="gelu",
                 hidden_dropout_prob=0.1,
                 attention_probs_dropout_prob=0.1,
                 max_position_embeddings=512,
                 type_vocab_size=2,
                 initializer_range=0.02):
        """First argument is either an int vocab size (remaining keyword
        arguments apply) or a path to a JSON config file whose keys become
        attributes (keyword arguments are then ignored)."""
        if isinstance(vocab_size_or_config_json_file, str):
            with open(vocab_size_or_config_json_file,
                      "r", encoding='utf-8') as reader:
                self.__dict__.update(json.load(reader))
        elif isinstance(vocab_size_or_config_json_file, int):
            self.vocab_size = vocab_size_or_config_json_file
            self.hidden_size = hidden_size
            self.num_hidden_layers = num_hidden_layers
            self.num_attention_heads = num_attention_heads
            self.hidden_act = hidden_act
            self.intermediate_size = intermediate_size
            self.hidden_dropout_prob = hidden_dropout_prob
            self.attention_probs_dropout_prob = attention_probs_dropout_prob
            self.max_position_embeddings = max_position_embeddings
            self.type_vocab_size = type_vocab_size
            self.initializer_range = initializer_range
        else:
            raise ValueError("First argument must be either a vocabulary size "
                             "(int) or the path to a pretrained model config "
                             "file (str)")

    @classmethod
    def from_dict(cls, json_object):
        """Construct a `UniterConfig` from a Python dict of parameters."""
        # -1 routes __init__ through the int branch; real values are
        # injected straight into the instance dict afterwards.
        config = UniterConfig(vocab_size_or_config_json_file=-1)
        config.__dict__.update(json_object)
        return config

    @classmethod
    def from_json_file(cls, json_file):
        """Construct a `UniterConfig` from a JSON file of parameters."""
        with open(json_file, "r", encoding='utf-8') as reader:
            return cls.from_dict(json.load(reader))

    def __repr__(self):
        """Debug representation: the pretty-printed JSON config."""
        return self.to_json_string()

    def to_dict(self):
        """Return a deep copy of this configuration as a plain dict."""
        return copy.deepcopy(self.__dict__)

    def to_json_string(self):
        """Serialize to an indented, key-sorted JSON string."""
        return json.dumps(self.to_dict(), indent=2, sort_keys=True) + "\n"
class UniterPreTrainedModel(nn.Module):
    """Abstract base handling weight initialization and loading of
    pretrained UNITER/BERT checkpoints."""
    def __init__(self, config, *inputs, **kwargs):
        # Fail fast if config is not a parsed UniterConfig instance.
        super().__init__()
        if not isinstance(config, UniterConfig):
            raise ValueError(
                "Parameter config in `{}(config)` should be an instance of "
                "class `UniterConfig`. To create a model from a Google "
                "pretrained model use "
                "`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`".format(
                    self.__class__.__name__, self.__class__.__name__
                ))
        self.config = config
    def init_weights(self, module):
        """Initialize one submodule's weights.

        Linear/Embedding weights: normal(0, initializer_range);
        FusedLayerNorm: weight=1, bias=0; Linear biases: 0.
        Intended for use via `self.apply(self.init_weights)`.
        """
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0,
                                       std=self.config.initializer_range)
        elif isinstance(module, FusedLayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
    @classmethod
    def from_pretrained(cls, config_file, state_dict, *inputs, **kwargs):
        """Instantiate the model from a config JSON file plus an
        in-memory PyTorch state dict."""
        config = UniterConfig.from_json_file(config_file)
        logger.info("Model config {}".format(config))
        model = cls(config, *inputs, **kwargs)
        # Rename legacy TF-style LayerNorm params: gamma->weight, beta->bias.
        old_keys = []
        new_keys = []
        for key in state_dict.keys():
            new_key = None
            if 'gamma' in key:
                new_key = key.replace('gamma', 'weight')
            if 'beta' in key:
                new_key = key.replace('beta', 'bias')
            if new_key:
                old_keys.append(key)
                new_keys.append(new_key)
        for old_key, new_key in zip(old_keys, new_keys):
            state_dict[new_key] = state_dict.pop(old_key)
        missing_keys = []
        unexpected_keys = []
        error_msgs = []
        # Copy state_dict so _load_from_state_dict can modify it.
        metadata = getattr(state_dict, '_metadata', None)
        state_dict = state_dict.copy()
        if metadata is not None:
            state_dict._metadata = metadata

        def load(module, prefix=''):
            # Recursively load each submodule, collecting problems into
            # the shared lists instead of raising immediately.
            local_metadata = ({} if metadata is None
                              else metadata.get(prefix[:-1], {}))
            module._load_from_state_dict(
                state_dict, prefix, local_metadata, True, missing_keys,
                unexpected_keys, error_msgs)
            for name, child in module._modules.items():
                if child is not None:
                    load(child, prefix + name + '.')
        # Allow loading a bare BERT checkpoint into a model without a
        # `bert` attribute by stripping the 'bert.' key prefix.
        start_prefix = ''
        if not hasattr(model, 'bert') and any(s.startswith('bert.')
                                              for s in state_dict.keys()):
            start_prefix = 'bert.'
        load(model, prefix=start_prefix)
        if len(missing_keys) > 0:
            logger.info("Weights of {} not initialized from "
                        "pretrained model: {}".format(
                            model.__class__.__name__, missing_keys))
        if len(unexpected_keys) > 0:
            logger.info("Weights from pretrained model not used in "
                        "{}: {}".format(
                            model.__class__.__name__, unexpected_keys))
        if len(error_msgs) > 0:
            raise RuntimeError('Error(s) in loading state_dict for '
                               '{}:\n\t{}'.format(
                                   model.__class__.__name__,
                                   "\n\t".join(error_msgs)))
        return model
class UniterTextEmbeddings(nn.Module):
    """BERT-style text embeddings: word + position + token-type summed,
    then LayerNorm and dropout."""
    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size,
                                            config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings,
                                                config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size,
                                                  config.hidden_size)
        # `LayerNorm` keeps TensorFlow capitalization so TF checkpoints
        # can be loaded by variable name.
        self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, input_ids, position_ids, token_type_ids=None):
        # Token types default to all zeros (single-segment input).
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)
        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = (words_embeddings
                      + position_embeddings
                      + token_type_embeddings)
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class UniterImageEmbeddings(nn.Module):
    """Embed image region features and 7-d position features into the
    transformer hidden size."""
    def __init__(self, config, img_dim):
        super().__init__()
        self.img_linear = nn.Linear(img_dim, config.hidden_size)
        self.img_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.pos_layer_norm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.pos_linear = nn.Linear(7, config.hidden_size)
        # Row 0 (padding_idx) is the "not masked" vector and stays zero.
        self.mask_embedding = nn.Embedding(2, img_dim, padding_idx=0)
        # TF naming convention kept for checkpoint compatibility.
        self.LayerNorm = FusedLayerNorm(config.hidden_size, eps=1e-12)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
    def forward(self, img_feat, img_pos_feat, type_embeddings, img_masks=None):
        if img_masks is not None:
            # Keep the unmasked row at zero, then add the learned mask
            # vector to masked regions only.
            self.mask_embedding.weight.data[0, :].fill_(0)
            mask = self.mask_embedding(img_masks.long())
            img_feat = img_feat + mask
        transformed_im = self.img_layer_norm(self.img_linear(img_feat))
        transformed_pos = self.pos_layer_norm(self.pos_linear(img_pos_feat))
        embeddings = transformed_im + transformed_pos + type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class UniterEncoder(nn.Module):
    """Stack of `num_hidden_layers` identical transformer layers."""

    def __init__(self, config):
        super().__init__()
        # Deep-copy one prototype layer so weights are NOT shared.
        prototype = BertLayer(config)
        self.layer = nn.ModuleList(
            copy.deepcopy(prototype)
            for _ in range(config.num_hidden_layers))

    def forward(self, input_, attention_mask,
                output_all_encoded_layers=True):
        """Run all layers; return every layer's output, or only the last
        one, always wrapped in a list."""
        outputs = []
        hidden = input_
        for block in self.layer:
            hidden = block(hidden, attention_mask)
            if output_all_encoded_layers:
                outputs.append(hidden)
        if not output_all_encoded_layers:
            outputs.append(hidden)
        return outputs
class UniterModel(UniterPreTrainedModel):
def __init__(self, config, img_dim):
super().__init__(config)
self.embeddings = UniterTextEmbeddings(config)
self.img_embeddings = UniterImageEmbeddings(config, img_dim)
self.encoder = UniterEncoder(config)
self.pooler = BertPooler(config)
self.apply(self.init_weights)
def _compute_txt_embeddings(self, input_ids, position_ids,
txt_type_ids=None):
output = self.embeddings(input_ids, position_ids, txt_type_ids)
return output
def _compute_img_embeddings(self, img_feat, img_pos_feat, img_masks=None,
img_type_ids=None):
if img_type_ids is None:
img_type_ids = torch.ones_like(img_feat[:, :, 0].long())
img_type_embeddings = self.embeddings.token_type_embeddings(
img_type_ids)
output = self.img_embeddings(img_feat, img_pos_feat,
img_type_embeddings, img_masks)
return output
def _compute_img_txt_embeddings(self, input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks=None,
txt_type_ids=None, img_type_ids=None):
txt_emb = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
img_emb = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
gather_index = gather_index.unsqueeze(-1).expand(
-1, -1, self.config.hidden_size)
embedding_output = torch.gather(torch.cat([txt_emb, img_emb], dim=1),
dim=1, index=gather_index)
return embedding_output
def forward(self, input_ids, position_ids,
img_feat, img_pos_feat,
attention_mask, gather_index=None, img_masks=None,
output_all_encoded_layers=True,
txt_type_ids=None, img_type_ids=None):
extended_attention_mask = attention_mask.unsqueeze(1).unsqueeze(2)
extended_attention_mask = extended_attention_mask.to(
dtype=next(self.parameters()).dtype) extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
if input_ids is None:
embedding_output = self._compute_img_embeddings(
img_feat, img_pos_feat, img_masks, img_type_ids)
elif img_feat is None:
embedding_output = self._compute_txt_embeddings(
input_ids, position_ids, txt_type_ids)
else:
embedding_output = self._compute_img_txt_embeddings(
input_ids, position_ids,
img_feat, img_pos_feat,
gather_index, img_masks, txt_type_ids, img_type_ids)
encoded_layers = self.encoder(
embedding_output, extended_attention_mask,
output_all_encoded_layers=output_all_encoded_layers)
if not output_all_encoded_layers:
encoded_layers = encoded_layers[-1]
return encoded_layers
| true
| true
|
1c495033486807a83992b5b2fcd5ec296570ade8
| 576
|
py
|
Python
|
code/decision_tree/decision_tree_iris.py
|
CrazyXiao/machine-learning
|
8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd
|
[
"MIT"
] | 200
|
2019-04-23T01:13:31.000Z
|
2021-08-01T07:56:46.000Z
|
code/decision_tree/decision_tree_iris.py
|
CrazyXiao/machine-learning
|
8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd
|
[
"MIT"
] | null | null | null |
code/decision_tree/decision_tree_iris.py
|
CrazyXiao/machine-learning
|
8e1e8cb9cf6f4e1c403873168f2bacbd84a106bd
|
[
"MIT"
] | 10
|
2019-04-24T10:18:59.000Z
|
2021-04-19T12:58:59.000Z
|
#!/usr/bin/env python
"""
Iris decision-tree classification demo using scikit-learn.
"""
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split into training and test sets (20% held out for testing)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape)
# Build and fit the decision-tree classifier
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, y_train)
# Predictions on the test set
predictions = classifier.predict(X_test)
print(predictions)
# Accuracy on the held-out data
print(accuracy_score(y_test, predictions))
| 16.457143
| 72
| 0.767361
|
from sklearn.datasets import load_iris
from sklearn import tree
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Load the iris dataset
iris = load_iris()
X = iris.data
y = iris.target
# Split into training and test sets (20% held out for testing)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape)
# Build and fit the decision-tree classifier
classifier = tree.DecisionTreeClassifier()
classifier.fit(X_train, y_train)
# Predictions on the test set
predictions = classifier.predict(X_test)
print(predictions)
# Accuracy on the held-out data
print(accuracy_score(y_test, predictions))
| true
| true
|
1c495055dbaff7ed5a9f296fd48afb316a4ab298
| 1,294
|
py
|
Python
|
src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 5
|
2018-03-08T17:22:27.000Z
|
2018-03-11T14:20:53.000Z
|
src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 263
|
2018-03-08T18:05:12.000Z
|
2022-03-11T23:26:20.000Z
|
src/web/modules/ejudge/migrations/0004_auto_20160329_1924.py
|
fossabot/SIStema
|
1427dda2082688a9482c117d0e24ad380fdc26a6
|
[
"MIT"
] | 6
|
2018-03-12T19:48:19.000Z
|
2022-01-14T04:58:52.000Z
|
# -*- coding: utf-8 -*-
# Auto-generated Django migration; avoid hand-editing operations that may
# already have been applied to a deployed database.
from __future__ import unicode_literals
from django.db import models, migrations
import djchoices.choices
class Migration(migrations.Migration):
    # Runs after the previous ejudge migration.
    dependencies = [
        ('ejudge', '0003_auto_20160329_1823'),
    ]
    operations = [
        # New free-text message for submissions that won't be checked.
        migrations.AddField(
            model_name='queueelement',
            name='wont_check_message',
            field=models.TextField(default='', blank=True),
        ),
        # Status choices: NOT FETCHED / SUBMITTED / CHECKED / WONT CHECK.
        migrations.AlterField(
            model_name='queueelement',
            name='status',
            field=models.PositiveIntegerField(default=1, validators=[djchoices.choices.ChoicesValidator({1: 'NOT FETCHED', 2: 'SUBMITTED', 3: 'CHECKED', 4: 'WONT CHECK'})], choices=[(1, 'NOT FETCHED'), (2, 'SUBMITTED'), (3, 'CHECKED'), (4, 'WONT CHECK')]),
        ),
        # Optional FK (null/blank allowed) that cascades on delete.
        migrations.AlterField(
            model_name='queueelement',
            name='submission',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.Submission', null=True),
        ),
        # Optional FK to the checking result, cascades on delete.
        migrations.AlterField(
            model_name='submission',
            name='result',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.SolutionCheckingResult', null=True),
        ),
    ]
| 35.944444
| 256
| 0.616692
|
# Auto-generated Django migration; avoid hand-editing operations that may
# already have been applied to a deployed database.
from __future__ import unicode_literals
from django.db import models, migrations
import djchoices.choices
class Migration(migrations.Migration):
    # Runs after the previous ejudge migration.
    dependencies = [
        ('ejudge', '0003_auto_20160329_1823'),
    ]
    operations = [
        # New free-text message for submissions that won't be checked.
        migrations.AddField(
            model_name='queueelement',
            name='wont_check_message',
            field=models.TextField(default='', blank=True),
        ),
        # Status choices: NOT FETCHED / SUBMITTED / CHECKED / WONT CHECK.
        migrations.AlterField(
            model_name='queueelement',
            name='status',
            field=models.PositiveIntegerField(default=1, validators=[djchoices.choices.ChoicesValidator({1: 'NOT FETCHED', 2: 'SUBMITTED', 3: 'CHECKED', 4: 'WONT CHECK'})], choices=[(1, 'NOT FETCHED'), (2, 'SUBMITTED'), (3, 'CHECKED'), (4, 'WONT CHECK')]),
        ),
        # Optional FK (null/blank allowed) that cascades on delete.
        migrations.AlterField(
            model_name='queueelement',
            name='submission',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.Submission', null=True),
        ),
        # Optional FK to the checking result, cascades on delete.
        migrations.AlterField(
            model_name='submission',
            name='result',
            field=models.ForeignKey(default=None, on_delete=models.CASCADE, blank=True, to='ejudge.SolutionCheckingResult', null=True),
        ),
    ]
| true
| true
|
1c495283b4a9eb1215b4155542911802987dc8c2
| 151
|
py
|
Python
|
reallySecureRandom.py
|
CabraKill/desafio_ford
|
9d0f5c5f7396b4fb702df23e8871b9906867d583
|
[
"MIT"
] | null | null | null |
reallySecureRandom.py
|
CabraKill/desafio_ford
|
9d0f5c5f7396b4fb702df23e8871b9906867d583
|
[
"MIT"
] | null | null | null |
reallySecureRandom.py
|
CabraKill/desafio_ford
|
9d0f5c5f7396b4fb702df23e8871b9906867d583
|
[
"MIT"
] | null | null | null |
import random

# Inclusive default sampling bounds.
MIN_NUMBER = 0
MAX_NUMBER = 1000

# SECURITY: despite this module's name, `random.randint` is NOT
# cryptographically secure (Mersenne Twister, seedable).  SystemRandom
# draws from os.urandom and exposes the same inclusive-range interface.
_secure_rng = random.SystemRandom()


def randomIntNumber(min: int = MIN_NUMBER, max: int = MAX_NUMBER):
    """Return a random int N with min <= N <= max (defaults 0..1000).

    Raises ValueError if min > max, same as random.randint.
    """
    return _secure_rng.randint(min, max)
| 21.571429
| 66
| 0.741722
|
import random

# Inclusive default sampling bounds.
MIN_NUMBER = 0
MAX_NUMBER = 1000

# SECURITY: despite this module's name, `random.randint` is NOT
# cryptographically secure (Mersenne Twister, seedable).  SystemRandom
# draws from os.urandom and exposes the same inclusive-range interface.
_secure_rng = random.SystemRandom()


def randomIntNumber(min: int = MIN_NUMBER, max: int = MAX_NUMBER):
    """Return a random int N with min <= N <= max (defaults 0..1000).

    Raises ValueError if min > max, same as random.randint.
    """
    return _secure_rng.randint(min, max)
| true
| true
|
1c4952934ed638a6e4875e47806d396365cee9cf
| 10,465
|
py
|
Python
|
pymc3/parallel_sampling.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 1
|
2018-06-11T03:13:00.000Z
|
2018-06-11T03:13:00.000Z
|
pymc3/parallel_sampling.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | 2
|
2017-03-02T05:56:13.000Z
|
2019-12-06T19:15:42.000Z
|
pymc3/parallel_sampling.py
|
acolombi/pymc3
|
3cb45700156b63e786eb70909d3e1d6e1f21703a
|
[
"Apache-2.0"
] | null | null | null |
import multiprocessing
import multiprocessing.sharedctypes
import ctypes
import time
import logging
from collections import namedtuple
import traceback
import six
import numpy as np
from . import theanof
logger = logging.getLogger('pymc3')
# Taken from https://hg.python.org/cpython/rev/c4f92b597074
class RemoteTraceback(Exception):
    """Carries a worker process's traceback text back to the parent;
    str() yields the original formatted traceback."""
    def __init__(self, tb):
        # tb: traceback already rendered to a string in the worker.
        self.tb = tb
    def __str__(self):
        return self.tb
class ExceptionWithTraceback:
    """Pickle-safe wrapper carrying an exception and its formatted traceback.

    Traceback objects cannot be pickled across process boundaries, so the
    traceback is rendered to text immediately.
    """

    def __init__(self, exc, tb):
        rendered = ''.join(traceback.format_exception(type(exc), exc, tb))
        self.exc = exc
        self.tb = '\n"""\n%s"""' % rendered

    def __reduce__(self):
        # Unpickling in the parent calls rebuild_exc(exc, tb).
        return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
    # Unpickle helper for ExceptionWithTraceback: attach the remote
    # traceback text as __cause__ so the worker's traceback is shown
    # chained when the exception is re-raised in the parent.
    exc.__cause__ = RemoteTraceback(tb)
    return exc
# Messages
# ('writing_done', is_last, sample_idx, tuning, stats)
# ('error', *exception_info)
# ('abort', reason)
# ('write_next',)
# ('start',)
class _Process(multiprocessing.Process):
    """Separate process for each chain.
    We communicate with the main process using a pipe,
    and send finished samples using shared memory.
    """
    def __init__(self, name, msg_pipe, step_method, shared_point,
                 draws, tune, seed):
        # shared_point: dict of RawArray buffers shared with the parent;
        # draws/tune: number of post-tuning and tuning iterations.
        super(_Process, self).__init__(daemon=True, name=name)
        self._msg_pipe = msg_pipe
        self._step_method = step_method
        self._shared_point = shared_point
        self._seed = seed
        # Separate seed for the theano RNG.
        self._tt_seed = seed + 1
        self._draws = draws
        self._tune = tune
    def run(self):
        try:
            # We do not create this in __init__, as pickling this
            # would destroy the shared memory.
            self._point = self._make_numpy_refs()
            self._start_loop()
        except KeyboardInterrupt:
            # Triggered by an 'abort' message; exit quietly.
            pass
        except BaseException as e:
            # Ship the failure (with its traceback text) to the parent.
            e = ExceptionWithTraceback(e, e.__traceback__)
            self._msg_pipe.send(('error', e))
        finally:
            self._msg_pipe.close()
    def _make_numpy_refs(self):
        # Wrap each shared buffer in a numpy view with the right
        # shape/dtype so the step method can read/write it in place.
        shape_dtypes = self._step_method.vars_shape_dtype
        point = {}
        for name, (shape, dtype) in shape_dtypes.items():
            array = self._shared_point[name]
            self._shared_point[name] = array
            point[name] = np.frombuffer(array, dtype).reshape(shape)
        return point
    def _write_point(self, point):
        # Copy a sampled point into the shared-memory views.
        for name, vals in point.items():
            self._point[name][...] = vals
    def _recv_msg(self):
        return self._msg_pipe.recv()
    def _start_loop(self):
        # Main worker loop: wait for 'start', then alternate computing a
        # draw and waiting for 'write_next' before publishing it.
        np.random.seed(self._seed)
        theanof.set_tt_rng(self._tt_seed)
        draw = 0
        tuning = True
        msg = self._recv_msg()
        if msg[0] == 'abort':
            raise KeyboardInterrupt()
        if msg[0] != 'start':
            raise ValueError('Unexpected msg ' + msg[0])
        while True:
            if draw < self._draws + self._tune:
                point, stats = self._compute_point()
            else:
                return
            if draw == self._tune:
                # Tuning phase over; freeze the step method.
                self._step_method.stop_tuning()
                tuning = False
            msg = self._recv_msg()
            if msg[0] == 'abort':
                raise KeyboardInterrupt()
            elif msg[0] == 'write_next':
                self._write_point(point)
                is_last = draw + 1 == self._draws + self._tune
                if is_last:
                    warns = self._collect_warnings()
                else:
                    warns = None
                self._msg_pipe.send(
                    ('writing_done', is_last, draw, tuning, stats, warns))
                draw += 1
            else:
                raise ValueError('Unknown message ' + msg[0])
    def _compute_point(self):
        # Advance the chain one step; stats is None when the step method
        # does not produce sampler statistics.
        if self._step_method.generates_stats:
            point, stats = self._step_method.step(self._point)
        else:
            point = self._step_method.step(self._point)
            stats = None
        return point, stats
    def _collect_warnings(self):
        # Step methods may optionally expose accumulated warnings.
        if hasattr(self._step_method, 'warnings'):
            return self._step_method.warnings()
        else:
            return []
class ProcessAdapter(object):
    """Control a Chain process from the main thread."""
    def __init__(self, draws, tune, step_method, chain, seed, start):
        self.chain = chain
        process_name = "worker_chain_%s" % chain
        self._msg_pipe, remote_conn = multiprocessing.Pipe()
        # Allocate one shared byte buffer per model variable and seed it
        # with the start point; both sides view it through numpy.
        self._shared_point = {}
        self._point = {}
        for name, (shape, dtype) in step_method.vars_shape_dtype.items():
            size = 1
            for dim in shape:
                size *= int(dim)
            size *= dtype.itemsize
            # Guard against size_t overflow when allocating.
            if size != ctypes.c_size_t(size).value:
                raise ValueError('Variable %s is too large' % name)
            array = multiprocessing.sharedctypes.RawArray('c', size)
            self._shared_point[name] = array
            array_np = np.frombuffer(array, dtype).reshape(shape)
            array_np[...] = start[name]
            self._point[name] = array_np
        self._readable = True
        self._num_samples = 0
        self._process = _Process(
            process_name, remote_conn, step_method, self._shared_point,
            draws, tune, seed)
        # We fork right away, so that the main process can start tqdm threads
        self._process.start()
    @property
    def shared_point_view(self):
        """May only be written to or read between a `recv_draw`
        call from the process and a `write_next` or `abort` call.
        """
        if not self._readable:
            raise RuntimeError()
        return self._point
    def start(self):
        self._msg_pipe.send(('start',))
    def write_next(self):
        # Hand the shared buffers back to the worker for the next draw.
        self._readable = False
        self._msg_pipe.send(('write_next',))
    def abort(self):
        self._msg_pipe.send(('abort',))
    def join(self, timeout=None):
        self._process.join(timeout)
    def terminate(self):
        self._process.terminate()
    @staticmethod
    def recv_draw(processes, timeout=3600):
        # Block until any worker reports; returns
        # (proc, is_last, draw_idx, tuning, stats, warns).
        # NOTE(review): `timeout` is currently unused — wait() is called
        # without it; confirm whether it should be passed through.
        if not processes:
            raise ValueError('No processes.')
        pipes = [proc._msg_pipe for proc in processes]
        ready = multiprocessing.connection.wait(pipes)
        if not ready:
            raise multiprocessing.TimeoutError('No message from samplers.')
        idxs = {id(proc._msg_pipe): proc for proc in processes}
        proc = idxs[id(ready[0])]
        msg = ready[0].recv()
        if msg[0] == 'error':
            # Re-raise the worker failure with its remote traceback chained.
            old = msg[1]
            six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
        elif msg[0] == 'writing_done':
            proc._readable = True
            proc._num_samples += 1
            return (proc,) + msg[1:]
        else:
            raise ValueError('Sampler sent bad message.')
    @staticmethod
    def terminate_all(processes, patience=2):
        # Ask every worker to stop, then wait up to `patience` seconds in
        # total before force-terminating stragglers.
        for process in processes:
            try:
                process.abort()
            except EOFError:
                pass
        start_time = time.time()
        try:
            for process in processes:
                timeout = time.time() + patience - start_time
                if timeout < 0:
                    raise multiprocessing.TimeoutError()
                process.join(timeout)
        except multiprocessing.TimeoutError:
            logger.warn('Chain processes did not terminate as expected. '
                        'Terminating forcefully...')
            for process in processes:
                process.terminate()
            for process in processes:
                process.join()
# One sample handed from a worker chain to the main process.
Draw = namedtuple(
    'Draw', 'chain is_last draw_idx tuning stats point warnings')
class ParallelSampler(object):
    """Drive `chains` worker processes, running at most `cores` at a time.

    Must be used as a context manager; iterating yields Draw tuples as
    workers produce them.
    """
    def __init__(self, draws, tune, chains, cores, seeds, start_points,
                 step_method, start_chain_num=0, progressbar=True):
        if progressbar:
            # Imported lazily so tqdm is only required when used.
            import tqdm
            tqdm_ = tqdm.tqdm
        if any(len(arg) != chains for arg in [seeds, start_points]):
            raise ValueError(
                'Number of seeds and start_points must be %s.' % chains)
        self._samplers = [
            ProcessAdapter(draws, tune, step_method,
                           chain + start_chain_num, seed, start)
            for chain, seed, start in zip(range(chains), seeds, start_points)
        ]
        self._inactive = self._samplers.copy()
        self._finished = []
        self._active = []
        self._max_active = cores
        self._in_context = False
        self._start_chain_num = start_chain_num
        self._progress = None
        if progressbar:
            self._progress = tqdm_(
                total=chains * (draws + tune), unit='draws',
                desc='Sampling %s chains' % chains)
    def _make_active(self):
        # Promote queued chains until `cores` workers are running.
        while self._inactive and len(self._active) < self._max_active:
            proc = self._inactive.pop(0)
            proc.start()
            proc.write_next()
            self._active.append(proc)
    def __iter__(self):
        if not self._in_context:
            raise ValueError('Use ParallelSampler as context manager.')
        self._make_active()
        while self._active:
            draw = ProcessAdapter.recv_draw(self._active)
            proc, is_last, draw, tuning, stats, warns = draw
            if self._progress is not None:
                self._progress.update()
            if is_last:
                proc.join()
                self._active.remove(proc)
                self._finished.append(proc)
                self._make_active()
            # We could also yield proc.shared_point_view directly,
            # and only call proc.write_next() after the yield returns.
            # This seems to be faster overally though, as the worker
            # loses less time waiting.
            point = {name: val.copy()
                     for name, val in proc.shared_point_view.items()}
            # Already called for new proc in _make_active
            if not is_last:
                proc.write_next()
            yield Draw(proc.chain, is_last, draw, tuning, stats, point, warns)
    def __enter__(self):
        self._in_context = True
        return self
    def __exit__(self, *args):
        # Always tear down all workers, even on error/early exit.
        ProcessAdapter.terminate_all(self._samplers)
        if self._progress is not None:
            self._progress.close()
| 31.053412
| 78
| 0.572384
|
import multiprocessing
import multiprocessing.sharedctypes
import ctypes
import time
import logging
from collections import namedtuple
import traceback
import six
import numpy as np
from . import theanof
logger = logging.getLogger('pymc3')
class RemoteTraceback(Exception):
def __init__(self, tb):
self.tb = tb
def __str__(self):
return self.tb
class ExceptionWithTraceback:
def __init__(self, exc, tb):
tb = traceback.format_exception(type(exc), exc, tb)
tb = ''.join(tb)
self.exc = exc
self.tb = '\n"""\n%s"""' % tb
def __reduce__(self):
return rebuild_exc, (self.exc, self.tb)
def rebuild_exc(exc, tb):
exc.__cause__ = RemoteTraceback(tb)
return exc
class _Process(multiprocessing.Process):
def __init__(self, name, msg_pipe, step_method, shared_point,
draws, tune, seed):
super(_Process, self).__init__(daemon=True, name=name)
self._msg_pipe = msg_pipe
self._step_method = step_method
self._shared_point = shared_point
self._seed = seed
self._tt_seed = seed + 1
self._draws = draws
self._tune = tune
def run(self):
try:
self._point = self._make_numpy_refs()
self._start_loop()
except KeyboardInterrupt:
pass
except BaseException as e:
e = ExceptionWithTraceback(e, e.__traceback__)
self._msg_pipe.send(('error', e))
finally:
self._msg_pipe.close()
def _make_numpy_refs(self):
shape_dtypes = self._step_method.vars_shape_dtype
point = {}
for name, (shape, dtype) in shape_dtypes.items():
array = self._shared_point[name]
self._shared_point[name] = array
point[name] = np.frombuffer(array, dtype).reshape(shape)
return point
def _write_point(self, point):
for name, vals in point.items():
self._point[name][...] = vals
def _recv_msg(self):
return self._msg_pipe.recv()
def _start_loop(self):
np.random.seed(self._seed)
theanof.set_tt_rng(self._tt_seed)
draw = 0
tuning = True
msg = self._recv_msg()
if msg[0] == 'abort':
raise KeyboardInterrupt()
if msg[0] != 'start':
raise ValueError('Unexpected msg ' + msg[0])
while True:
if draw < self._draws + self._tune:
point, stats = self._compute_point()
else:
return
if draw == self._tune:
self._step_method.stop_tuning()
tuning = False
msg = self._recv_msg()
if msg[0] == 'abort':
raise KeyboardInterrupt()
elif msg[0] == 'write_next':
self._write_point(point)
is_last = draw + 1 == self._draws + self._tune
if is_last:
warns = self._collect_warnings()
else:
warns = None
self._msg_pipe.send(
('writing_done', is_last, draw, tuning, stats, warns))
draw += 1
else:
raise ValueError('Unknown message ' + msg[0])
def _compute_point(self):
if self._step_method.generates_stats:
point, stats = self._step_method.step(self._point)
else:
point = self._step_method.step(self._point)
stats = None
return point, stats
def _collect_warnings(self):
if hasattr(self._step_method, 'warnings'):
return self._step_method.warnings()
else:
return []
class ProcessAdapter(object):
def __init__(self, draws, tune, step_method, chain, seed, start):
self.chain = chain
process_name = "worker_chain_%s" % chain
self._msg_pipe, remote_conn = multiprocessing.Pipe()
self._shared_point = {}
self._point = {}
for name, (shape, dtype) in step_method.vars_shape_dtype.items():
size = 1
for dim in shape:
size *= int(dim)
size *= dtype.itemsize
if size != ctypes.c_size_t(size).value:
raise ValueError('Variable %s is too large' % name)
array = multiprocessing.sharedctypes.RawArray('c', size)
self._shared_point[name] = array
array_np = np.frombuffer(array, dtype).reshape(shape)
array_np[...] = start[name]
self._point[name] = array_np
self._readable = True
self._num_samples = 0
self._process = _Process(
process_name, remote_conn, step_method, self._shared_point,
draws, tune, seed)
self._process.start()
@property
def shared_point_view(self):
if not self._readable:
raise RuntimeError()
return self._point
def start(self):
self._msg_pipe.send(('start',))
def write_next(self):
self._readable = False
self._msg_pipe.send(('write_next',))
def abort(self):
self._msg_pipe.send(('abort',))
def join(self, timeout=None):
self._process.join(timeout)
def terminate(self):
self._process.terminate()
@staticmethod
def recv_draw(processes, timeout=3600):
if not processes:
raise ValueError('No processes.')
pipes = [proc._msg_pipe for proc in processes]
ready = multiprocessing.connection.wait(pipes)
if not ready:
raise multiprocessing.TimeoutError('No message from samplers.')
idxs = {id(proc._msg_pipe): proc for proc in processes}
proc = idxs[id(ready[0])]
msg = ready[0].recv()
if msg[0] == 'error':
old = msg[1]
six.raise_from(RuntimeError('Chain %s failed.' % proc.chain), old)
elif msg[0] == 'writing_done':
proc._readable = True
proc._num_samples += 1
return (proc,) + msg[1:]
else:
raise ValueError('Sampler sent bad message.')
@staticmethod
def terminate_all(processes, patience=2):
for process in processes:
try:
process.abort()
except EOFError:
pass
start_time = time.time()
try:
for process in processes:
timeout = time.time() + patience - start_time
if timeout < 0:
raise multiprocessing.TimeoutError()
process.join(timeout)
except multiprocessing.TimeoutError:
logger.warn('Chain processes did not terminate as expected. '
'Terminating forcefully...')
for process in processes:
process.terminate()
for process in processes:
process.join()
Draw = namedtuple(
'Draw',
['chain', 'is_last', 'draw_idx', 'tuning', 'stats', 'point', 'warnings']
)
class ParallelSampler(object):
def __init__(self, draws, tune, chains, cores, seeds, start_points,
step_method, start_chain_num=0, progressbar=True):
if progressbar:
import tqdm
tqdm_ = tqdm.tqdm
if any(len(arg) != chains for arg in [seeds, start_points]):
raise ValueError(
'Number of seeds and start_points must be %s.' % chains)
self._samplers = [
ProcessAdapter(draws, tune, step_method,
chain + start_chain_num, seed, start)
for chain, seed, start in zip(range(chains), seeds, start_points)
]
self._inactive = self._samplers.copy()
self._finished = []
self._active = []
self._max_active = cores
self._in_context = False
self._start_chain_num = start_chain_num
self._progress = None
if progressbar:
self._progress = tqdm_(
total=chains * (draws + tune), unit='draws',
desc='Sampling %s chains' % chains)
def _make_active(self):
while self._inactive and len(self._active) < self._max_active:
proc = self._inactive.pop(0)
proc.start()
proc.write_next()
self._active.append(proc)
def __iter__(self):
if not self._in_context:
raise ValueError('Use ParallelSampler as context manager.')
self._make_active()
while self._active:
draw = ProcessAdapter.recv_draw(self._active)
proc, is_last, draw, tuning, stats, warns = draw
if self._progress is not None:
self._progress.update()
if is_last:
proc.join()
self._active.remove(proc)
self._finished.append(proc)
self._make_active()
point = {name: val.copy()
for name, val in proc.shared_point_view.items()}
if not is_last:
proc.write_next()
yield Draw(proc.chain, is_last, draw, tuning, stats, point, warns)
def __enter__(self):
self._in_context = True
return self
def __exit__(self, *args):
ProcessAdapter.terminate_all(self._samplers)
if self._progress is not None:
self._progress.close()
| true
| true
|
1c4952dfb7b5146c980edaa2ae1af799017e768b
| 167
|
py
|
Python
|
sewer/config.py
|
prestix-studio/sewer
|
67867f778eb92c9c14cd028116f5695b0223baa2
|
[
"MIT"
] | 135
|
2017-12-31T22:01:33.000Z
|
2022-01-20T18:18:11.000Z
|
sewer/config.py
|
prestix-studio/sewer
|
67867f778eb92c9c14cd028116f5695b0223baa2
|
[
"MIT"
] | 149
|
2018-01-10T10:36:18.000Z
|
2021-07-01T16:22:47.000Z
|
sewer/config.py
|
prestix-studio/sewer
|
67867f778eb92c9c14cd028116f5695b0223baa2
|
[
"MIT"
] | 61
|
2018-03-05T16:58:55.000Z
|
2021-05-21T01:30:07.000Z
|
# Let's Encrypt ACME v2 directory endpoints.  The staging endpoint has
# relaxed rate limits but issues certificates signed by an untrusted root;
# use it for testing, and the production endpoint for real certificates.
ACME_DIRECTORY_URL_STAGING = "https://acme-staging-v02.api.letsencrypt.org/directory"
ACME_DIRECTORY_URL_PRODUCTION = "https://acme-v02.api.letsencrypt.org/directory"
| 55.666667
| 85
| 0.826347
|
ACME_DIRECTORY_URL_STAGING = "https://acme-staging-v02.api.letsencrypt.org/directory"
ACME_DIRECTORY_URL_PRODUCTION = "https://acme-v02.api.letsencrypt.org/directory"
| true
| true
|
1c4952e55605b55e89e8c96cb5c304d56bad7210
| 2,668
|
py
|
Python
|
src/send_status.py
|
Satish615/deep-learning-containers-1
|
76e750e828b6f583a6b7b1c291057059a14285b1
|
[
"Apache-2.0"
] | 1
|
2021-12-17T15:50:48.000Z
|
2021-12-17T15:50:48.000Z
|
src/send_status.py
|
Satish615/deep-learning-containers-1
|
76e750e828b6f583a6b7b1c291057059a14285b1
|
[
"Apache-2.0"
] | null | null | null |
src/send_status.py
|
Satish615/deep-learning-containers-1
|
76e750e828b6f583a6b7b1c291057059a14285b1
|
[
"Apache-2.0"
] | null | null | null |
import os
import argparse
import utils
from github import GitHubHandler
def get_args():
    """Parse the command-line arguments for this script.

    :return: argparse namespace with a ``status`` attribute ("0", "1" or "2")
    """
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument(
        "--status",
        choices=["0", "1", "2"],
        help="Github status to set. 0 is fail, 1 is success, 2 is pending",
    )
    return arg_parser.parse_args()
def get_target_url(project):
    """Build the AWS console deep link used as the GitHub "Details" URL.

    :param project: CodeBuild project name associated with the running build
    :return: URL pointing at this build's log page in the CodeBuild console
    """
    region = os.getenv("AWS_REGION")
    logpath = os.getenv("CODEBUILD_LOG_PATH")
    return (
        f"https://{region}.console.aws.amazon.com/codesuite/codebuild/projects/"
        f"{project}/build/{project}%3A{logpath}/log?region={region}"
    )
def set_build_description(state, project, trigger_job):
    """Compose the PR build description for a given state.

    :param state: <str> choices are "success", "failure", "error" or "pending"
    :param project: Project name associated with the running CodeBuild job
    :param trigger_job: The name of the CodeBuild project that triggered this build
    :return: <str> Description to be posted to the PR build
    """
    # Guard-clause style: each recognized state returns immediately.
    if state == "pending":
        return f"{project} is pending for {trigger_job}..."
    if state in ("failure", "error"):
        return f"{project} is in state {state.upper()} for {trigger_job}! Check details to debug."
    if state == "success":
        return f"{project} succeeded for {trigger_job}."
    return f"Unknown state: {state}"
def post_status(state):
    """Post a GitHub status check, with a constructed context, to the PR.

    :param state: <str> choices are "success", "failure", "error" or "pending"
    """
    project = utils.get_codebuild_project_name()
    trigger = os.getenv("TEST_TRIGGER", "UNKNOWN-TEST-TRIGGER")
    details_url = get_target_url(project)
    status_context = f"{trigger}_{project}"
    description = set_build_description(state, project, trigger)
    GitHubHandler().set_status(
        state=state,
        context=status_context,
        description=description,
        target_url=details_url,
    )
def main():
    """Map the CLI status code to a GitHub state and post it for PR builds."""
    state_by_code = {"0": "failure", "1": "success", "2": "pending"}
    state = state_by_code[get_args().status]
    # Only PR builds report their status back to GitHub.
    if os.getenv("BUILD_CONTEXT") == "PR":
        post_status(state)
# Script entry point: only runs when executed directly, not on import.
if __name__ == "__main__":
    main()
| 30.318182
| 122
| 0.664168
|
import os
import argparse
import utils
from github import GitHubHandler
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--status",
choices=["0", "1", "2"],
help="Github status to set. 0 is fail, 1 is success, 2 is pending",
)
return parser.parse_args()
def get_target_url(project):
region = os.getenv("AWS_REGION")
logpath = os.getenv("CODEBUILD_LOG_PATH")
return f"https://{region}.console.aws.amazon.com/codesuite/codebuild/projects/{project}/build/{project}%3A{logpath}" \
f"/log?region={region}"
def set_build_description(state, project, trigger_job):
if state == "success":
return f"{project} succeeded for {trigger_job}."
elif state == "failure" or state == "error":
return f"{project} is in state {state.upper()} for {trigger_job}! Check details to debug."
elif state == "pending":
return f"{project} is pending for {trigger_job}..."
else:
return f"Unknown state: {state}"
def post_status(state):
project_name = utils.get_codebuild_project_name()
trigger_job = os.getenv("TEST_TRIGGER", "UNKNOWN-TEST-TRIGGER")
target_url = get_target_url(project_name)
context = f"{trigger_job}_{project_name}"
description = set_build_description(state, project_name, trigger_job)
handler = GitHubHandler()
handler.set_status(
state=state,
context=context,
description=description,
target_url=target_url
)
def main():
codebuild_statuses = {"0": "failure", "1": "success", "2": "pending"}
args = get_args()
state = codebuild_statuses[args.status]
if os.getenv("BUILD_CONTEXT") == "PR":
post_status(state)
if __name__ == "__main__":
main()
| true
| true
|
1c49536f4b591e818fca4649372187456dbc31aa
| 493
|
py
|
Python
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 35
|
2020-12-07T03:11:43.000Z
|
2021-04-15T17:38:16.000Z
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 47
|
2020-12-09T01:29:09.000Z
|
2021-01-13T05:37:50.000Z
|
parser/team08/Tytus_SQLPARSER_G8/Instrucciones/Tipo.py
|
webdev188/tytus
|
847071edb17b218f51bb969d335a8ec093d13f94
|
[
"MIT"
] | 556
|
2020-12-07T03:13:31.000Z
|
2021-06-17T17:41:10.000Z
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Tipo(Instruccion):
    """AST node for a type; stores its value plus its source position."""

    def __init__(self, id, tipo, owner, id2, valor, linea, columna):
        # The Instruccion base class records the type and the line/column.
        Instruccion.__init__(self, tipo, linea, columna)
        self.valor = valor

    def ejecutar(self, tabla, arbol):
        """Run base bookkeeping, then print the value with its position."""
        super().ejecutar(tabla, arbol)
        mensaje = self.valor + " linea: " + str(self.linea) + " columna: " + str(self.columna)
        print(mensaje)
'''
instruccion = Tipo("hola mundo",None, 1,2)
instruccion.ejecutar(None,None)
'''
| 30.8125
| 91
| 0.677485
|
from Instrucciones.TablaSimbolos.Instruccion import Instruccion
class Tipo(Instruccion):
def __init__(self, id, tipo, owner, id2, valor, linea, columna):
Instruccion.__init__(self,tipo,linea,columna)
self.valor = valor
def ejecutar(self, tabla, arbol):
super().ejecutar(tabla,arbol)
print(self.valor + " linea: " + str(self.linea) + " columna: " + str(self.columna))
| true
| true
|
1c49548ead69b53400104cf9dac0bc4e40c5a598
| 3,709
|
py
|
Python
|
scripts/setup/generate_secrets.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | null | null | null |
scripts/setup/generate_secrets.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | null | null | null |
scripts/setup/generate_secrets.py
|
GauravVirmani/zulip
|
5a204d7c84d60e193f1ea0900d42848c5276a095
|
[
"Apache-2.0"
] | 1
|
2021-06-10T15:12:52.000Z
|
2021-06-10T15:12:52.000Z
|
#!/usr/bin/env python
# This tools generates /etc/zulip/zulip-secrets.conf
from __future__ import print_function
import sys
import os
import os.path
from os.path import dirname, abspath
if False:
from typing import Dict, Optional, Text
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
import argparse
from zerver.lib.str_utils import force_str
from zerver.lib.utils import generate_random_token
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
'initial_password_salt']
# TODO: We can eliminate this function if we refactor the install
# script to run generate_secrets before zulip-puppet-apply.
def generate_camo_config_file(camo_key):
    # type: (Text) -> None
    """Write the Camo daemon config file containing the shared key."""
    config_text = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
    with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
        camo_file.write(config_text)
    print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
    # type: () -> Text
    """Secret key generation taken from Django's startproject.py"""
    # 50 characters drawn from the same alphabet Django's startproject uses.
    allowed_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
    return get_random_string(50, allowed_chars)
def get_old_conf(output_filename):
    # type: (Text) -> Dict[str, Text]
    """Read previously generated secrets so re-runs keep existing values.

    Returns an empty dict when no file exists; otherwise a dict keyed by
    every known secret name, with None for options the file does not have.
    """
    if not os.path.exists(output_filename):
        return {}
    parser = six.moves.configparser.RawConfigParser()  # type: ignore # https://github.com/python/typeshed/issues/307
    parser.read(output_filename)

    def lookup(key):
        # type: (Text) -> Optional[Text]
        if parser.has_option('secrets', key):
            return parser.get('secrets', key)
        return None

    wanted = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
    return {name: lookup(name) for name in wanted}
def generate_secrets(development=False):
    # type: (bool) -> None
    """Write the zulip secrets config, preserving any existing secrets.

    Values present in a previous config are reused; missing ones are freshly
    generated.  In production mode the Camo config file is also (re)written.

    :param development: write to the dev config path instead of /etc/zulip.
    """
    if development:
        OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
    else:
        OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"

    lines = [u'[secrets]\n']

    def config_line(var, value):
        # type: (Text, Text) -> Text
        return "%s = %s\n" % (var, value)

    old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
    # NOTE: get_old_conf maps *missing* options to None (the keys are always
    # present), so dict.get's default argument would never fire when the file
    # exists but lacks a key — previously that wrote the literal string
    # "None" as a secret.  Check for None explicitly instead; this also
    # avoids generating throwaway tokens when the old value is present.
    for name in AUTOGENERATED_SETTINGS:
        value = old_conf.get(name)
        if value is None:
            value = generate_random_token(64)
        lines.append(config_line(name, value))

    secret_key = old_conf.get('secret_key')
    if secret_key is None:
        secret_key = generate_django_secretkey()
    lines.append(config_line('secret_key', secret_key))

    camo_key = old_conf.get('camo_key')
    if camo_key is None:
        camo_key = get_random_string(64)
    lines.append(config_line('camo_key', camo_key))

    if not development:
        # Write the Camo config file directly
        generate_camo_config_file(camo_key)

    # Context manager ensures the file is closed even if a write fails.
    with open(OUTPUT_SETTINGS_FILENAME, 'w') as out:
        out.write(force_str("".join(lines)))

    print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
    # Command-line entry point: exactly one of --development / --production
    # must be supplied; both store into the single `development` flag.
    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--development', action='store_true', dest='development', help='For setting up the developer env for zulip')
    group.add_argument('--production', action='store_false', dest='development', help='For setting up the production env for zulip')
    results = parser.parse_args()
    generate_secrets(results.development)
| 34.342593
| 132
| 0.7134
|
from __future__ import print_function
import sys
import os
import os.path
from os.path import dirname, abspath
if False:
from typing import Dict, Optional, Text
BASE_DIR = dirname(dirname(dirname(abspath(__file__))))
sys.path.append(BASE_DIR)
import scripts.lib.setup_path_on_import
os.environ['DJANGO_SETTINGS_MODULE'] = 'zproject.settings'
from django.utils.crypto import get_random_string
import six
import argparse
from zerver.lib.str_utils import force_str
from zerver.lib.utils import generate_random_token
os.chdir(os.path.join(os.path.dirname(__file__), '..', '..'))
CAMO_CONFIG_FILENAME = '/etc/default/camo'
AUTOGENERATED_SETTINGS = ['shared_secret', 'avatar_salt', 'rabbitmq_password', 'local_database_password',
'initial_password_salt']
def generate_camo_config_file(camo_key):
camo_config = """ENABLED=yes
PORT=9292
CAMO_KEY=%s
""" % (camo_key,)
with open(CAMO_CONFIG_FILENAME, 'w') as camo_file:
camo_file.write(camo_config)
print("Generated Camo config file %s" % (CAMO_CONFIG_FILENAME,))
def generate_django_secretkey():
chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
return get_random_string(50, chars)
def get_old_conf(output_filename):
if not os.path.exists(output_filename):
return {}
secrets_file = six.moves.configparser.RawConfigParser() secrets_file.read(output_filename)
def get_secret(key):
if secrets_file.has_option('secrets', key):
return secrets_file.get('secrets', key)
return None
fields = AUTOGENERATED_SETTINGS + ['secret_key', 'camo_key']
return {name: get_secret(name) for name in fields}
def generate_secrets(development=False):
if development:
OUTPUT_SETTINGS_FILENAME = "zproject/dev-secrets.conf"
else:
OUTPUT_SETTINGS_FILENAME = "/etc/zulip/zulip-secrets.conf"
lines = [u'[secrets]\n']
def config_line(var, value):
return "%s = %s\n" % (var, value)
old_conf = get_old_conf(OUTPUT_SETTINGS_FILENAME)
for name in AUTOGENERATED_SETTINGS:
lines.append(config_line(name, old_conf.get(name, generate_random_token(64))))
secret_key = old_conf.get('secret_key', generate_django_secretkey())
lines.append(config_line('secret_key', secret_key))
camo_key = old_conf.get('camo_key', get_random_string(64))
lines.append(config_line('camo_key', camo_key))
if not development:
generate_camo_config_file(camo_key)
out = open(OUTPUT_SETTINGS_FILENAME, 'w')
out.write(force_str("".join(lines)))
out.close()
print("Generated %s with auto-generated secrets!" % (OUTPUT_SETTINGS_FILENAME,))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--development', action='store_true', dest='development', help='For setting up the developer env for zulip')
group.add_argument('--production', action='store_false', dest='development', help='For setting up the production env for zulip')
results = parser.parse_args()
generate_secrets(results.development)
| true
| true
|
1c4954dc8435739b2d95abc3a8c025fdebc8c898
| 4,984
|
py
|
Python
|
tia/tests/test_rlab_table.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | 23
|
2017-11-13T01:05:49.000Z
|
2022-03-30T01:38:00.000Z
|
tia/tests/test_rlab_table.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | 1
|
2018-09-19T21:59:04.000Z
|
2018-09-19T21:59:04.000Z
|
tia/tests/test_rlab_table.py
|
lsternlicht/tia
|
fe74d1876260a946e52bd733bc32da0698749f2c
|
[
"BSD-3-Clause"
] | 13
|
2018-11-26T21:53:36.000Z
|
2022-01-09T00:10:27.000Z
|
import unittest
import pandas as pd
import pandas.util.testing as pdtest
import tia.rlab.table as tbl
class TestTable(unittest.TestCase):
    """Tests for tia.rlab.table: span/level iteration and TableFormatter.

    NOTE(review): several expected strings carry significant trailing
    padding (e.g. '55.0% ') produced by the formatters — do not strip them.
    """
    def setUp(self):
        # Simple 2x2 frame with a percent-like and a large-number column.
        self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])
        # Multi-index frame with multi-index
        cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])
        idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])
        self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)
    def test_span_iter(self):
        # Runs of equal values -> (start, end) index pairs; singletons skipped.
        s = pd.Series([1, 1, 1, 3, 2, 2])
        items = list(tbl.span_iter(s))
        self.assertEqual(items, [(0, 2), (4, 5)])
        # reverse and ensure it does not break it
        s = s[::-1]
        items = list(tbl.span_iter(s))
        self.assertEqual(items, [(0, 2), (4, 5)])
    def test_level_iter(self):
        # Yields (level, position, label) triples, optionally filtered by level.
        l1 = ['L_11', 'L_12']
        l2 = ['L_21', 'L_22']
        l3 = ['L_31', 'L_32']
        midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])
        actual = list(tbl.level_iter(midx))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]
        self.assertEqual(actual, expected)
        actual = list(tbl.level_iter(midx, levels=[0, 2]))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]
        self.assertEqual(actual, expected)
        actual = list(tbl.level_iter(midx, levels=0))
        expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]
        self.assertEqual(actual, expected)
    def test_region_formatter_iloc(self):
        # Formats applied to narrowed regions only touch those cells.
        tf = tbl.TableFormatter(self.df1)
        region = tf.cells
        region.apply_format(lambda x: 'A')
        expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        #
        # Use the location
        #
        region = region.iloc[:, 1]
        region.apply_format(lambda x: 'B')
        expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        # Get single cell
        region = region.iloc[1]
        region.apply_format(lambda x: 'D')
        expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
        # Get single cell
        region = tf.cells.iloc[1, 0]
        region.apply_format(lambda x: 'C')
        expected = pd.DataFrame([['A', 'B'], ['C', 'D']], index=[1, 2], columns=[1, 2])
        pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
    def test_region_empty(self):
        # Applying a format to an empty region must be a harmless no-op.
        tf = tbl.TableFormatter(self.df1)
        empty = tf['ALL'].empty_frame()
        empty.apply_format(lambda x: x)
    def test_detect_spans(self):
        # Repeated header/index labels should be merged into SPAN commands.
        tf = tbl.TableFormatter(self.mdf1)
        tf.header.detect_colspans()
        self.assertEqual(['SPAN', (2, 0), (3, 0)], tf.style_cmds[0])
        self.assertEqual(['SPAN', (4, 0), (5, 0)], tf.style_cmds[1])
        tf = tbl.TableFormatter(self.mdf1.T)
        tf.index.detect_rowspans()
        self.assertEqual(['SPAN', (0, 2), (0, 3)], tf.style_cmds[0])
        self.assertEqual(['SPAN', (0, 4), (0, 5)], tf.style_cmds[1])
    def test_match(self):
        # Only columns labeled 'A' (both LEFT and RIGHT) get percent format.
        tf = tbl.TableFormatter(self.mdf1)
        vcopy = tf.formatted_values.copy()
        tf.cells.match_column_labels(['A']).percent_format(precision=1)
        vcopy.iloc[2, 4] = '55.0% '  # padded for neg
        vcopy.iloc[3, 4] = '65.0% '
        vcopy.iloc[2, 2] = '55.0% '
        vcopy.iloc[3, 2] = '65.0% '
        pdtest.assert_frame_equal(vcopy, tf.formatted_values)
    def test_period_index(self):
        # PeriodIndex values can be formatted via strftime on either axis.
        df = pd.DataFrame({'x': [1., 2.], 'y': [3., 4.]}, index=pd.date_range('1/1/2015', freq='M', periods=2).to_period())
        tf = tbl.TableFormatter(df)
        # expected values
        vcopy = tf.formatted_values.copy()
        vcopy.iloc[1, 1] = '1 '
        vcopy.iloc[2, 1] = '2 '
        vcopy.iloc[1, 2] = '3 '
        vcopy.iloc[2, 2] = '4 '
        vcopy.iloc[1, 0] = '01/2015'
        vcopy.iloc[2, 0] = '02/2015'
        # build the format
        tf.cells.int_format()
        tf.index.apply_format(lambda x: x.strftime('%m/%Y'))
        pdtest.assert_frame_equal(vcopy, tf.formatted_values)
        # Test when it is the columns
        dfT = df.T
        tfT = tbl.TableFormatter(dfT)
        vcopy = tfT.formatted_values.copy()
        vcopy.iloc[1, 1] = '1 '
        vcopy.iloc[1, 2] = '2 '
        vcopy.iloc[2, 1] = '3 '
        vcopy.iloc[2, 2] = '4 '
        vcopy.iloc[0, 1] = '01/2015'
        vcopy.iloc[0, 2] = '02/2015'
        # build the format
        tfT.cells.int_format()
        tfT.header.apply_format(lambda x: x.strftime('%m/%Y'))
        pdtest.assert_frame_equal(vcopy, tfT.formatted_values)
| 40.520325
| 123
| 0.552769
|
import unittest
import pandas as pd
import pandas.util.testing as pdtest
import tia.rlab.table as tbl
class TestTable(unittest.TestCase):
def setUp(self):
self.df1 = df1 = pd.DataFrame({'A': [.55, .65], 'B': [1234., -5678.]}, index=['I1', 'I2'])
cols = pd.MultiIndex.from_arrays([['LEFT', 'LEFT', 'RIGHT', 'RIGHT'], ['A', 'B', 'A', 'B']])
idx = pd.MultiIndex.from_arrays([['TOP', 'BOTTOM'], ['I1', 'I2']])
self.mdf1 = pd.DataFrame([[.55, 1234., .55, 1234.], [.65, -5678., .65, -5678.]], columns=cols, index=idx)
def test_span_iter(self):
s = pd.Series([1, 1, 1, 3, 2, 2])
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
s = s[::-1]
items = list(tbl.span_iter(s))
self.assertEqual(items, [(0, 2), (4, 5)])
def test_level_iter(self):
l1 = ['L_11', 'L_12']
l2 = ['L_21', 'L_22']
l3 = ['L_31', 'L_32']
midx = pd.MultiIndex.from_arrays([l1, l2, l3], names=['1', '2', '3'])
actual = list(tbl.level_iter(midx))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (1, 0, 'L_21'), (1, 1, 'L_22'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=[0, 2]))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12'), (2, 0, 'L_31'), (2, 1, 'L_32')]
self.assertEqual(actual, expected)
actual = list(tbl.level_iter(midx, levels=0))
expected = [(0, 0, 'L_11'), (0, 1, 'L_12')]
self.assertEqual(actual, expected)
def test_region_formatter_iloc(self):
tf = tbl.TableFormatter(self.df1)
region = tf.cells
region.apply_format(lambda x: 'A')
expected = pd.DataFrame([['A', 'A'], ['A', 'A']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = region.iloc[:, 1]
region.apply_format(lambda x: 'B')
expected = pd.DataFrame([['A', 'B'], ['A', 'B']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = region.iloc[1]
region.apply_format(lambda x: 'D')
expected = pd.DataFrame([['A', 'B'], ['A', 'D']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
region = tf.cells.iloc[1, 0]
region.apply_format(lambda x: 'C')
expected = pd.DataFrame([['A', 'B'], ['C', 'D']], index=[1, 2], columns=[1, 2])
pdtest.assert_frame_equal(tf.cells.formatted_values, expected)
def test_region_empty(self):
tf = tbl.TableFormatter(self.df1)
empty = tf['ALL'].empty_frame()
empty.apply_format(lambda x: x)
def test_detect_spans(self):
tf = tbl.TableFormatter(self.mdf1)
tf.header.detect_colspans()
self.assertEqual(['SPAN', (2, 0), (3, 0)], tf.style_cmds[0])
self.assertEqual(['SPAN', (4, 0), (5, 0)], tf.style_cmds[1])
tf = tbl.TableFormatter(self.mdf1.T)
tf.index.detect_rowspans()
self.assertEqual(['SPAN', (0, 2), (0, 3)], tf.style_cmds[0])
self.assertEqual(['SPAN', (0, 4), (0, 5)], tf.style_cmds[1])
def test_match(self):
tf = tbl.TableFormatter(self.mdf1)
vcopy = tf.formatted_values.copy()
tf.cells.match_column_labels(['A']).percent_format(precision=1)
vcopy.iloc[2, 4] = '55.0% ' vcopy.iloc[3, 4] = '65.0% '
vcopy.iloc[2, 2] = '55.0% '
vcopy.iloc[3, 2] = '65.0% '
pdtest.assert_frame_equal(vcopy, tf.formatted_values)
def test_period_index(self):
df = pd.DataFrame({'x': [1., 2.], 'y': [3., 4.]}, index=pd.date_range('1/1/2015', freq='M', periods=2).to_period())
tf = tbl.TableFormatter(df)
vcopy = tf.formatted_values.copy()
vcopy.iloc[1, 1] = '1 '
vcopy.iloc[2, 1] = '2 '
vcopy.iloc[1, 2] = '3 '
vcopy.iloc[2, 2] = '4 '
vcopy.iloc[1, 0] = '01/2015'
vcopy.iloc[2, 0] = '02/2015'
tf.cells.int_format()
tf.index.apply_format(lambda x: x.strftime('%m/%Y'))
pdtest.assert_frame_equal(vcopy, tf.formatted_values)
dfT = df.T
tfT = tbl.TableFormatter(dfT)
vcopy = tfT.formatted_values.copy()
vcopy.iloc[1, 1] = '1 '
vcopy.iloc[1, 2] = '2 '
vcopy.iloc[2, 1] = '3 '
vcopy.iloc[2, 2] = '4 '
vcopy.iloc[0, 1] = '01/2015'
vcopy.iloc[0, 2] = '02/2015'
tfT.cells.int_format()
tfT.header.apply_format(lambda x: x.strftime('%m/%Y'))
pdtest.assert_frame_equal(vcopy, tfT.formatted_values)
| true
| true
|
1c49550b54ddcb81eae7fc9c5f4d29cbe211b580
| 3,020
|
py
|
Python
|
samples/tests/create_delete_entity_test.py
|
dxiao2003/dialogflow-python-client-v2
|
05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72
|
[
"Apache-2.0"
] | 1
|
2019-03-31T23:25:46.000Z
|
2019-03-31T23:25:46.000Z
|
samples/tests/create_delete_entity_test.py
|
dxiao2003/dialogflow-python-client-v2
|
05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72
|
[
"Apache-2.0"
] | 15
|
2020-01-28T23:14:29.000Z
|
2022-02-10T00:40:40.000Z
|
samples/tests/create_delete_entity_test.py
|
dxiao2003/dialogflow-python-client-v2
|
05a1d3f0682de2c7d8c0c4db3fa5fea8934dfe72
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
from .. import entity_type_management
from .. import entity_management
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
ENTITY_TYPE_DISPLAY_NAME = 'fake_entity_type_for_testing'
ENTITY_VALUE_1 = 'fake_entity_for_testing_1'
ENTITY_VALUE_2 = 'fake_entity_for_testing_2'
SYNONYMS = ['fake_synonym_for_testing_1', 'fake_synonym_for_testing_2']
def test_create_entity_type(capsys):
    """Create the test entity type and verify it appears afterwards.

    Assumes no leftover entity type with the test display name exists.
    """
    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    assert len(entity_type_ids) == 0

    # The created entity type is verified through the captured output, so
    # the return value is not bound (was previously an unused local).
    entity_type_management.create_entity_type(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME, 'KIND_MAP')

    out, _ = capsys.readouterr()
    assert 'display_name: "{}"'.format(ENTITY_TYPE_DISPLAY_NAME) in out

    entity_type_ids = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
    assert len(entity_type_ids) == 1
def test_create_entity(capsys):
    """Create two fake entities (the second with synonyms) and verify that
    both values and every synonym appear in the listing output."""
    type_id = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
    for value, synonyms in ((ENTITY_VALUE_1, []), (ENTITY_VALUE_2, SYNONYMS)):
        entity_management.create_entity(PROJECT_ID, type_id, value, synonyms)
    entity_management.list_entities(PROJECT_ID, type_id)
    out, _ = capsys.readouterr()
    assert 'Entity value: {}'.format(ENTITY_VALUE_1) in out
    assert 'Entity value: {}'.format(ENTITY_VALUE_2) in out
    assert all(synonym in out for synonym in SYNONYMS)
def test_delete_entity(capsys):
    """Delete both fake entities and verify the listing prints nothing."""
    type_id = entity_type_management._get_entity_type_ids(
        PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
    for value in (ENTITY_VALUE_1, ENTITY_VALUE_2):
        entity_management.delete_entity(PROJECT_ID, type_id, value)
    entity_management.list_entities(PROJECT_ID, type_id)
    out, _ = capsys.readouterr()
    assert not out
def test_delete_entity_type(capsys):
    """Remove every fake entity type created by this suite and verify that
    none remain afterwards."""
    get_ids = entity_type_management._get_entity_type_ids
    for type_id in get_ids(PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME):
        entity_type_management.delete_entity_type(PROJECT_ID, type_id)
    assert len(get_ids(PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)) == 0
| 33.555556
| 77
| 0.769536
|
from __future__ import absolute_import
import os
from .. import entity_type_management
from .. import entity_management
PROJECT_ID = os.getenv('GCLOUD_PROJECT')
ENTITY_TYPE_DISPLAY_NAME = 'fake_entity_type_for_testing'
ENTITY_VALUE_1 = 'fake_entity_for_testing_1'
ENTITY_VALUE_2 = 'fake_entity_for_testing_2'
SYNONYMS = ['fake_synonym_for_testing_1', 'fake_synonym_for_testing_2']
def test_create_entity_type(capsys):
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 0
entity_type = entity_type_management.create_entity_type(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME, 'KIND_MAP')
out, _ = capsys.readouterr()
assert 'display_name: "{}"'.format(ENTITY_TYPE_DISPLAY_NAME) in out
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 1
def test_create_entity(capsys):
entity_type_id = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
entity_management.create_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_1, [])
entity_management.create_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_2, SYNONYMS)
entity_management.list_entities(PROJECT_ID, entity_type_id)
out, _ = capsys.readouterr()
assert 'Entity value: {}'.format(ENTITY_VALUE_1) in out
assert 'Entity value: {}'.format(ENTITY_VALUE_2) in out
for synonym in SYNONYMS:
assert synonym in out
def test_delete_entity(capsys):
entity_type_id = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)[0]
entity_management.delete_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_1)
entity_management.delete_entity(
PROJECT_ID, entity_type_id, ENTITY_VALUE_2)
entity_management.list_entities(PROJECT_ID, entity_type_id)
out, _ = capsys.readouterr()
assert out == ''
def test_delete_entity_type(capsys):
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
for entity_type_id in entity_type_ids:
entity_type_management.delete_entity_type(PROJECT_ID, entity_type_id)
entity_type_ids = entity_type_management._get_entity_type_ids(
PROJECT_ID, ENTITY_TYPE_DISPLAY_NAME)
assert len(entity_type_ids) == 0
| true
| true
|
1c495713f2d7d3192ab5567d3b26f08e034a69eb
| 11,617
|
py
|
Python
|
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
pyabsa/core/apc/classic/__bert__/dataset_utils/data_utils_for_training.py
|
onlyrico/PyABSA
|
d0905eb5253eaa564d2244cd777e3a734bca777a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# file: data_utils.py
# author: songyouwei <youwei0314@gmail.com>
# Copyright (C) 2018. All Rights Reserved.
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels
def prepare_glove840_embedding(glove_path):
    """Locate a GloVe embedding file for ``glove_path``, downloading
    glove.840B.300d from Google Drive when none can be found.

    ``glove_path`` may be an existing file (returned as-is), an existing
    directory (searched, together with its parent, for known GloVe files),
    or a missing path (created, then downloaded into).
    """
    # Google Drive file id of the pre-zipped glove.840B.300d archive.
    glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'
    if not os.path.exists(glove_path):
        os.mkdir(glove_path)
    elif os.path.isfile(glove_path):
        # Caller already points at a concrete embedding file.
        return glove_path
    elif os.path.isdir(glove_path):
        embedding_file = None
        dir_path = os.path.dirname(glove_path)
        # Probe the parent directory for any of the common GloVe variants.
        if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
        elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
            embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
        if embedding_file:
            print('Find potential embedding files: {}'.format(embedding_file))
            return embedding_file
    # Nothing found locally: fetch and unzip the archive next to the data.
    zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
    print('No GloVe embedding found at {},'
          ' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
    gdd.download_file_from_google_drive(file_id=glove840_id,
                                        dest_path=zip_glove_path,
                                        unzip=True
                                        )
    # NOTE(review): the branches above index find_file(...)[0], but here the
    # raw result is returned unindexed -- confirm find_file's return type
    # (list vs. single path) against its definition in findfile.
    glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
    return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
    """Load a cached :class:`Tokenizer` from ``opt.dataset_path/dat_fname``,
    or fit a new one on the dataset text and cache it.

    Args:
        dataset_list: mapping of dataset type -> list of sample files.
        max_seq_len: maximum sequence length the tokenizer will pad/clip to.
        dat_fname: cache file name under ``opt.dataset_path``.
        opt: option namespace providing ``dataset_path``.

    Returns:
        A fitted Tokenizer instance.
    """
    cache_path = os.path.join(opt.dataset_path, dat_fname)
    if os.path.exists(cache_path):
        print('Loading tokenizer on {}'.format(cache_path))
        # was: pickle.load(open(...)) leaked the file handle
        with open(cache_path, 'rb') as cache:
            tokenizer = pickle.load(cache)
    else:
        text = ''
        for dataset_type in dataset_list:
            for file in dataset_list[dataset_type]:
                with open(file, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
                    lines = fin.readlines()
                # Samples span three lines: sentence with a "$T$" aspect
                # placeholder, the aspect term, and the polarity label.
                # Only the first two contribute vocabulary.
                for i in range(0, len(lines), 3):
                    text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
                    aspect = lines[i + 1].lower().strip()
                    text_raw = text_left + " " + aspect + " " + text_right
                    text += text_raw + " "
        tokenizer = Tokenizer(max_seq_len)
        tokenizer.fit_on_text(text)
        with open(cache_path, 'wb') as cache:
            pickle.dump(tokenizer, cache)
    return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
    """Read a GloVe-style text embedding file into a ``{word: vector}`` dict.

    Args:
        path: embedding file with one ``word v1 ... v_embed_dim`` per line.
        word2idx: optional vocabulary; when given, only its words are kept.
            (The original crashed on the documented ``None`` default; now
            ``None`` keeps every word.)
        embed_dim: number of trailing float columns on each line.

    Returns:
        dict mapping each retained word to a float32 numpy vector.
    """
    word_vec = {}
    # was: the file handle was never closed
    with open(path, 'r', encoding='utf-8', newline='\n', errors='ignore') as fin:
        for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
            tokens = line.rstrip().split()
            # A word may itself contain spaces, so split from the right:
            # everything before the last embed_dim columns is the word.
            word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
            if word2idx is None or word in word2idx:
                word_vec[word] = np.asarray(vec, dtype='float32')
    return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
    """Return a ``(len(word2idx) + 2, embed_dim)`` matrix of GloVe vectors,
    cached under ``opt.dataset_path/dat_fname``.

    Row 0 (padding) and row ``len(word2idx) + 1`` (unknown word) stay
    all-zero, as does any vocabulary word missing from the embedding file.

    Args:
        word2idx: mapping word -> row index (indices start at 1).
        embed_dim: embedding vector width.
        dat_fname: cache file name under ``opt.dataset_path``.
        opt: option namespace providing ``dataset_path``.
    """
    cache_path = os.path.join(opt.dataset_path, dat_fname)
    if os.path.exists(cache_path):
        print('Loading cached embedding_matrix for {}'.format(cache_path))
        # was: pickle.load(open(...)) leaked the file handle
        with open(cache_path, 'rb') as cache:
            embedding_matrix = pickle.load(cache)
    else:
        print('Extracting embedding_matrix for {}'.format(dat_fname))
        glove_path = prepare_glove840_embedding(opt.dataset_path)
        # idx 0 and len(word2idx) + 1 are reserved all-zero rows.
        embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim))
        word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
        for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
            vec = word_vec.get(word)
            if vec is not None:
                # Words not found in the embedding index stay all-zeros.
                embedding_matrix[i] = vec
        with open(cache_path, 'wb') as cache:
            pickle.dump(embedding_matrix, cache)
    return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
    """Clip ``sequence`` to at most ``maxlen`` items and pad the remainder
    with ``value``, returning a numpy array of exactly ``maxlen`` elements.

    Args:
        sequence: 1-D sequence of numbers.
        maxlen: fixed output length.
        dtype: numpy dtype of the result.
        padding: 'post' pads on the right, anything else pads on the left.
        truncating: 'pre' keeps the last ``maxlen`` items, anything else
            keeps the first ``maxlen``.
        value: fill value for padded positions.
    """
    x = np.full(maxlen, value, dtype=dtype)
    trunc = sequence[-maxlen:] if truncating == 'pre' else sequence[:maxlen]
    trunc = np.asarray(trunc, dtype=dtype)
    # Guard the empty case: with padding != 'post', x[-0:] aliases the whole
    # array and assigning an empty slice raised ValueError. An empty input
    # now simply yields an all-``value`` array.
    if len(trunc):
        if padding == 'post':
            x[:len(trunc)] = trunc
        else:
            x[-len(trunc):] = trunc
    return x
class Tokenizer(object):
    """Whitespace tokenizer mapping words to consecutive integer ids."""

    def __init__(self, max_seq_len, lower=True):
        self.lower = lower
        self.max_seq_len = max_seq_len
        self.word2idx = {}
        self.idx2word = {}
        self.idx = 1  # id 0 is implicitly left for padding

    def fit_on_text(self, text):
        """Extend the vocabulary with every previously unseen word in ``text``."""
        if self.lower:
            text = text.lower()
        for token in text.split():
            if token not in self.word2idx:
                self.word2idx[token] = self.idx
                self.idx2word[self.idx] = token
                self.idx += 1

    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Convert ``text`` to a fixed-length id array.

        Unknown words map to ``len(vocabulary) + 1``; an empty text becomes
        a single padding id before padding/truncation is applied.
        """
        if self.lower:
            text = text.lower()
        oov_id = len(self.word2idx) + 1
        sequence = [self.word2idx.get(token, oov_id) for token in text.split()]
        if not sequence:
            sequence = [0]
        if reverse:
            sequence.reverse()
        return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class Tokenizer4Pretraining:
    """Adapter exposing ``Tokenizer.text_to_sequence``'s interface on top of
    a pretrained Hugging Face tokenizer."""

    def __init__(self, max_seq_len, pretrained_bert_name):
        self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
        self.max_seq_len = max_seq_len

    def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
        """Tokenize ``text`` into a fixed-length array of wordpiece ids.

        An empty tokenization becomes a single padding id before
        padding/truncation is applied.
        """
        ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        if not ids:
            ids = [0]
        if reverse:
            ids = ids[::-1]
        return pad_and_truncate(ids, self.max_seq_len, padding=padding, truncating=truncating)
class BERTBaselineABSADataset(Dataset):
    """Torch Dataset feeding classic ABSA baseline models with
    BERT-tokenized inputs.

    Raw dataset files store each sample as three consecutive lines: the
    sentence with the aspect replaced by the "$T$" placeholder, the aspect
    term, and the integer polarity label.
    """

    # For every supported model name, the feature keys it consumes.
    bert_baseline_input_colses = {
        'lstm_bert': ['text_indices'],
        'td_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices'],
        'tc_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
        'atae_lstm_bert': ['text_indices', 'aspect_indices'],
        'ian_bert': ['text_indices', 'aspect_indices'],
        'memnet_bert': ['context_indices', 'aspect_indices'],
        'ram_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'cabasc_bert': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
        'tnet_lf_bert': ['text_indices', 'aspect_indices', 'aspect_boundary'],
        'aoa_bert': ['text_indices', 'aspect_indices'],
        'mgan_bert': ['text_indices', 'aspect_indices', 'left_indices'],
        'asgcn_bert': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
    }

    def __init__(self, dataset_list, tokenizer, opt):
        """Parse ``dataset_list`` files into model-ready feature dicts.

        Side effects: may create ``opt.dataset_path`` (rewriting it to an
        absolute path) and sets ``opt.polarities_dim`` to the number of
        distinct polarity labels seen.
        """
        lines = load_apc_datasets(dataset_list)
        all_data = []
        label_set = set()
        if not os.path.exists(opt.dataset_path):
            os.mkdir(os.path.join(os.getcwd(), opt.dataset_path))
            opt.dataset_path = os.path.join(os.getcwd(), opt.dataset_path)
        graph_path = prepare_dependency_graph(dataset_list, opt.dataset_path, opt.max_seq_len)
        # was: fin = open(...) with no close -- the handle leaked
        with open(graph_path, 'rb') as fin:
            idx2graph = pickle.load(fin)
        for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):
            text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
            aspect = lines[i + 1].lower().strip()
            polarity = lines[i + 2].strip()
            text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + " [SEP]")
            context_indices = tokenizer.text_to_sequence(text_left + text_right)
            left_indices = tokenizer.text_to_sequence(text_left)
            left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + " " + aspect + " [SEP]")
            right_indices = tokenizer.text_to_sequence(text_right, reverse=False)
            right_with_aspect_indices = tokenizer.text_to_sequence(aspect + " " + text_right, reverse=False)
            aspect_indices = tokenizer.text_to_sequence(aspect)
            aspect_len = np.sum(aspect_indices != 0)
            # Clip the left context so the aspect still fits in max_seq_len.
            left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))
            left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))
            aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)
            polarity = int(polarity)
            # Zero-pad the dependency graph up to max_seq_len on both axes,
            # then crop it square to exactly (max_seq_len, max_seq_len).
            dependency_graph = np.pad(idx2graph[i],
                                      ((0, max(0, opt.max_seq_len - idx2graph[i].shape[0])),
                                       (0, max(0, opt.max_seq_len - idx2graph[i].shape[0]))),
                                      'constant')
            dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]
            dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]
            # Only materialize the features the chosen model consumes;
            # everything else collapses to a cheap 0 placeholder.
            data = {
                'text_indices': text_indices
                if 'text_indices' in opt.model.inputs else 0,
                'context_indices': context_indices
                if 'context_indices' in opt.model.inputs else 0,
                'left_indices': left_indices
                if 'left_indices' in opt.model.inputs else 0,
                'left_with_aspect_indices': left_with_aspect_indices
                if 'left_with_aspect_indices' in opt.model.inputs else 0,
                'right_indices': right_indices
                if 'right_indices' in opt.model.inputs else 0,
                'right_with_aspect_indices': right_with_aspect_indices
                if 'right_with_aspect_indices' in opt.model.inputs else 0,
                'aspect_indices': aspect_indices
                if 'aspect_indices' in opt.model.inputs else 0,
                'aspect_boundary': aspect_boundary
                if 'aspect_boundary' in opt.model.inputs else 0,
                'dependency_graph': dependency_graph
                if 'dependency_graph' in opt.model.inputs else 0,
                'polarity': polarity,
            }
            label_set.add(polarity)
            all_data.append(data)
        check_and_fix_labels(label_set, 'polarity', all_data)
        opt.polarities_dim = len(label_set)
        self.data = all_data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)
| 44.003788
| 120
| 0.627615
|
import os
import pickle
import numpy as np
import tqdm
from findfile import find_file
from google_drive_downloader.google_drive_downloader import GoogleDriveDownloader as gdd
from torch.utils.data import Dataset
from transformers import AutoTokenizer
from pyabsa.core.apc.classic.__glove__.dataset_utils.dependency_graph import prepare_dependency_graph
from pyabsa.core.apc.dataset_utils.apc_utils import load_apc_datasets
from pyabsa.utils.pyabsa_utils import check_and_fix_labels
def prepare_glove840_embedding(glove_path):
glove840_id = '1G-vd6W1oF9ByyJ-pzp9dcqKnr_plh4Em'
if not os.path.exists(glove_path):
os.mkdir(glove_path)
elif os.path.isfile(glove_path):
return glove_path
elif os.path.isdir(glove_path):
embedding_file = None
dir_path = os.path.dirname(glove_path)
if find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.42B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.840B.300d.txt', exclude_key='.zip')[0]
elif find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip'):
embedding_file = find_file(dir_path, 'glove.twitter.27B.txt', exclude_key='.zip')[0]
if embedding_file:
print('Find potential embedding files: {}'.format(embedding_file))
return embedding_file
zip_glove_path = os.path.join(glove_path, '__glove__.840B.300d.txt.zip')
print('No GloVe embedding found at {},'
' downloading __glove__.840B.300d.txt (2GB transferred / 5.5GB unzipped)...'.format(glove_path))
gdd.download_file_from_google_drive(file_id=glove840_id,
dest_path=zip_glove_path,
unzip=True
)
glove_path = find_file(glove_path, 'txt', exclude_key='.zip')
return glove_path
def build_tokenizer(dataset_list, max_seq_len, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):
print('Loading tokenizer on {}'.format(os.path.join(opt.dataset_path, dat_fname)))
tokenizer = pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))
else:
text = ''
for dataset_type in dataset_list:
for file in dataset_list[dataset_type]:
fin = open(file, 'r', encoding='utf-8', newline='\n', errors='ignore')
lines = fin.readlines()
fin.close()
for i in range(0, len(lines), 3):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
text_raw = text_left + " " + aspect + " " + text_right
text += text_raw + " "
tokenizer = Tokenizer(max_seq_len)
tokenizer.fit_on_text(text)
pickle.dump(tokenizer, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))
return tokenizer
def _load_word_vec(path, word2idx=None, embed_dim=300):
fin = open(path, 'r', encoding='utf-8', newline='\n', errors='ignore')
word_vec = {}
for line in tqdm.tqdm(fin, postfix='Loading embedding file...'):
tokens = line.rstrip().split()
word, vec = ' '.join(tokens[:-embed_dim]), tokens[-embed_dim:]
if word in word2idx.keys():
word_vec[word] = np.asarray(vec, dtype='float32')
return word_vec
def build_embedding_matrix(word2idx, embed_dim, dat_fname, opt):
if os.path.exists(os.path.join(opt.dataset_path, dat_fname)):
print('Loading cached embedding_matrix for {}'.format(os.path.join(opt.dataset_path, dat_fname)))
embedding_matrix = pickle.load(open(os.path.join(opt.dataset_path, dat_fname), 'rb'))
else:
print('Extracting embedding_matrix for {}'.format(dat_fname))
glove_path = prepare_glove840_embedding(opt.dataset_path)
embedding_matrix = np.zeros((len(word2idx) + 2, embed_dim))
word_vec = _load_word_vec(glove_path, word2idx=word2idx, embed_dim=embed_dim)
for word, i in tqdm.tqdm(word2idx.items(), postfix='Building embedding_matrix {}'.format(dat_fname)):
vec = word_vec.get(word)
if vec is not None:
embedding_matrix[i] = vec
pickle.dump(embedding_matrix, open(os.path.join(opt.dataset_path, dat_fname), 'wb'))
return embedding_matrix
def pad_and_truncate(sequence, maxlen, dtype='int64', padding='post', truncating='post', value=0):
x = (np.ones(maxlen) * value).astype(dtype)
if truncating == 'pre':
trunc = sequence[-maxlen:]
else:
trunc = sequence[:maxlen]
trunc = np.asarray(trunc, dtype=dtype)
if padding == 'post':
x[:len(trunc)] = trunc
else:
x[-len(trunc):] = trunc
return x
class Tokenizer(object):
def __init__(self, max_seq_len, lower=True):
self.lower = lower
self.max_seq_len = max_seq_len
self.word2idx = {}
self.idx2word = {}
self.idx = 1
def fit_on_text(self, text):
if self.lower:
text = text.lower()
words = text.split()
for word in words:
if word not in self.word2idx:
self.word2idx[word] = self.idx
self.idx2word[self.idx] = word
self.idx += 1
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
if self.lower:
text = text.lower()
words = text.split()
unknownidx = len(self.word2idx) + 1
sequence = [self.word2idx[w] if w in self.word2idx else unknownidx for w in words]
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class Tokenizer4Pretraining:
def __init__(self, max_seq_len, pretrained_bert_name):
self.tokenizer = AutoTokenizer.from_pretrained(pretrained_bert_name)
self.max_seq_len = max_seq_len
def text_to_sequence(self, text, reverse=False, padding='post', truncating='post'):
sequence = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
if len(sequence) == 0:
sequence = [0]
if reverse:
sequence = sequence[::-1]
return pad_and_truncate(sequence, self.max_seq_len, padding=padding, truncating=truncating)
class BERTBaselineABSADataset(Dataset):
bert_baseline_input_colses = {
'lstm_bert': ['text_indices'],
'td_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices'],
'tc_lstm_bert': ['left_with_aspect_indices', 'right_with_aspect_indices', 'aspect_indices'],
'atae_lstm_bert': ['text_indices', 'aspect_indices'],
'ian_bert': ['text_indices', 'aspect_indices'],
'memnet_bert': ['context_indices', 'aspect_indices'],
'ram_bert': ['text_indices', 'aspect_indices', 'left_indices'],
'cabasc_bert': ['text_indices', 'aspect_indices', 'left_with_aspect_indices', 'right_with_aspect_indices'],
'tnet_lf_bert': ['text_indices', 'aspect_indices', 'aspect_boundary'],
'aoa_bert': ['text_indices', 'aspect_indices'],
'mgan_bert': ['text_indices', 'aspect_indices', 'left_indices'],
'asgcn_bert': ['text_indices', 'aspect_indices', 'left_indices', 'dependency_graph'],
}
def __init__(self, dataset_list, tokenizer, opt):
lines = load_apc_datasets(dataset_list)
all_data = []
label_set = set()
if not os.path.exists(opt.dataset_path):
os.mkdir(os.path.join(os.getcwd(), opt.dataset_path))
opt.dataset_path = os.path.join(os.getcwd(), opt.dataset_path)
graph_path = prepare_dependency_graph(dataset_list, opt.dataset_path, opt.max_seq_len)
fin = open(graph_path, 'rb')
idx2graph = pickle.load(fin)
for i in tqdm.tqdm(range(0, len(lines), 3), postfix='building word indices...'):
text_left, _, text_right = [s.lower().strip() for s in lines[i].partition("$T$")]
aspect = lines[i + 1].lower().strip()
polarity = lines[i + 2].strip()
text_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + ' ' + aspect + ' ' + text_right + " [SEP]")
context_indices = tokenizer.text_to_sequence(text_left + text_right)
left_indices = tokenizer.text_to_sequence(text_left)
left_with_aspect_indices = tokenizer.text_to_sequence('[CLS] ' + text_left + " " + aspect + " [SEP]")
right_indices = tokenizer.text_to_sequence(text_right, reverse=False)
right_with_aspect_indices = tokenizer.text_to_sequence(aspect + " " + text_right, reverse=False)
aspect_indices = tokenizer.text_to_sequence(aspect)
aspect_len = np.sum(aspect_indices != 0)
left_len = min(opt.max_seq_len - aspect_len, np.sum(left_indices != 0))
left_indices = np.concatenate((left_indices[:left_len], np.asarray([0] * (opt.max_seq_len - left_len))))
aspect_boundary = np.asarray([left_len, left_len + aspect_len - 1], dtype=np.int64)
polarity = int(polarity)
dependency_graph = np.pad(idx2graph[i],
((0, max(0, opt.max_seq_len - idx2graph[i].shape[0])),
(0, max(0, opt.max_seq_len - idx2graph[i].shape[0]))),
'constant')
dependency_graph = dependency_graph[:, range(0, opt.max_seq_len)]
dependency_graph = dependency_graph[range(0, opt.max_seq_len), :]
data = {
'text_indices': text_indices
if 'text_indices' in opt.model.inputs else 0,
'context_indices': context_indices
if 'context_indices' in opt.model.inputs else 0,
'left_indices': left_indices
if 'left_indices' in opt.model.inputs else 0,
'left_with_aspect_indices': left_with_aspect_indices
if 'left_with_aspect_indices' in opt.model.inputs else 0,
'right_indices': right_indices
if 'right_indices' in opt.model.inputs else 0,
'right_with_aspect_indices': right_with_aspect_indices
if 'right_with_aspect_indices' in opt.model.inputs else 0,
'aspect_indices': aspect_indices
if 'aspect_indices' in opt.model.inputs else 0,
'aspect_boundary': aspect_boundary
if 'aspect_boundary' in opt.model.inputs else 0,
'dependency_graph': dependency_graph
if 'dependency_graph' in opt.model.inputs else 0,
'polarity': polarity,
}
label_set.add(polarity)
all_data.append(data)
check_and_fix_labels(label_set, 'polarity', all_data)
opt.polarities_dim = len(label_set)
self.data = all_data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
| true
| true
|
1c49582dab9e0f3c90ba1de50ff9860965a98b5d
| 421
|
py
|
Python
|
labs/4.1/server.py
|
alexellis/docker-blinkt-workshop
|
ae2204bbc85658b111e864ae4b39b05583eb4ebf
|
[
"MIT"
] | 171
|
2017-04-10T19:09:36.000Z
|
2022-03-04T16:06:30.000Z
|
labs/4.1/server.py
|
mcne65/docker-blinkt-workshop
|
ae2204bbc85658b111e864ae4b39b05583eb4ebf
|
[
"MIT"
] | 4
|
2017-04-17T19:33:46.000Z
|
2017-08-02T17:46:18.000Z
|
labs/4.1/server.py
|
mcne65/docker-blinkt-workshop
|
ae2204bbc85658b111e864ae4b39b05583eb4ebf
|
[
"MIT"
] | 30
|
2017-04-17T19:03:54.000Z
|
2022-03-04T16:06:31.000Z
|
from flask import Flask, request, render_template
import json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
    """Return the Raspberry Pi CPU temperature (sysfs millidegrees string)
    as a JSON payload of the form ``{"temperature": "<value>"}``."""
    # The context manager closes the sysfs handle even if read() raises
    # (the original's manual close() was skipped on error); 'sensor' also
    # avoids shadowing the 'file' name.
    with open("/sys/class/thermal/thermal_zone0/temp") as sensor:
        data = sensor.read().rstrip()  # remove trailing '\n' newline character.
    return json.dumps({"temperature": data})
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| 26.3125
| 73
| 0.657957
|
from flask import Flask, request, render_template
import json
app = Flask(__name__)
@app.route('/', methods=['GET'])
def home():
file = open("/sys/class/thermal/thermal_zone0/temp")
data = file.read().rstrip()
file.close()
payload = json.dumps({ "temperature": data })
return payload
if __name__ == '__main__':
app.run(debug=False, host='0.0.0.0')
| true
| true
|
1c4958fcf0f982431d31d142ff78a7ab416fc6e0
| 10,841
|
py
|
Python
|
docs/source/conf.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
docs/source/conf.py
|
wlad111/pymc3
|
43432834be5bbca72caa32d40a848515eea554a8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pymc3_ext documentation build configuration file, created by
# sphinx-quickstart on Sat Dec 26 14:40:23 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import pymc3_ext
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
sys.path.insert(0, os.path.abspath("sphinxext"))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon",
"gallery_generator",
"recommonmark",
]
# Don't auto-generate summary for class members.
numpydoc_show_class_members = False
# Show the documentation of __init__ and the class docstring
autoclass_content = "both"
# Do not show the return type as seperate section
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyMC3"
copyright = "2018, The PyMC Development Team"
author = "PyMC developers"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pymc3_ext.__version__
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "friendly"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme_path = ["."]
html_theme = "semantic_sphinx"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
"navbar_links": [
("Tutorials", "nb_tutorials/index"),
("Examples", "nb_examples/index"),
("Books + Videos", "learn"),
("API", "api"),
("Developer Guide", "developer_guide"),
("About PyMC3", "history")
],
# "fixed_sidebar": "false",
# "description": "Probabilistic Programming in Python: Bayesian Modeling and Probabilistic Machine Learning with Theano"
}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "../pymc3_logo.jpg"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "../logos/PyMC3.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static", "nb_tutorials/_images", "nb_examples/_images"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {"**": ["about.html", "navigation.html", "searchbox.html"]}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "pymc3doc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, "pymc3_ext.tex", "PyMC3 Documentation", "PyMC developers", "manual")
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "pymc3_ext", "pymc3_ext Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"pymc3_ext",
"pymc3_ext Documentation",
author,
"pymc3_ext",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
def setup(app):
    """Sphinx extension entry point: register extra CSS for the HTML build.

    Adds the Semantic UI framework from a CDN, then a project-local override
    stylesheet (resolved against ``html_static_path``).
    NOTE(review): ``Sphinx.add_stylesheet`` was deprecated and removed in
    Sphinx 5.0 in favour of ``add_css_file`` — confirm the pinned Sphinx
    version still provides it.
    """
    app.add_stylesheet(
        "https://cdn.jsdelivr.net/npm/semantic-ui@2.4.2/dist/semantic.min.css"
    )
    # Local overrides must come after the framework so they win the cascade.
    app.add_stylesheet("default.css")
| 32.555556
| 128
| 0.705009
|
import sys
import os
import pymc3_ext
sys.path.insert(0, os.path.abspath(os.path.join("..", "..")))
sys.path.insert(0, os.path.abspath("sphinxext"))
extensions = [
"matplotlib.sphinxext.plot_directive",
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.mathjax",
"nbsphinx",
"numpydoc",
"IPython.sphinxext.ipython_console_highlighting",
"IPython.sphinxext.ipython_directive",
"sphinx.ext.autosectionlabel",
"sphinx.ext.napoleon",
"gallery_generator",
"recommonmark",
]
numpydoc_show_class_members = False
# Show the documentation of __init__ and the class docstring
autoclass_content = "both"
# Do not show the return type as seperate section
napoleon_use_rtype = False
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = [".rst", ".md"]
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "PyMC3"
copyright = "2018, The PyMC Development Team"
author = "PyMC developers"
# The version info for the project you're documenting, acts as replacement for
version = pymc3_ext.__version__
release = version
language = None
exclude_patterns = ["_build", "**.ipynb_checkpoints"]
nbsphinx_execute = "never"
pygments_style = "friendly"
todo_include_todos = False
html_theme_path = ["."]
html_theme = "semantic_sphinx"
html_theme_options = {
"navbar_links": [
("Tutorials", "nb_tutorials/index"),
("Examples", "nb_examples/index"),
("Books + Videos", "learn"),
("API", "api"),
("Developer Guide", "developer_guide"),
("About PyMC3", "history")
],
}
html_logo = "../pymc3_logo.jpg"
html_favicon = "../logos/PyMC3.ico"
html_static_path = ["_static", "nb_tutorials/_images", "nb_examples/_images"]
html_sidebars = {"**": ["about.html", "navigation.html", "searchbox.html"]}
htmlhelp_basename = "pymc3doc"
latex_elements = {
}
latex_documents = [
(master_doc, "pymc3_ext.tex", "PyMC3 Documentation", "PyMC developers", "manual")
]
man_pages = [(master_doc, "pymc3_ext", "pymc3_ext Documentation", [author], 1)]
texinfo_documents = [
(
master_doc,
"pymc3_ext",
"pymc3_ext Documentation",
author,
"pymc3_ext",
"One line description of project.",
"Miscellaneous",
)
]
# texinfo_no_detailmenu = False
def setup(app):
    """Sphinx extension entry point: register extra CSS for the HTML build.

    Adds the Semantic UI framework from a CDN, then a project-local override
    stylesheet (resolved against ``html_static_path``).
    NOTE(review): ``Sphinx.add_stylesheet`` was deprecated and removed in
    Sphinx 5.0 in favour of ``add_css_file`` — confirm the pinned Sphinx
    version still provides it.
    """
    app.add_stylesheet(
        "https://cdn.jsdelivr.net/npm/semantic-ui@2.4.2/dist/semantic.min.css"
    )
    # Local overrides must come after the framework so they win the cascade.
    app.add_stylesheet("default.css")
| true
| true
|
1c495b77063d3e04fd1ff7e97fe4f6361eba5132
| 834
|
py
|
Python
|
Chapter06/example4.py
|
jpgacrama/Mastering-Concurrency-in-Python
|
3033840fe9b36320ba41a4f23a7d5284d0e47e7c
|
[
"MIT"
] | null | null | null |
Chapter06/example4.py
|
jpgacrama/Mastering-Concurrency-in-Python
|
3033840fe9b36320ba41a4f23a7d5284d0e47e7c
|
[
"MIT"
] | null | null | null |
Chapter06/example4.py
|
jpgacrama/Mastering-Concurrency-in-Python
|
3033840fe9b36320ba41a4f23a7d5284d0e47e7c
|
[
"MIT"
] | null | null | null |
# ch6/example4.py
from multiprocessing import Process, current_process
import time
from os import system, name
def f1():
    """Worker body: announce start, idle for four seconds, announce exit.

    Runs inside a child process; prints the process name and PID so the
    daemon-vs-regular lifetime difference shows up in the program output.
    """
    proc = current_process()
    print('Starting process %s, ID %s...' % (proc.name, proc.pid))
    time.sleep(4)
    print('Exiting process %s, ID %s...' % (proc.name, proc.pid))
def f2():
    """Worker body: announce start, idle for two seconds, announce exit.

    Same shape as ``f1`` but with a shorter sleep, so the two workers
    finish at visibly different times.
    """
    proc = current_process()
    print('Starting process %s, ID %s...' % (proc.name, proc.pid))
    time.sleep(2)
    print('Exiting process %s, ID %s...' % (proc.name, proc.pid))
def clear():
    """Clear the terminal using the platform-appropriate shell command.

    Windows (``os.name == 'nt'``) uses ``cls``; every other platform
    (mac/linux, ``os.name == 'posix'``) uses ``clear``.
    """
    command = 'cls' if name == 'nt' else 'clear'
    _ = system(command)
if __name__ == '__main__':
    clear()
    # Worker 1 is a daemon: it is terminated when the main process exits,
    # so its 4-second job may be cut short. Worker 2 is non-daemonic, so
    # the interpreter waits for it to finish before exiting.
    p1 = Process(name='Worker 1', target=f1)
    p1.daemon = True

    p2 = Process(name='Worker 2', target=f2)

    p1.start()
    time.sleep(1)  # stagger the starts so the output interleaving is visible
    p2.start()
| 22.540541
| 60
| 0.577938
|
from multiprocessing import Process, current_process
import time
from os import system, name
def f1():
p = current_process()
print('Starting process %s, ID %s...' % (p.name, p.pid))
time.sleep(4)
print('Exiting process %s, ID %s...' % (p.name, p.pid))
def f2():
p = current_process()
print('Starting process %s, ID %s...' % (p.name, p.pid))
time.sleep(2)
print('Exiting process %s, ID %s...' % (p.name, p.pid))
def clear():
if name == 'nt':
_ = system('cls')
else:
_ = system('clear')
if __name__ == '__main__':
clear()
p1 = Process(name='Worker 1', target=f1)
p1.daemon = True
p2 = Process(name='Worker 2', target=f2)
p1.start()
time.sleep(1)
p2.start()
| true
| true
|
1c495d19aad684be7ab6647d0bbb9cc56a933309
| 218
|
py
|
Python
|
src/packages/play_text.py
|
Tpool1/Asclepius
|
760ab31a8933772faa76064a42b11ab6e12d6c9a
|
[
"MIT"
] | null | null | null |
src/packages/play_text.py
|
Tpool1/Asclepius
|
760ab31a8933772faa76064a42b11ab6e12d6c9a
|
[
"MIT"
] | null | null | null |
src/packages/play_text.py
|
Tpool1/Asclepius
|
760ab31a8933772faa76064a42b11ab6e12d6c9a
|
[
"MIT"
] | null | null | null |
import pyttsx3
from packages.write_conversation_data import write_conversation_data
def play_text(text):
    """Speak *text* aloud via text-to-speech, then log it.

    Initializes a fresh pyttsx3 engine per call, blocks until the speech
    finishes, and finally records the text in the conversation log.
    """
    speech_engine = pyttsx3.init()
    speech_engine.say(text)
    speech_engine.runAndWait()
    write_conversation_data(text)
| 21.8
| 68
| 0.761468
|
import pyttsx3
from packages.write_conversation_data import write_conversation_data
def play_text(text):
engine = pyttsx3.init()
engine.say(text)
engine.runAndWait()
write_conversation_data(text)
| true
| true
|
1c495f1161e27118531d15882e1e5c93d9149524
| 3,826
|
py
|
Python
|
Airplane/chap10/autopilot.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2020-06-07T00:14:42.000Z
|
2020-06-07T00:14:42.000Z
|
Submarine/chap10/autopilot.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | null | null | null |
Submarine/chap10/autopilot.py
|
eyler94/ee674AirplaneSim
|
3ba2c6e685c2688a7f372475a7cd1f55f583d10e
|
[
"MIT"
] | 1
|
2019-06-24T22:10:48.000Z
|
2019-06-24T22:10:48.000Z
|
"""
autopilot block for mavsim_python
- Beard & McLain, PUP, 2012
- Last Update:
2/6/2019 - RWB
"""
import sys
import numpy as np
sys.path.append('..')
import parameters.control_parameters as AP
from chap6.pid_controlBrendon import pid_control#, pi_control, pd_control_with_rate
from message_types.msg_state import msg_state
from tools.tools import Euler2Quaternion, Quaternion2Euler
from control import matlab
class autopilot:
    """Successive-loop-closure autopilot.

    Maps high-level commands (course, altitude, airspeed) to surface
    deflections ``[elevator, throttle, aileron, rudder]`` via cascaded
    PID loops, gains taken from ``parameters.control_parameters``.
    """
    def __init__(self, ts_control):
        # ts_control: controller sample period (seconds), forwarded to each loop.
        # instantiate lateral controllers
        self.roll_from_aileron = pid_control( #pd_control_with_rate(
                        kp=AP.roll_kp,
                        kd=AP.roll_kd,
                        Ts=ts_control,
                        limit=np.radians(45))
        self.course_from_roll = pid_control( #pi_control(
                        kp=AP.course_kp,
                        ki=AP.course_ki,
                        Ts=ts_control,
                        limit=np.radians(30))
        self.sideslip_from_rudder = pid_control( #pi_control(
                        kp=AP.sideslip_kp,
                        ki=AP.sideslip_ki,
                        Ts=ts_control,
                        limit=np.radians(45))
        # NOTE(review): discrete transfer function with num=[0.5, 0.] and a
        # denominator written as [1.0, ] — confirm the intended coefficients;
        # the commented-out block below suggests kp/tau_r gains were planned.
        self.yaw_damper = matlab.tf([0.5, 0.],[1.0, ],ts_control)
        #
        # num=np.array([[AP.yaw_damper_kp, 0]]),
        # den=np.array([[1, 1/AP.yaw_damper_tau_r]]),
        # Ts=ts_control)

        # instantiate longitudinal controllers
        self.pitch_from_elevator = pid_control( #pd_control_with_rate(
                        kp=AP.pitch_kp,
                        kd=AP.pitch_kd,
                        limit=np.radians(45))
        self.altitude_from_pitch = pid_control( #pi_control(
                        kp=AP.altitude_kp,
                        ki=AP.altitude_ki,
                        Ts=ts_control,
                        limit=np.radians(30))
        self.airspeed_from_throttle = pid_control( #pi_control(
                        kp=AP.airspeed_throttle_kp,
                        ki=AP.airspeed_throttle_ki,
                        Ts=ts_control,
                        limit=1.5,
                        throttle_flag=True)
        self.commanded_state = msg_state()

    def update(self, cmd, state):
        """One control step.

        Returns ``(delta, commanded_state)`` where ``delta`` is the 4x1
        array ``[[delta_e], [delta_t], [delta_a], [delta_r]]``.
        """
        # lateral autopilot: course -> roll command -> aileron; sideslip -> rudder
        phi_c = cmd.phi_feedforward + self.course_from_roll.update(cmd.course_command,state.chi,reset_flag=True) #cmd.course_command
        # delta_a = -8.13462186e-09 # Trim state
        delta_a = self.roll_from_aileron.update_with_rate(phi_c, state.phi, state.p) # Controller based on chi command#
        # delta_r = -1.21428507e-08
        delta_r = self.sideslip_from_rudder.update(0,state.beta)

        # longitudinal autopilot: altitude -> pitch command -> elevator; airspeed -> throttle
        h_c = cmd.altitude_command
        # NOTE(review): theta_c is immediately overwritten by the altitude loop
        # below, so the pi/16 seed is dead code (possibly a debugging leftover).
        theta_c = np.pi/16
        theta_c = self.altitude_from_pitch.update(h_c, state.h)
        # delta_e = -1.24785989e-01
        delta_e = self.pitch_from_elevator.update_with_rate(theta_c, state.theta, state.q)
        # delta_t = 3.14346798e-01 # Trim state
        delta_t = self.airspeed_from_throttle.update(cmd.airspeed_command, state.Va)

        # construct output and commanded states
        delta = np.array([[delta_e], [delta_t], [delta_a], [delta_r]])
        self.commanded_state.h = cmd.altitude_command
        self.commanded_state.Va = cmd.airspeed_command
        self.commanded_state.phi = phi_c
        self.commanded_state.theta = theta_c
        self.commanded_state.chi = cmd.course_command
        return delta, self.commanded_state

    def saturate(self, input, low_limit, up_limit):
        # Clamp input to the closed interval [low_limit, up_limit].
        if input <= low_limit:
            output = low_limit
        elif input >= up_limit:
            output = up_limit
        else:
            output = input
        return output
| 40.273684
| 133
| 0.575536
|
import sys
import numpy as np
sys.path.append('..')
import parameters.control_parameters as AP
from chap6.pid_controlBrendon import pid_controlfrom message_types.msg_state import msg_state
from tools.tools import Euler2Quaternion, Quaternion2Euler
from control import matlab
class autopilot:
def __init__(self, ts_control):
self.roll_from_aileron = pid_control( kp=AP.roll_kp,
kd=AP.roll_kd,
Ts=ts_control,
limit=np.radians(45))
self.course_from_roll = pid_control( kp=AP.course_kp,
ki=AP.course_ki,
Ts=ts_control,
limit=np.radians(30))
self.sideslip_from_rudder = pid_control( kp=AP.sideslip_kp,
ki=AP.sideslip_ki,
Ts=ts_control,
limit=np.radians(45))
self.yaw_damper = matlab.tf([0.5, 0.],[1.0, ],ts_control)
self.pitch_from_elevator = pid_control( kp=AP.pitch_kp,
kd=AP.pitch_kd,
limit=np.radians(45))
self.altitude_from_pitch = pid_control( kp=AP.altitude_kp,
ki=AP.altitude_ki,
Ts=ts_control,
limit=np.radians(30))
self.airspeed_from_throttle = pid_control( kp=AP.airspeed_throttle_kp,
ki=AP.airspeed_throttle_ki,
Ts=ts_control,
limit=1.5,
throttle_flag=True)
self.commanded_state = msg_state()
def update(self, cmd, state):
phi_c = cmd.phi_feedforward + self.course_from_roll.update(cmd.course_command,state.chi,reset_flag=True) delta_a = self.roll_from_aileron.update_with_rate(phi_c, state.phi, state.p) delta_r = self.sideslip_from_rudder.update(0,state.beta)
h_c = cmd.altitude_command
theta_c = np.pi/16
theta_c = self.altitude_from_pitch.update(h_c, state.h)
delta_e = self.pitch_from_elevator.update_with_rate(theta_c, state.theta, state.q)
delta_t = self.airspeed_from_throttle.update(cmd.airspeed_command, state.Va)
delta = np.array([[delta_e], [delta_t], [delta_a], [delta_r]])
self.commanded_state.h = cmd.altitude_command
self.commanded_state.Va = cmd.airspeed_command
self.commanded_state.phi = phi_c
self.commanded_state.theta = theta_c
self.commanded_state.chi = cmd.course_command
return delta, self.commanded_state
def saturate(self, input, low_limit, up_limit):
if input <= low_limit:
output = low_limit
elif input >= up_limit:
output = up_limit
else:
output = input
return output
| true
| true
|
1c4960629fe2ffb245ea6b44937e597dbeb76aeb
| 178
|
py
|
Python
|
rund.py
|
devvspaces/mailfinder
|
a4d50a0d3bf80741e33df69c74c94daffebc435b
|
[
"MIT"
] | null | null | null |
rund.py
|
devvspaces/mailfinder
|
a4d50a0d3bf80741e33df69c74c94daffebc435b
|
[
"MIT"
] | null | null | null |
rund.py
|
devvspaces/mailfinder
|
a4d50a0d3bf80741e33df69c74c94daffebc435b
|
[
"MIT"
] | null | null | null |
import re

# Scan a local CSV dump for e-mail addresses and print the unique set.
with open('test.csv','r') as f:
    response = f.read()

# Case-insensitive match. NOTE(review): the pattern only catches ".com"
# addresses — other TLDs in the file will be silently skipped.
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.com", response, re.I))
print(new_emails)
| 29.666667
| 89
| 0.573034
|
import re
with open('test.csv','r') as f:
response = f.read()
new_emails = set(re.findall(r"[a-z0-9\.\-+_]+@[a-z0-9\.\-+_]+\.com", response, re.I))
print(new_emails)
| true
| true
|
1c496072aafd1dbd2e7caef2c92a7a1ad00fdb4b
| 403
|
py
|
Python
|
recipes/construct_webapp_class_manually.py
|
ammarsys/pyanywhere-wrapper
|
d8cde2d29900c25fc7ab3cd8103923f727b5dade
|
[
"MIT"
] | 5
|
2021-06-25T14:34:52.000Z
|
2021-07-04T14:15:13.000Z
|
recipes/construct_webapp_class_manually.py
|
ammarsys/pyanywhere-wrapper
|
d8cde2d29900c25fc7ab3cd8103923f727b5dade
|
[
"MIT"
] | 1
|
2021-12-12T00:47:25.000Z
|
2022-01-24T17:19:43.000Z
|
recipes/construct_webapp_class_manually.py
|
ammarsys/pyanywhere-wrapper
|
d8cde2d29900c25fc7ab3cd8103923f727b5dade
|
[
"MIT"
] | 1
|
2021-12-14T15:44:52.000Z
|
2021-12-14T15:44:52.000Z
|
from pyaww.webapp import WebApp
# Construct a WebApp object by hand from a raw dict, instead of fetching it
# through a pyaww client call. Keys mirror the PythonAnywhere webapp payload.
my_webapp = WebApp(
    {'id': 123,
     'user': 'sampleuser',
     'domain_name': 'something.com',
     'python_version': '3.8',
     'source_directory': '/home/something/',
     'working_directory': '/home/something/',
     'virtualenv_path': '/home/something/venv',
     'expiry': 'some date',
     'force_https': False
     }
)

# do stuff with the webapp object now
| 25.1875
| 47
| 0.620347
|
from pyaww.webapp import WebApp
my_webapp = WebApp(
{'id': 123,
'user': 'sampleuser',
'domain_name': 'something.com',
'python_version': '3.8',
'source_directory': '/home/something/',
'working_directory': '/home/something/',
'virtualenv_path': '/home/something/venv',
'expiry': 'some date',
'force_https': False
}
)
| true
| true
|
1c4961378a4b0a0bdb51ede423c33dd48070c102
| 10,890
|
py
|
Python
|
templates/games.py
|
tiendat101001/PythonProgrammingPuzzles
|
e4a6504bf783ad1ab93686cedd5d1818af92a5e4
|
[
"MIT"
] | null | null | null |
templates/games.py
|
tiendat101001/PythonProgrammingPuzzles
|
e4a6504bf783ad1ab93686cedd5d1818af92a5e4
|
[
"MIT"
] | null | null | null |
templates/games.py
|
tiendat101001/PythonProgrammingPuzzles
|
e4a6504bf783ad1ab93686cedd5d1818af92a5e4
|
[
"MIT"
] | null | null | null |
"""
Solve some two-player games
"""
from problems import Problem
from typing import List
# Hint: subclass Problem.Debug for quick testing. Run make_dataset.py to make the dataset
# See https://github.com/microsoft/PythonProgrammingPuzzles/wiki/How-to-add-a-puzzle for more info
class Nim(Problem):
    """Compute optimal play for the classic two-player game [Nim](https://en.wikipedia.org/wiki/Nim)
    In the game of Nim, there are a number of heaps of objects. In each step, a player removes one or more
    objects from a non-empty heap. The player who takes the last object wins. Nim has an elegant theory
    for optimal play based on the xor of the bits.
    """
    timeout = 10  # harder than most problems, get extra time
    @staticmethod
    def sat(cert: List[List[int]], heaps=[5, 9]):  # cert is a sufficient list of desirable states to leave for opponent
        good_leaves = {tuple(h) for h in cert}  # for efficiency, we keep track of h as a tuple of n non-negative ints
        cache = {}  # memo: position tuple -> whether it is a good leave
        def is_good_leave(h):
            # Verify cert's claim about position h against every legal Nim
            # move (reduce exactly one heap); asserts fail if cert is wrong.
            if h in cache:
                return cache[h]
            next_states = [(*h[:i], k, *h[i + 1:]) for i in range(len(h)) for k in range(h[i])]
            conjecture = (h in good_leaves)
            if conjecture:  # check that it is a good leave: no move reaches another good leave
                assert not any(is_good_leave(s) for s in next_states)
            else:  # check that it is a bad leave, only need to check one move
                assert is_good_leave(next(s for s in next_states if s in good_leaves))
            cache[h] = conjecture
            return conjecture
        return is_good_leave(tuple(heaps)) == (tuple(heaps) in good_leaves)
    @staticmethod
    def sol(heaps):
        import itertools
        def val(h):  # return True if h is a good state to leave things in
            # Sprague-Grundy theory: a Nim position is a loss for the player
            # to move iff the xor (nim-sum) of the heap sizes is zero.
            xor = 0
            for i in h:
                xor ^= i
            return xor == 0
        # Enumerate the full state space (every heap from 0 to its start size)
        # and keep the zero-nim-sum positions.
        return [list(h) for h in itertools.product(*[range(i + 1) for i in heaps]) if val(h)]
    def gen_random(self):
        # Sample up to 9 heaps of size < 10; keep only instances whose state
        # space (product of heap_size + 1) is small enough to verify quickly.
        num_heaps = self.random.randrange(10)
        heaps = [self.random.randrange(10) for _ in range(num_heaps)]
        prod = 1
        for i in heaps:
            prod *= i + 1
        if prod < 10 ** 6:
            self.add(dict(heaps=heaps))
class Mastermind(Problem):
    """Compute a strategy for winning in [mastermind](https://en.wikipedia.org/wiki/Mastermind_%28board_game%29)
    in a given number of guesses.
    Colors are represented by the letters A-F. The representation is as follows.
    A transcript is a string describing the game so far. It consists of rows separated by newlines.
    Each row has 4 letters A-F followed by a space and then two numbers indicating how many are exactly right
    and how many are right but in the wrong location. A sample transcript is as follows:
    ```
    AABB 11
    ABCD 21
    ABDC
    ```
    This is the transcript as the game is in progress. The complete transcript might be:
    ```
    AABB 11
    ABCD 21
    ABDC 30
    ABDE 40
    ```
    A winning strategy is described by a list of transcripts to visit. The next guess can be determined from
    those partial transcripts.
    """
    timeout = 10
    @staticmethod
    def sat(transcripts: List[str], max_moves=10):
        COLORS = "ABCDEF"
        def helper(secret: str, transcript=""):
            # Play the strategy encoded in `transcripts` against `secret`;
            # True iff the secret is found within max_moves guesses.
            if transcript.count("\n") == max_moves:
                return False
            # Next guess = last 4 chars of the shortest transcript extending the current one.
            guess = min([t for t in transcripts if t.startswith(transcript)], key=len)[-4:]
            if guess == secret:
                return True
            assert all(g in COLORS for g in guess)
            # Standard mastermind feedback: exact matches, then color-only matches.
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return helper(secret, transcript + f"{guess} {sum(perfect.values())}{almost}\n")
        # The strategy must win against every one of the 6^4 possible secrets.
        return all(helper(r + s + t + u) for r in COLORS for s in COLORS for t in COLORS for u in COLORS)
    @staticmethod
    def sol(max_moves):
        COLORS = "ABCDEF"
        transcripts = []
        ALL = [r + s + t + u for r in COLORS for s in COLORS for t in COLORS for u in COLORS]
        def score(secret, guess):
            # Feedback string "<exact><almost>" for guess vs secret.
            perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
            almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
            return f"{sum(perfect.values())}{almost}"
        def mastermind(transcript="AABB", feasible=ALL):  # mastermind moves
            # Record this transcript, then branch on every feedback the
            # codemaker could still legally give; "40" means solved.
            transcripts.append(transcript)
            assert transcript.count("\n") <= max_moves
            guess = transcript[-4:]
            feasibles = {}
            for secret in feasible:
                scr = score(secret, guess)
                if scr not in feasibles:
                    feasibles[scr] = []
                feasibles[scr].append(secret)
            for scr, secrets in feasibles.items():
                if scr != "40":
                    guesser(transcript + f" {scr}\n", secrets)
        def guesser(transcript, feasible):  # guesser moves
            # Minimax heuristic: pick the feasible guess whose worst-case
            # feedback class leaves the fewest remaining candidates.
            def max_ambiguity(guess):
                by_score = {}
                for secret2 in feasible:
                    scr = score(secret2, guess)
                    if scr not in by_score:
                        by_score[scr] = 0
                    by_score[scr] += 1
                # for OPTIMAL solution, use return max(by_score.values()) + 0.5 * (guess not in feasible) instead of:
                return max(by_score.values())
            # for optimal solution use guess = min(ALL, key=max_ambiguity) instead of:
            guess = min(feasible, key=max_ambiguity)
            mastermind(transcript + guess, feasible)
        mastermind()
        return transcripts
    def gen(self, target_num_instances):
        # One instance per move budget (target_num_instances is unused here).
        for max_moves in [6, 8, 10]:
            self.add(dict(max_moves=max_moves))
class TicTacToeX(Problem):
    """Compute a strategy for X (first player) in tic-tac-toe that guarantees a tie.
    We are looking for a strategy for X that, no matter what the opponent does, X does not lose.
    A board is represented as a 9-char string like an X in the middle would be "....X...." and a
    move is an integer 0-8. The answer is a list of "good boards" that X aims for, so no matter what O does there
    is always good board that X can get to with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Encode each good board as a pair of 9-bit masks (X squares, O squares).
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[m] is True iff bitmask m covers a winning line (the 8 magic
        # constants are the 3 rows, 3 columns, and 2 diagonals as bitmasks).
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):  # returns True if X has a forced tie/win assuming it's X's turn to move.
            # X's move is dictated by the cert: the square that reaches a good board.
            x |= 1 << next(i for i in range(9) if (x | (1 << i), o) in board_bit_reps)
            return not win[o] and (win[x] or all((x | o) & (1 << i) or tie(x, o | (1 << i)) for i in range(9)))
        return tie(0, 0)
    @staticmethod
    def sol():
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]  # 9-bit representation
        good_boards = []
        def x_move(x, o):  # returns True if x wins or ties, x's turn to move
            if win[o]:
                return False
            if x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and o_move(x | (1 << i), o):
                    # Record the board *after* X plays square i as a good board.
                    good_boards.append("".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + (i == j)] for j in range(9)))
                    return True
            return False  # every X move lets O win
        def o_move(x, o):  # returns True if x wins or ties, o's turn to move
            if win[x] or x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and not x_move(x, o | (1 << i)):
                    return False
            return True  # X survives every O reply
        res = x_move(0, 0)
        assert res
        return good_boards
class TicTacToeO(Problem):
    """Compute a strategy for O (second player) in tic-tac-toe that guarantees a tie.
    We are looking for a strategy for O that, no matter what the opponent does, O does not lose.
    A board is represented as a 9-char string like an X in the middle would be "....X...." and a
    move is an integer 0-8. The answer is a list of "good boards" that O aims for, so no matter what X does there
    is always good board that O can get to with a single move.
    """
    @staticmethod
    def sat(good_boards: List[str]):
        # Encode each good board as a pair of 9-bit masks (X squares, O squares).
        board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
        # win[m] is True iff bitmask m covers a winning line (rows/cols/diagonals).
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
        def tie(x, o):  # returns True if O has a forced tie/win. It's O's turn to move.
            if o | x != 511:
                # O's move is dictated by the cert: the square that reaches a good board.
                o |= 1 << next(i for i in range(9) if (x, o | (1 << i)) in board_bit_reps)
            return not win[x] and (win[o] or all((x | o) & (1 << i) or tie(x | (1 << i), o) for i in range(9)))
        # The cert must survive every possible opening move by X.
        return all(tie(1 << i, 0) for i in range(9))
    @staticmethod
    def sol():
        win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]  # 9-bit representation
        good_boards = []
        def x_move(x, o):  # returns True if o wins or ties, x's turn to move
            if win[o] or x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and not o_move(x | (1 << i), o):
                    return False
            return True  # O wins/ties
        def o_move(x, o):  # returns True if o wins or ties, o's turn to move
            if win[x]:
                return False
            if x | o == 511:
                return True
            for i in range(9):
                if (x | o) & (1 << i) == 0 and x_move(x, o | (1 << i)):
                    # Record the board *after* O plays square i as a good board.
                    good_boards.append(
                        "".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + 2 * (i == j)] for j in range(9)))
                    return True
            return False  # every O move lets X win
        res = x_move(0, 0)
        assert res
        return good_boards
class RockPaperScissors(Problem):
    """Find optimal strategy for Rock-Paper-Scissors zero-sum game
    Find the distribution that guarantees maximum expected value of 0
    """
    @staticmethod
    def sat(probs: List[float]):  # rock prob, paper prob, scissors prob
        # A valid answer is a genuine 3-outcome probability distribution...
        assert len(probs) == 3 and abs(sum(probs) - 1) < 1e-6
        # ...in which no move has a positive edge over its neighbour in the
        # rock->paper->scissors cycle (i.e. the distribution is balanced).
        edges = [probs[(i + 2) % 3] - probs[(i + 1) % 3] for i in range(3)]
        return max(edges) < 1e-6
    @staticmethod
    def sol():
        # The unique equilibrium: play each move with equal probability.
        third = 1 / 3
        return [third, third, third]
if __name__ == "__main__":
    # Run the built-in self-checks for every Problem subclass in this module.
    Problem.debug_problems()
| 38.34507
| 120
| 0.559963
|
from problems import Problem
from typing import List
class Nim(Problem):
timeout = 10
@staticmethod
def sat(cert: List[List[int]], heaps=[5, 9]): good_leaves = {tuple(h) for h in cert} cache = {}
def is_good_leave(h):
if h in cache:
return cache[h]
next_states = [(*h[:i], k, *h[i + 1:]) for i in range(len(h)) for k in range(h[i])]
conjecture = (h in good_leaves)
if conjecture: assert not any(is_good_leave(s) for s in next_states)
else: assert is_good_leave(next(s for s in next_states if s in good_leaves))
cache[h] = conjecture
return conjecture
return is_good_leave(tuple(heaps)) == (tuple(heaps) in good_leaves)
@staticmethod
def sol(heaps):
import itertools
def val(h): xor = 0
for i in h:
xor ^= i
return xor == 0
return [list(h) for h in itertools.product(*[range(i + 1) for i in heaps]) if val(h)]
def gen_random(self):
num_heaps = self.random.randrange(10)
heaps = [self.random.randrange(10) for _ in range(num_heaps)]
prod = 1
for i in heaps:
prod *= i + 1
if prod < 10 ** 6:
self.add(dict(heaps=heaps))
class Mastermind(Problem):
timeout = 10
@staticmethod
def sat(transcripts: List[str], max_moves=10):
COLORS = "ABCDEF"
def helper(secret: str, transcript=""):
if transcript.count("\n") == max_moves:
return False
guess = min([t for t in transcripts if t.startswith(transcript)], key=len)[-4:]
if guess == secret:
return True
assert all(g in COLORS for g in guess)
perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
return helper(secret, transcript + f"{guess} {sum(perfect.values())}{almost}\n")
return all(helper(r + s + t + u) for r in COLORS for s in COLORS for t in COLORS for u in COLORS)
@staticmethod
def sol(max_moves):
COLORS = "ABCDEF"
transcripts = []
ALL = [r + s + t + u for r in COLORS for s in COLORS for t in COLORS for u in COLORS]
def score(secret, guess):
perfect = {c: sum([g == s == c for g, s in zip(guess, secret)]) for c in COLORS}
almost = sum(min(guess.count(c), secret.count(c)) - perfect[c] for c in COLORS)
return f"{sum(perfect.values())}{almost}"
def mastermind(transcript="AABB", feasible=ALL): transcripts.append(transcript)
assert transcript.count("\n") <= max_moves
guess = transcript[-4:]
feasibles = {}
for secret in feasible:
scr = score(secret, guess)
if scr not in feasibles:
feasibles[scr] = []
feasibles[scr].append(secret)
for scr, secrets in feasibles.items():
if scr != "40":
guesser(transcript + f" {scr}\n", secrets)
def guesser(transcript, feasible): def max_ambiguity(guess):
by_score = {}
for secret2 in feasible:
scr = score(secret2, guess)
if scr not in by_score:
by_score[scr] = 0
by_score[scr] += 1
return max(by_score.values())
guess = min(feasible, key=max_ambiguity)
mastermind(transcript + guess, feasible)
mastermind()
return transcripts
def gen(self, target_num_instances):
for max_moves in [6, 8, 10]:
self.add(dict(max_moves=max_moves))
class TicTacToeX(Problem):
@staticmethod
def sat(good_boards: List[str]):
board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
def tie(x, o): x |= 1 << next(i for i in range(9) if (x | (1 << i), o) in board_bit_reps)
return not win[o] and (win[x] or all((x | o) & (1 << i) or tie(x, o | (1 << i)) for i in range(9)))
return tie(0, 0)
@staticmethod
def sol():
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
good_boards = []
def x_move(x, o): if win[o]:
return False
if x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and o_move(x | (1 << i), o):
good_boards.append("".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + (i == j)] for j in range(9)))
return True
return False # O wins
def o_move(x, o): # returns True if x wins or ties, x's turn to move
if win[x] or x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and not x_move(x, o | (1 << i)):
return False
return True
res = x_move(0, 0)
assert res
return good_boards
class TicTacToeO(Problem):
@staticmethod
def sat(good_boards: List[str]):
board_bit_reps = {tuple(sum(1 << i for i in range(9) if b[i] == c) for c in "XO") for b in good_boards}
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
def tie(x, o): if o | x != 511:
o |= 1 << next(i for i in range(9) if (x, o | (1 << i)) in board_bit_reps)
return not win[x] and (win[o] or all((x | o) & (1 << i) or tie(x | (1 << i), o) for i in range(9)))
return all(tie(1 << i, 0) for i in range(9))
@staticmethod
def sol():
win = [any(i & w == w for w in [7, 56, 73, 84, 146, 273, 292, 448]) for i in range(512)]
good_boards = []
def x_move(x, o): if win[o] or x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and not o_move(x | (1 << i), o):
return False
return True # O wins/ties
def o_move(x, o): # returns True if o wins or ties, o's turn to move
if win[x]:
return False
if x | o == 511:
return True
for i in range(9):
if (x | o) & (1 << i) == 0 and x_move(x, o | (1 << i)):
good_boards.append(
"".join(".XO"[((x >> j) & 1) + 2 * ((o >> j) & 1) + 2 * (i == j)] for j in range(9)))
return True
return False
res = x_move(0, 0)
assert res
return good_boards
class RockPaperScissors(Problem):
@staticmethod
def sat(probs: List[float]): assert len(probs) == 3 and abs(sum(probs) - 1) < 1e-6
return max(probs[(i + 2) % 3] - probs[(i + 1) % 3] for i in range(3)) < 1e-6
@staticmethod
def sol():
return [1 / 3] * 3
if __name__ == "__main__":
Problem.debug_problems()
| true
| true
|
1c4962bf3e816c1cf57f29913651635757597c12
| 6,853
|
py
|
Python
|
cogs/tag.py
|
theoxan/BC_HelperBot
|
0d34d6364588b5649ef4689727197e1cc8a63d36
|
[
"Apache-2.0"
] | null | null | null |
cogs/tag.py
|
theoxan/BC_HelperBot
|
0d34d6364588b5649ef4689727197e1cc8a63d36
|
[
"Apache-2.0"
] | null | null | null |
cogs/tag.py
|
theoxan/BC_HelperBot
|
0d34d6364588b5649ef4689727197e1cc8a63d36
|
[
"Apache-2.0"
] | null | null | null |
import os
from os import path
import json
from difflib import SequenceMatcher
import discord
from discord.ext import commands
from schema import SchemaError
from .utils.misc import tag_shema
from .utils import checkers, misc
class Tag(commands.Cog):
    def __init__(self, bot):
        """Load every tag JSON file under ``ressources/tags/<category>/``.

        Each file is validated against ``tag_shema``; invalid or unreadable
        files are logged via the bot's logger and skipped.
        """
        self.bot = bot
        # Map: category name -> {tag file stem -> full path to the JSON file}.
        tags_folder = {
            category: {
                path.splitext(tag_name)[0]: path.join(path.join('ressources/tags/', category), tag_name) for tag_name in os.listdir(path.join('ressources/tags', category)) if path.isdir(path.join('ressources/tags', category))
            } for category in os.listdir('ressources/tags/') if os.path.isdir(os.path.join('ressources/tags/', category))
        }
        def complete_values(obj, ref=None):
            # Recursively replace "*" placeholder values with the matching
            # value from `ref`; in a list of dicts, the first element serves
            # as the template (`ref`) for the following elements.
            if isinstance(obj, dict):
                for key, value in obj.items():
                    if value == "*" and ref:
                        obj[key] = ref[key]
                    else:
                        obj[key] = complete_values(value, ref=ref[key] if ref else ref)
            elif isinstance(obj, list) and all(isinstance(sub_obj, dict) for sub_obj in obj):
                for i, sub_obj in enumerate(obj):
                    if i == 0 and not ref: continue
                    obj[i] = complete_values(obj[i], ref=ref[i] if ref else obj[0])
            return obj
        # Final structure: self.tags[category][tag "name" field] = tag dict.
        self.tags = {}
        for category_name, tags_infos in tags_folder.items():
            self.tags[category_name] = {}
            for tag_name, tag_path in tags_infos.items():
                try:
                    with open(tag_path, "r", encoding='utf-8') as f:
                        loaded_tag = json.load(f)
                    try:
                        loaded_tag = tag_shema.validate(loaded_tag)
                    except SchemaError as e:
                        self.bot.logger.warning(f'The tag {tag_name} from category {category_name} is improper.\n{e}')
                        continue
                    self.tags[category_name][loaded_tag["name"]] = complete_values(loaded_tag)
                except Exception as e:
                    # Best-effort: one broken file must not stop the other tags from loading.
                    print(e)
                    self.bot.logger.warning(f"The tag {tag_path} cannot be loaded")
@commands.command(
name="tag",
usage="/tag <category> (<tag_name>|'list')",
description="Obtenir de l'aide rapidement"
)
@checkers.authorized_channels()
async def _tag(self, ctx, category=None, *, query=None):
category_tags = self.tags.get(category) # category_tags correspond a un dictionnaire avec plusieurs commandes
if category_tags is None and category is not None:
similors = ((name, SequenceMatcher(None, name, category).ratio()) for name in self.tags.keys())
similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
if similors[0][1] > 0.8:
category = similors[0][0] # nom de la catégorie
category_tags = self.tags.get(category)
if category_tags is None:
format_list = lambda keys: "\n".join([f"- `{key}`" for key in keys])
embed = discord.Embed(
title="Catégorie non trouvée. Essayez parmi :",
description=format_list(self.tags.keys()),
color=discord.Color.from_rgb(47, 49, 54)
)
embed.set_footer(text=ctx.command.usage)
message = await ctx.send(embed=embed)
return await misc.delete_with_emote(ctx, message)
if query is None or query == "list":
format_list = lambda tags_values: "\n".join([f"- `{tag.get('name')}` : {tag.get('description')}" for tag in tags_values])
message = await ctx.channel.send(embed=discord.Embed(title=f"Voici les tags de la catégorie `{category}` :",
description=format_list(category_tags.values()),
color=discord.Color.from_rgb(47, 49, 54))
)
return await misc.delete_with_emote(ctx, message)
tag = category_tags.get(query) or discord.utils.find(lambda tag_: tag_.get('aliases') and query in tag_['aliases'], category_tags.values())
if tag is None:
similors = ((name, SequenceMatcher(None, name, query).ratio()) for name in category_tags.keys())
similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
if similors[0][1] > 0.8:
query = similors[0][0] # nom du tag
tag = category_tags.get(query)
else:
similar_text = f"voulez vous-vous dire `{similors[0][0]}` ? Sinon "
return await ctx.send(f"Le tag n'a pas été trouvé, {similar_text if similors[0][1] > 0.5 else ''}regardez `/tag list`", delete_after=10)
message = None
response = tag.get('response')
choices = response.get('choices')
if choices:
reactions = ['0️⃣', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣']
message = await ctx.send("__Choisissez la cible :__\n"+'\n'.join([f"{reactions[i]} - `{choice['choice_name']}`" for i, choice in enumerate(choices)]))
self.bot.loop.create_task(misc.add_reactions(message, reactions[:len(choices)]))
try:
reaction, _ = await self.bot.wait_for('reaction_add', timeout=120, check=lambda react, usr: str(react.emoji) in reactions[:len(choices)] and usr.id == ctx.author.id and react.message.id == message.id)
except TimeoutError:
return await message.delete()
try: await message.clear_reactions()
except: pass
response = choices[reactions.index(str(reaction.emoji))]
embed = discord.Embed.from_dict(response.get("embed"))
embed.color = discord.Color.from_rgb(47, 49, 54)
embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
text = f'/tag {category} {query}'
url = discord.Embed.Empty
creator = await self.bot.fetch_user(tag.get('author')) if tag.get('author') else None
if creator:
text += f' • par {creator.name}#{creator.discriminator}'
url = creator.avatar_url
embed.set_footer(
text=text,
icon_url=url
)
if message: await message.edit(embed=embed, content="")
else: message = await ctx.channel.send(embed=embed)
try: await ctx.message.delete() # suppression de la commande
except: pass
try: await misc.delete_with_emote(ctx, message)
except: pass
def setup(bot):
    """discord.py extension entry point: register the Tag cog on *bot*."""
    cog = Tag(bot)
    bot.add_cog(cog)
    bot.logger.info("Extension [tag] loaded successfully.")
| 44.79085
| 229
| 0.569678
|
import os
from os import path
import json
from difflib import SequenceMatcher
import discord
from discord.ext import commands
from schema import SchemaError
from .utils.misc import tag_shema
from .utils import checkers, misc
class Tag(commands.Cog):
def __init__(self, bot):
self.bot = bot
tags_folder = {
category: {
path.splitext(tag_name)[0]: path.join(path.join('ressources/tags/', category), tag_name) for tag_name in os.listdir(path.join('ressources/tags', category)) if path.isdir(path.join('ressources/tags', category))
} for category in os.listdir('ressources/tags/') if os.path.isdir(os.path.join('ressources/tags/', category))
}
def complete_values(obj, ref=None):
if isinstance(obj, dict):
for key, value in obj.items():
if value == "*" and ref:
obj[key] = ref[key]
else:
obj[key] = complete_values(value, ref=ref[key] if ref else ref)
elif isinstance(obj, list) and all(isinstance(sub_obj, dict) for sub_obj in obj):
for i, sub_obj in enumerate(obj):
if i == 0 and not ref: continue
obj[i] = complete_values(obj[i], ref=ref[i] if ref else obj[0])
return obj
self.tags = {}
for category_name, tags_infos in tags_folder.items():
self.tags[category_name] = {}
for tag_name, tag_path in tags_infos.items():
try:
with open(tag_path, "r", encoding='utf-8') as f:
loaded_tag = json.load(f)
try:
loaded_tag = tag_shema.validate(loaded_tag)
except SchemaError as e:
self.bot.logger.warning(f'The tag {tag_name} from category {category_name} is improper.\n{e}')
continue
self.tags[category_name][loaded_tag["name"]] = complete_values(loaded_tag)
except Exception as e:
print(e)
self.bot.logger.warning(f"The tag {tag_path} cannot be loaded")
@commands.command(
name="tag",
usage="/tag <category> (<tag_name>|'list')",
description="Obtenir de l'aide rapidement"
)
@checkers.authorized_channels()
async def _tag(self, ctx, category=None, *, query=None):
category_tags = self.tags.get(category) # category_tags correspond a un dictionnaire avec plusieurs commandes
if category_tags is None and category is not None:
similors = ((name, SequenceMatcher(None, name, category).ratio()) for name in self.tags.keys())
similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
if similors[0][1] > 0.8:
category = similors[0][0] # nom de la catégorie
category_tags = self.tags.get(category)
if category_tags is None:
format_list = lambda keys: "\n".join([f"- `{key}`" for key in keys])
embed = discord.Embed(
title="Catégorie non trouvée. Essayez parmi :",
description=format_list(self.tags.keys()),
color=discord.Color.from_rgb(47, 49, 54)
)
embed.set_footer(text=ctx.command.usage)
message = await ctx.send(embed=embed)
return await misc.delete_with_emote(ctx, message)
if query is None or query == "list":
format_list = lambda tags_values: "\n".join([f"- `{tag.get('name')}` : {tag.get('description')}" for tag in tags_values])
message = await ctx.channel.send(embed=discord.Embed(title=f"Voici les tags de la catégorie `{category}` :",
description=format_list(category_tags.values()),
color=discord.Color.from_rgb(47, 49, 54))
)
return await misc.delete_with_emote(ctx, message)
tag = category_tags.get(query) or discord.utils.find(lambda tag_: tag_.get('aliases') and query in tag_['aliases'], category_tags.values())
if tag is None:
similors = ((name, SequenceMatcher(None, name, query).ratio()) for name in category_tags.keys())
similors = sorted(similors, key=lambda couple: couple[1], reverse=True)
if similors[0][1] > 0.8:
query = similors[0][0] # nom du tag
tag = category_tags.get(query)
else:
similar_text = f"voulez vous-vous dire `{similors[0][0]}` ? Sinon "
return await ctx.send(f"Le tag n'a pas été trouvé, {similar_text if similors[0][1] > 0.5 else ''}regardez `/tag list`", delete_after=10)
message = None
response = tag.get('response')
choices = response.get('choices')
if choices:
reactions = ['0️⃣', '1️⃣', '2️⃣', '3️⃣', '4️⃣', '5️⃣', '6️⃣', '7️⃣', '8️⃣', '9️⃣']
message = await ctx.send("__Choisissez la cible :__\n"+'\n'.join([f"{reactions[i]} - `{choice['choice_name']}`" for i, choice in enumerate(choices)]))
self.bot.loop.create_task(misc.add_reactions(message, reactions[:len(choices)]))
try:
reaction, _ = await self.bot.wait_for('reaction_add', timeout=120, check=lambda react, usr: str(react.emoji) in reactions[:len(choices)] and usr.id == ctx.author.id and react.message.id == message.id)
except TimeoutError:
return await message.delete()
try: await message.clear_reactions()
except: pass
response = choices[reactions.index(str(reaction.emoji))]
embed = discord.Embed.from_dict(response.get("embed"))
embed.color = discord.Color.from_rgb(47, 49, 54)
embed.set_author(name=ctx.author.display_name, icon_url=ctx.author.avatar_url)
text = f'/tag {category} {query}'
url = discord.Embed.Empty
creator = await self.bot.fetch_user(tag.get('author')) if tag.get('author') else None
if creator:
text += f' • par {creator.name}#{creator.discriminator}'
url = creator.avatar_url
embed.set_footer(
text=text,
icon_url=url
)
if message: await message.edit(embed=embed, content="")
else: message = await ctx.channel.send(embed=embed)
        try: await ctx.message.delete()
        except: pass
try: await misc.delete_with_emote(ctx, message)
except: pass
def setup(bot):
bot.add_cog(Tag(bot))
bot.logger.info("Extension [tag] loaded successfully.")
| true
| true
|
1c4962dc7ba607d9e75b274ac8278eb1eb299cef
| 1,718
|
py
|
Python
|
Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 1
|
2019-09-23T15:51:45.000Z
|
2019-09-23T15:51:45.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | 5
|
2021-02-08T20:47:19.000Z
|
2022-03-12T00:35:44.000Z
|
Projects/Online Workouts/w3resource/Basic - Part-II/program-29.py
|
ivenpoker/Python-Projects
|
2975e1bd687ec8dbcc7a4842c13466cb86292679
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
#############################################################################
# #
# Program purpose: Find common divisor between two numbers in a given #
# pair. #
# Program Author : Happi Yvan <ivensteinpoker@gmail.com> #
# Creation Date : September 9, 2019 #
# #
#############################################################################
def find_divisor(num: int):
    """Return the sorted list of all positive divisors of *num*.

    For num <= 0 the range is empty, so the result is [].
    """
    # Fixed: the original used `num % x is 0`. `is` compares object identity,
    # which only "works" for small ints because of CPython's int caching and
    # raises a SyntaxWarning on modern Python; equality is the correct test.
    return [x for x in range(1, num + 1) if num % x == 0]
def find_intersections(list_a: list, list_b: list):
    """Return the elements of *list_a* that also occur in *list_b*.

    Order and duplicates of *list_a* are preserved, exactly like the original
    nested-scan version. Membership is tested against a set, turning the
    accidental O(len(a) * len(b)) scan into O(len(a) + len(b)); elements must
    therefore be hashable (always true for the int divisors used here).
    """
    members_b = set(list_b)
    return [item for item in list_a if item in members_b]
if __name__ == "__main__":
    # Read two integers (retrying on invalid input), then print their
    # divisors and the divisors they have in common.
    def read_int(prompt: str) -> int:
        """Prompt until the user types a valid integer; echo parse errors."""
        # Replaces two copy-pasted retry loops (and the dead int_a/int_b/cont
        # initializations) from the original with a single helper.
        while True:
            try:
                return int(input(prompt))
            except ValueError as ve:
                print(f"{ve}")

    int_a = read_int("Enter first number: ")
    int_b = read_int("Enter second number: ")
    div_a = find_divisor(int_a)
    div_b = find_divisor(int_b)
    print(f"Divisors of {int_a}: {div_a}")
    print(f"Divisors of {int_b}: {div_b}")
    print(f"Common divisors of {int_a} and {int_b}: "
          f"{find_intersections(list_a=div_a, list_b=div_b)}")
| 31.236364
| 77
| 0.438882
|
def find_divisor(num: int):
div_data = [x for x in range(1, num+1) if num % x is 0]
return div_data
def find_intersections(list_a: list, list_b: list):
main_inter = []
for x in range(len(list_a)):
if list_a[x] in list_b:
main_inter.append(list_a[x])
return main_inter
if __name__ == "__main__":
int_a = 0
int_b = 0
cont = True
while cont:
try:
int_a = int(input("Enter first number: "))
cont = False
except ValueError as ve:
print(f"{ve}")
cont = True
while cont:
try:
int_b = int(input("Enter second number: "))
cont = False
except ValueError as ve:
print(f"{ve}")
div_a = find_divisor(int_a)
div_b = find_divisor(int_b)
print(f"Divisors of {int_a}: {div_a}")
print(f"Divisors of {int_b}: {div_b}")
print(f"Common divisors of {int_a} and {int_b}: "
f"{find_intersections(list_a=div_a, list_b=div_b)}")
| true
| true
|
1c49648eb9b542c70be44a372ce24b2211d6407b
| 777
|
py
|
Python
|
yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py
|
getyoti/python
|
3df169145d5c818d0e79743768dde78e482eec9b
|
[
"MIT"
] | 9
|
2017-11-12T05:38:58.000Z
|
2021-08-04T16:33:26.000Z
|
yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py
|
getyoti/python
|
3df169145d5c818d0e79743768dde78e482eec9b
|
[
"MIT"
] | 237
|
2017-04-26T09:40:44.000Z
|
2022-02-24T10:29:43.000Z
|
yoti_python_sdk/doc_scan/session/retrieve/document_id_photo_response.py
|
getyoti/python
|
3df169145d5c818d0e79743768dde78e482eec9b
|
[
"MIT"
] | 9
|
2017-05-02T11:41:44.000Z
|
2021-04-28T13:49:20.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class DocumentIdPhotoResponse(object):
    """
    Represents the document ID photo response
    """

    def __init__(self, data=None):
        """
        :param data: the data to parse
        :type data: dict or None
        """
        payload = dict() if data is None else data
        # Wrap the raw media payload only when the key is actually present.
        self.__media = MediaResponse(payload["media"]) if "media" in payload else None

    @property
    def media(self):
        """
        The media object for the document ID photo

        :return: the media
        :rtype: MediaResponse or None
        """
        return self.__media
| 22.852941
| 82
| 0.584299
|
from __future__ import unicode_literals
from yoti_python_sdk.doc_scan.session.retrieve.media_response import MediaResponse
class DocumentIdPhotoResponse(object):
def __init__(self, data=None):
if data is None:
data = dict()
if "media" in data.keys():
self.__media = MediaResponse(data["media"])
else:
self.__media = None
@property
def media(self):
return self.__media
| true
| true
|
1c49663cca5de7c6f1eee0f2b738acf05391f261
| 6,920
|
py
|
Python
|
gcpdiag/queries/logs.py
|
taylorjstacey/gcpdiag
|
84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4
|
[
"Apache-2.0"
] | null | null | null |
gcpdiag/queries/logs.py
|
taylorjstacey/gcpdiag
|
84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4
|
[
"Apache-2.0"
] | null | null | null |
gcpdiag/queries/logs.py
|
taylorjstacey/gcpdiag
|
84ba1725cd3ed326b8da3e64bdd6569ed7ef20a4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Queries related to Cloud Logging.
The main functionality is querying log entries, which is supposed to be used as
follows:
1. Call query() with the logs query parameters that you need. This
returns a LogsQuery object which can be used to retrieve the logs later.
2. Call execute_queries() to execute all log query jobs. Similar
queries will be grouped together to minimize the number of required API
calls.
Multiple queries will be done in parallel, while always respecting the
Cloud Logging limit of 60 queries per 60 seconds.
3. Use the entries property on the LogsQuery object to iterate over the fetched
logs. Note that the entries are not guaranteed to be filtered by what was
given in the "filter_str" argument to query(), you will need to filter out
the entries in code as well when iterating over the log entries.
Side note: this module is not called 'logging' to avoid using the same name as
the standard python library for logging.
"""
import concurrent.futures
import dataclasses
import datetime
import logging
from typing import Any, Dict, Mapping, Optional, Sequence, Set, Tuple
import dateutil.parser
import ratelimit
from gcpdiag import caching, config
from gcpdiag.queries import apis
@dataclasses.dataclass
class _LogsQueryJob:
  """A group of log queries that will be executed with a single API call."""
  # Jobs are keyed by (project_id, resource_type, log_name); the individual
  # filter expressions accumulated in `filters` are OR-ed together when the
  # job runs (see _execute_query_job).
  project_id: str
  resource_type: str
  log_name: str
  filters: Set[str]
  # Filled in by execute_queries(); resolves to the fetched log entries.
  future: Optional[concurrent.futures.Future] = None
class LogsQuery:
  """A log search job that was started with prefetch_logs()."""
  job: _LogsQueryJob

  def __init__(self, job):
    self.job = job

  @property
  def entries(self) -> Sequence:
    """Fetched log entries; blocks until the underlying job has finished."""
    future = self.job.future
    if not future:
      raise RuntimeError(
          'log query wasn\'t executed. did you forget to call execute_queries()?'
      )
    if future.running():
      # Tell the user why we appear to hang: the query is still in flight.
      logging.info(
          'waiting for logs query results (project: %s, resource type: %s)',
          self.job.project_id, self.job.resource_type)
    return future.result()
# Pending query jobs keyed by (project_id, resource_type, log_name);
# populated by query() and drained by execute_queries().
jobs_todo: Dict[Tuple[str, str, str], _LogsQueryJob] = {}
def query(project_id: str, resource_type: str, log_name: str,
          filter_str: str) -> LogsQuery:
  """Schedule a log query and return a LogsQuery handle for its results.

  Queries sharing the same (project_id, resource_type, log_name) are merged
  into one job so they can be fetched with a single API call.
  """
  # Aggregate by project_id, resource_type, log_name.
  job_key = (project_id, resource_type, log_name)
  if job_key not in jobs_todo:
    jobs_todo[job_key] = _LogsQueryJob(
        project_id=project_id,
        resource_type=resource_type,
        log_name=log_name,
        filters=set(),
    )
  job = jobs_todo[job_key]
  job.filters.add(filter_str)
  return LogsQuery(job=job)
@ratelimit.sleep_and_retry
@ratelimit.limits(calls=config.LOGGING_RATELIMIT_REQUESTS,
                  period=config.LOGGING_RATELIMIT_PERIOD_SECONDS)
def _ratelimited_execute(req):
  """Wrapper to req.execute() with rate limiting to avoid hitting quotas."""
  # sleep_and_retry makes callers block (rather than fail) when the
  # configured requests-per-period budget is exhausted.
  return req.execute(num_retries=config.API_RETRIES)
def _execute_query_job(job: _LogsQueryJob):
  """Run one aggregated logs query and return the entries in a temp deque.

  Builds a single filter string (time window + resource type + log name +
  OR of all accumulated filters), pages through entries.list until
  exhaustion or a configured entry-count / wall-time limit is hit.
  """
  logging_api = apis.get_api('logging', 'v2', job.project_id)
  # Convert "within" relative time to an absolute timestamp.
  start_time = datetime.datetime.now(
      datetime.timezone.utc) - datetime.timedelta(days=config.WITHIN_DAYS)
  filter_lines = ['timestamp>"%s"' % start_time.isoformat(timespec='seconds')]
  filter_lines.append('resource.type="%s"' % job.resource_type)
  if job.log_name.startswith('log_id('):
    # Special case: log_id(logname)
    # https://cloud.google.com/logging/docs/view/logging-query-language#functions
    filter_lines.append(job.log_name)
  else:
    filter_lines.append('logName="%s"' % job.log_name)
  # OR together every filter that was aggregated into this job (sorted for
  # a deterministic query string).
  if len(job.filters) == 1:
    filter_lines.append('(' + next(iter(job.filters)) + ')')
  else:
    filter_lines.append(
        '(' + ' OR '.join(['(' + val + ')' for val in sorted(job.filters)]) +
        ')')
  filter_str = '\n'.join(filter_lines)
  logging.info('searching logs in project %s (resource type: %s)',
               job.project_id, job.resource_type)
  # Fetch all logs and put the results in temporary storage (diskcache.Deque)
  deque = caching.get_tmp_deque('tmp-logs-')
  req = logging_api.entries().list(
      body={
          'resourceNames': [f'projects/{job.project_id}'],
          'filter': filter_str,
          'orderBy': 'timestamp desc',
          'pageSize': config.LOGGING_PAGE_SIZE
      })
  fetched_entries_count = 0
  query_pages = 0
  query_start_time = datetime.datetime.now()
  while req is not None:
    query_pages += 1
    res = _ratelimited_execute(req)
    if 'entries' in res:
      for e in res['entries']:
        fetched_entries_count += 1
        # appendleft reverses each 'timestamp desc' page as it is stored.
        deque.appendleft(e)
    # Verify that we aren't above limits, exit otherwise.
    if fetched_entries_count > config.LOGGING_FETCH_MAX_ENTRIES:
      logging.warning(
          'maximum number of log entries (%d) reached (project: %s, query: %s).',
          config.LOGGING_FETCH_MAX_ENTRIES, job.project_id,
          filter_str.replace('\n', ' AND '))
      return deque
    run_time = (datetime.datetime.now() - query_start_time).total_seconds()
    if run_time >= config.LOGGING_FETCH_MAX_TIME_SECONDS:
      logging.warning(
          'maximum query runtime for log query reached (project: %s, query: %s).',
          job.project_id, filter_str.replace('\n', ' AND '))
      return deque
    # None when the last page has been consumed.
    req = logging_api.entries().list_next(req, res)
    if req is not None:
      logging.info(
          'still fetching logs (project: %s, resource type: %s, max wait: %ds)',
          job.project_id, job.resource_type,
          config.LOGGING_FETCH_MAX_TIME_SECONDS - run_time)
  query_end_time = datetime.datetime.now()
  logging.debug('logging query run time: %s, pages: %d, query: %s',
                query_end_time - query_start_time, query_pages,
                filter_str.replace('\n', ' AND '))
  return deque
def execute_queries(executor: concurrent.futures.Executor):
  """Submit every pending query job to *executor* and clear the queue."""
  global jobs_todo
  # Atomically take ownership of the pending jobs so that queries scheduled
  # from now on land in a fresh dict.
  pending, jobs_todo = jobs_todo, {}
  for job in pending.values():
    job.future = executor.submit(_execute_query_job, job)
def log_entry_timestamp_str(log_entry: Mapping[str, Any]):
  """Return the entry's receive time as a local-time ISO string."""
  # receiveTimestamp is used instead of the entry's own timestamp field so we
  # don't depend on the emitting system's clock being in sync.
  received = dateutil.parser.parse(log_entry['receiveTimestamp'])
  return received.astimezone().isoformat(sep=' ', timespec='seconds')
| 35.854922
| 82
| 0.702601
|
import concurrent.futures
import dataclasses
import datetime
import logging
from typing import Any, Dict, Mapping, Optional, Sequence, Set, Tuple
import dateutil.parser
import ratelimit
from gcpdiag import caching, config
from gcpdiag.queries import apis
@dataclasses.dataclass
class _LogsQueryJob:
project_id: str
resource_type: str
log_name: str
filters: Set[str]
future: Optional[concurrent.futures.Future] = None
class LogsQuery:
job: _LogsQueryJob
def __init__(self, job):
self.job = job
@property
def entries(self) -> Sequence:
if not self.job.future:
raise RuntimeError(
'log query wasn\'t executed. did you forget to call execute_queries()?'
)
elif self.job.future.running():
logging.info(
'waiting for logs query results (project: %s, resource type: %s)',
self.job.project_id, self.job.resource_type)
return self.job.future.result()
jobs_todo: Dict[Tuple[str, str, str], _LogsQueryJob] = {}
def query(project_id: str, resource_type: str, log_name: str,
filter_str: str) -> LogsQuery:
# Aggregate by project_id, resource_type, log_name
job_key = (project_id, resource_type, log_name)
job = jobs_todo.setdefault(
job_key,
_LogsQueryJob(
project_id=project_id,
resource_type=resource_type,
log_name=log_name,
filters=set(),
))
job.filters.add(filter_str)
return LogsQuery(job=job)
@ratelimit.sleep_and_retry
@ratelimit.limits(calls=config.LOGGING_RATELIMIT_REQUESTS,
period=config.LOGGING_RATELIMIT_PERIOD_SECONDS)
def _ratelimited_execute(req):
return req.execute(num_retries=config.API_RETRIES)
def _execute_query_job(job: _LogsQueryJob):
logging_api = apis.get_api('logging', 'v2', job.project_id)
# Convert "within" relative time to an absolute timestamp.
start_time = datetime.datetime.now(
datetime.timezone.utc) - datetime.timedelta(days=config.WITHIN_DAYS)
filter_lines = ['timestamp>"%s"' % start_time.isoformat(timespec='seconds')]
filter_lines.append('resource.type="%s"' % job.resource_type)
if job.log_name.startswith('log_id('):
# Special case: log_id(logname)
# https://cloud.google.com/logging/docs/view/logging-query-language#functions
filter_lines.append(job.log_name)
else:
filter_lines.append('logName="%s"' % job.log_name)
if len(job.filters) == 1:
filter_lines.append('(' + next(iter(job.filters)) + ')')
else:
filter_lines.append(
'(' + ' OR '.join(['(' + val + ')' for val in sorted(job.filters)]) +
')')
filter_str = '\n'.join(filter_lines)
logging.info('searching logs in project %s (resource type: %s)',
job.project_id, job.resource_type)
# Fetch all logs and put the results in temporary storage (diskcache.Deque)
deque = caching.get_tmp_deque('tmp-logs-')
req = logging_api.entries().list(
body={
'resourceNames': [f'projects/{job.project_id}'],
'filter': filter_str,
'orderBy': 'timestamp desc',
'pageSize': config.LOGGING_PAGE_SIZE
})
fetched_entries_count = 0
query_pages = 0
query_start_time = datetime.datetime.now()
while req is not None:
query_pages += 1
res = _ratelimited_execute(req)
if 'entries' in res:
for e in res['entries']:
fetched_entries_count += 1
deque.appendleft(e)
# Verify that we aren't above limits, exit otherwise.
if fetched_entries_count > config.LOGGING_FETCH_MAX_ENTRIES:
logging.warning(
'maximum number of log entries (%d) reached (project: %s, query: %s).',
config.LOGGING_FETCH_MAX_ENTRIES, job.project_id,
filter_str.replace('\n', ' AND '))
return deque
run_time = (datetime.datetime.now() - query_start_time).total_seconds()
if run_time >= config.LOGGING_FETCH_MAX_TIME_SECONDS:
logging.warning(
'maximum query runtime for log query reached (project: %s, query: %s).',
job.project_id, filter_str.replace('\n', ' AND '))
return deque
req = logging_api.entries().list_next(req, res)
if req is not None:
logging.info(
'still fetching logs (project: %s, resource type: %s, max wait: %ds)',
job.project_id, job.resource_type,
config.LOGGING_FETCH_MAX_TIME_SECONDS - run_time)
query_end_time = datetime.datetime.now()
logging.debug('logging query run time: %s, pages: %d, query: %s',
query_end_time - query_start_time, query_pages,
filter_str.replace('\n', ' AND '))
return deque
def execute_queries(executor: concurrent.futures.Executor):
global jobs_todo
jobs_executing = jobs_todo
jobs_todo = {}
for job in jobs_executing.values():
job.future = executor.submit(_execute_query_job, job)
def log_entry_timestamp_str(log_entry: Mapping[str, Any]):
# (i.e. don't trust the timestamp field)
t = dateutil.parser.parse(log_entry['receiveTimestamp'])
return t.astimezone().isoformat(sep=' ', timespec='seconds')
| true
| true
|
1c49666b9c4d832f37834fa730f66dc1774b3e18
| 1,174
|
py
|
Python
|
Adafruit_DHT/__init__.py
|
HydAu/Adafruit_Python_DHT
|
9e8109bb4ab5ec9127e53e792c1f69eddfd2f687
|
[
"MIT"
] | 1
|
2015-11-17T15:05:13.000Z
|
2015-11-17T15:05:13.000Z
|
Adafruit_DHT/__init__.py
|
HydAu/Adafruit_Python_DHT
|
9e8109bb4ab5ec9127e53e792c1f69eddfd2f687
|
[
"MIT"
] | null | null | null |
Adafruit_DHT/__init__.py
|
HydAu/Adafruit_Python_DHT
|
9e8109bb4ab5ec9127e53e792c1f69eddfd2f687
|
[
"MIT"
] | 1
|
2016-02-14T11:59:45.000Z
|
2016-02-14T11:59:45.000Z
|
# Copyright (c) 2014 Adafruit Industries
# Author: Tony DiCola
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from common import DHT11, DHT22, AM2302, read, read_retry
| 55.904762
| 80
| 0.783646
|
from common import DHT11, DHT22, AM2302, read, read_retry
| true
| true
|
1c4966ade42aaa97510f7628a11791f6090266df
| 123
|
py
|
Python
|
game/admin.py
|
0xecho/2048-er
|
732f9c250f8cb632068a93d4622d9f7d2f65a147
|
[
"MIT"
] | 5
|
2021-10-04T15:38:58.000Z
|
2021-12-30T07:43:30.000Z
|
game/admin.py
|
0xecho/2048-er
|
732f9c250f8cb632068a93d4622d9f7d2f65a147
|
[
"MIT"
] | null | null | null |
game/admin.py
|
0xecho/2048-er
|
732f9c250f8cb632068a93d4622d9f7d2f65a147
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from . import models
# Register your models here.
# Expose Submission in the Django admin with the default ModelAdmin options.
admin.site.register(models.Submission)
| 20.5
| 38
| 0.804878
|
from django.contrib import admin
from . import models
admin.site.register(models.Submission)
| true
| true
|
1c496702676689a5a25c37ec1873b560deec1093
| 18,565
|
py
|
Python
|
ucscsdk/mometa/license/LicenseDownloader.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/license/LicenseDownloader.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/license/LicenseDownloader.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for LicenseDownloader ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class LicenseDownloaderConsts():
ADMIN_STATE_IDLE = "idle"
ADMIN_STATE_RESTART = "restart"
FSM_PREV_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_PREV_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_PREV_DOWNLOAD_FAIL = "DownloadFail"
FSM_PREV_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_PREV_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_PREV_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_ERROR = "ERR-DNLD-error"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIGEST_VALIDATION_ERROR = "ERR-Digest-Validation-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GEN_CERT_ERROR = "ERR-Exec-Gen-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GET_CA_CERT_ERROR = "ERR-Exec-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_GET_CA_CERT_ERROR = "ERR-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_CERT_ERROR = "ERR-Get-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_OUT_DIGET_MESSAGE_ERROR = "ERR-Get-Out-Diget-Message-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_REQUEST_ERROR = "ERR-HTTP-Request-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IPV6_ADDR_CONFIGURED = "ERR-Ipv6-addr-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POLICY_RESOLUTION_IN_PROGRESS = "ERR-Policy-resolution-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_VM_IP_MASK_GATEWAY_ERROR = "ERR-Update-VM-IP-Mask-Gateway-error"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_CHASSISPACK_UNDER_DG = "ERR-create-chassispack-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_HFP_UNDER_DG = "ERR-create-hfp-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_ESTIMATE_IMPACT_ON_RECONNECT = "ERR-estimate-impact-on-reconnect"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_NFS_DOWN = "ERR-nfs-down"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_STATUS_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_STATUS_DOWNLOAD_FAIL = "DownloadFail"
FSM_STATUS_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_STATUS_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_STATUS_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_STATUS_NOP = "nop"
PROT_FTP = "ftp"
PROT_LOCAL = "local"
PROT_SCP = "scp"
PROT_SFTP = "sftp"
PROT_TFTP = "tftp"
TRANSFER_STATE_DOWNLOADED = "downloaded"
TRANSFER_STATE_DOWNLOADING = "downloading"
TRANSFER_STATE_FAILED = "failed"
TRANSFER_STATE_INIT = "init"
class LicenseDownloader(ManagedObject):
"""This is LicenseDownloader class."""
consts = LicenseDownloaderConsts()
naming_props = set([u'fileName'])
mo_meta = MoMeta("LicenseDownloader", "licenseDownloader", "dnld-[file_name]", VersionMeta.Version111a, "InputOutput", 0x7ff, [], ["admin"], [u'licenseEp'], [u'eventInst', u'faultInst', u'licenseDownloaderFsm', u'licenseDownloaderFsmTask', u'licenseProp'], ["Add", "Get", "Remove", "Set"])
prop_meta = {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["idle", "restart"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"file_name": MoPropertyMeta("file_name", "fileName", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, 1, 64, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-error", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Digest-Validation-error", "ERR-Exec-Gen-Cert-error", "ERR-Exec-Get-CA-Cert-error", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-Get-CA-Cert-error", "ERR-Get-Cert-error", "ERR-Get-Out-Diget-Message-error", "ERR-HTTP-Request-error", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-Ipv6-addr-configured", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-Policy-resolution-in-progress", "ERR-TOKEN-request-denied", "ERR-Update-VM-IP-Mask-Gateway-error", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-create-chassispack-under-dg", "ERR-create-hfp-under-dg", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-estimate-impact-on-reconnect", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", 
"ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-nfs-down", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-role-set-error", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-password-strength-check", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-set-error", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"prot": MoPropertyMeta("prot", "prot", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "local", "scp", "sftp", "tftp"], []),
"pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
"remote_path": MoPropertyMeta("remote_path", "remotePath", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
"server": MoPropertyMeta("server", "server", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, 1, 64, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"transfer_state": MoPropertyMeta("transfer_state", "transferState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downloaded", "downloading", "failed", "init"], []),
"user": MoPropertyMeta("user", "user", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400, 0, 510, None, [], []),
}
prop_map = {
"adminState": "admin_state",
"childAction": "child_action",
"dn": "dn",
"fileName": "file_name",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"prot": "prot",
"pwd": "pwd",
"remotePath": "remote_path",
"rn": "rn",
"server": "server",
"status": "status",
"transferState": "transfer_state",
"user": "user",
}
def __init__(self, parent_mo_or_dn, file_name, **kwargs):
self._dirty_mask = 0
self.file_name = file_name
self.admin_state = None
self.child_action = None
self.fsm_descr = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.prot = None
self.pwd = None
self.remote_path = None
self.server = None
self.status = None
self.transfer_state = None
self.user = None
ManagedObject.__init__(self, "LicenseDownloader", parent_mo_or_dn, **kwargs)
| 86.348837
| 2,742
| 0.752707
|
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class LicenseDownloaderConsts():
ADMIN_STATE_IDLE = "idle"
ADMIN_STATE_RESTART = "restart"
FSM_PREV_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_PREV_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_PREV_DOWNLOAD_FAIL = "DownloadFail"
FSM_PREV_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_PREV_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_PREV_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_ERROR = "ERR-DNLD-error"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIGEST_VALIDATION_ERROR = "ERR-Digest-Validation-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GEN_CERT_ERROR = "ERR-Exec-Gen-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GET_CA_CERT_ERROR = "ERR-Exec-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_GET_CA_CERT_ERROR = "ERR-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_CERT_ERROR = "ERR-Get-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_OUT_DIGET_MESSAGE_ERROR = "ERR-Get-Out-Diget-Message-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_REQUEST_ERROR = "ERR-HTTP-Request-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IPV6_ADDR_CONFIGURED = "ERR-Ipv6-addr-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POLICY_RESOLUTION_IN_PROGRESS = "ERR-Policy-resolution-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_VM_IP_MASK_GATEWAY_ERROR = "ERR-Update-VM-IP-Mask-Gateway-error"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_CHASSISPACK_UNDER_DG = "ERR-create-chassispack-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_HFP_UNDER_DG = "ERR-create-hfp-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_ESTIMATE_IMPACT_ON_RECONNECT = "ERR-estimate-impact-on-reconnect"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_NFS_DOWN = "ERR-nfs-down"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_DOWNLOAD_BEGIN = "DownloadBegin"
FSM_STATUS_DOWNLOAD_DELETE_LOCAL = "DownloadDeleteLocal"
FSM_STATUS_DOWNLOAD_FAIL = "DownloadFail"
FSM_STATUS_DOWNLOAD_LOCAL = "DownloadLocal"
FSM_STATUS_DOWNLOAD_SUCCESS = "DownloadSuccess"
FSM_STATUS_DOWNLOAD_VALIDATE_LOCAL = "DownloadValidateLocal"
FSM_STATUS_NOP = "nop"
PROT_FTP = "ftp"
PROT_LOCAL = "local"
PROT_SCP = "scp"
PROT_SFTP = "sftp"
PROT_TFTP = "tftp"
TRANSFER_STATE_DOWNLOADED = "downloaded"
TRANSFER_STATE_DOWNLOADING = "downloading"
TRANSFER_STATE_FAILED = "failed"
TRANSFER_STATE_INIT = "init"
class LicenseDownloader(ManagedObject):
    """Auto-generated model of the UCS ``licenseDownloader`` managed object.

    Represents a request to fetch a license file onto the endpoint: where the
    file lives (server, path, protocol, credentials) plus the FSM bookkeeping
    that tracks the transfer.  Like all ucsmsdk ManagedObject subclasses, the
    XML schema is captured declaratively in the class-level metadata tables
    below; the generic ManagedObject machinery does the (de)serialization.
    """

    # Symbolic constants (FSM states, protocols, transfer states) for this class.
    consts = LicenseDownloaderConsts()
    # Properties that participate in the RN ("dnld-[file_name]") of an instance.
    naming_props = set([u'fileName'])

    # Class-level metadata: XML class id, RN pattern, introduced-in version,
    # access mask, roles, allowed parents/children and supported verbs.
    mo_meta = MoMeta("LicenseDownloader", "licenseDownloader", "dnld-[file_name]", VersionMeta.Version111a, "InputOutput", 0x7ff, [], ["admin"], [u'licenseEp'], [u'eventInst', u'faultInst', u'licenseDownloaderFsm', u'licenseDownloaderFsmTask', u'licenseProp'], ["Add", "Get", "Remove", "Set"])

    # Per-property metadata: XML attribute name, type, version, access kind,
    # dirty-mask bit, length bounds, validation regex, and allowed values/ranges.
    prop_meta = {
        "admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["idle", "restart"], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "file_name": MoPropertyMeta("file_name", "fileName", "string", VersionMeta.Version111a, MoPropertyMeta.NAMING, 0x8, 1, 64, None, [], []),
        "fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
        "fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
        "fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-error", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Digest-Validation-error", "ERR-Exec-Gen-Cert-error", "ERR-Exec-Get-CA-Cert-error", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-Get-CA-Cert-error", "ERR-Get-Cert-error", "ERR-Get-Out-Diget-Message-error", "ERR-HTTP-Request-error", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-Ipv6-addr-configured", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-Policy-resolution-in-progress", "ERR-TOKEN-request-denied", "ERR-Update-VM-IP-Mask-Gateway-error", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-create-chassispack-under-dg", "ERR-create-hfp-under-dg", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-estimate-impact-on-reconnect", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-nfs-down", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-role-set-error", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-password-strength-check", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-set-error", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "none"], ["0-4294967295"]),
        "fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
        "fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
        "fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
        "fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["DownloadBegin", "DownloadDeleteLocal", "DownloadFail", "DownloadLocal", "DownloadSuccess", "DownloadValidateLocal", "nop"], []),
        "fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
        "prot": MoPropertyMeta("prot", "prot", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "local", "scp", "sftp", "tftp"], []),
        "pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
        "remote_path": MoPropertyMeta("remote_path", "remotePath", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x40, None, None, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, 0x80, 0, 256, None, [], []),
        "server": MoPropertyMeta("server", "server", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x100, 1, 64, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x200, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "transfer_state": MoPropertyMeta("transfer_state", "transferState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["downloaded", "downloading", "failed", "init"], []),
        "user": MoPropertyMeta("user", "user", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x400, 0, 510, None, [], []),
    }

    # Maps XML attribute names to the snake_case Python attribute names above.
    prop_map = {
        "adminState": "admin_state",
        "childAction": "child_action",
        "dn": "dn",
        "fileName": "file_name",
        "fsmDescr": "fsm_descr",
        "fsmPrev": "fsm_prev",
        "fsmProgr": "fsm_progr",
        "fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
        "fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
        "fsmRmtInvRslt": "fsm_rmt_inv_rslt",
        "fsmStageDescr": "fsm_stage_descr",
        "fsmStamp": "fsm_stamp",
        "fsmStatus": "fsm_status",
        "fsmTry": "fsm_try",
        "prot": "prot",
        "pwd": "pwd",
        "remotePath": "remote_path",
        "rn": "rn",
        "server": "server",
        "status": "status",
        "transferState": "transfer_state",
        "user": "user",
    }

    def __init__(self, parent_mo_or_dn, file_name, **kwargs):
        """Create a LicenseDownloader under *parent_mo_or_dn* named by *file_name*.

        All remaining (non-naming) properties default to None and may be set
        via **kwargs, which ManagedObject.__init__ applies through prop_map.
        """
        self._dirty_mask = 0
        self.file_name = file_name
        self.admin_state = None
        self.child_action = None
        self.fsm_descr = None
        self.fsm_prev = None
        self.fsm_progr = None
        self.fsm_rmt_inv_err_code = None
        self.fsm_rmt_inv_err_descr = None
        self.fsm_rmt_inv_rslt = None
        self.fsm_stage_descr = None
        self.fsm_stamp = None
        self.fsm_status = None
        self.fsm_try = None
        self.prot = None
        self.pwd = None
        self.remote_path = None
        self.server = None
        self.status = None
        self.transfer_state = None
        self.user = None

        ManagedObject.__init__(self, "LicenseDownloader", parent_mo_or_dn, **kwargs)
| true
| true
|
1c496867149d9c74d5f66efd40cf073fe0da023f
| 22,149
|
py
|
Python
|
critiquebrainz/frontend/views/review.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 70
|
2015-03-10T00:08:21.000Z
|
2022-02-20T05:36:53.000Z
|
critiquebrainz/frontend/views/review.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 279
|
2015-12-08T14:10:45.000Z
|
2022-03-29T13:54:23.000Z
|
critiquebrainz/frontend/views/review.py
|
akshaaatt/critiquebrainz
|
39184152af5f23adaa991c4b43ecbbb6f086f809
|
[
"Apache-2.0"
] | 95
|
2015-03-12T21:39:42.000Z
|
2022-03-10T00:51:04.000Z
|
from math import ceil
from brainzutils.musicbrainz_db.exceptions import NoDataFoundException
from flask import Blueprint, render_template, request, redirect, url_for, jsonify
from flask_babel import gettext, get_locale, lazy_gettext
from flask_login import login_required, current_user
from langdetect import detect
from markdown import markdown
from werkzeug.exceptions import Unauthorized, NotFound, Forbidden, BadRequest
import critiquebrainz.db.comment as db_comment
import critiquebrainz.db.moderation_log as db_moderation_log
import critiquebrainz.db.review as db_review
import critiquebrainz.db.spam_report as db_spam_report
import critiquebrainz.db.users as db_users
from critiquebrainz.db import vote as db_vote, exceptions as db_exceptions, revision as db_revision
from critiquebrainz.db.moderation_log import AdminActions
from critiquebrainz.db.review import ENTITY_TYPES
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.external import mbspotify, soundcloud
from critiquebrainz.frontend.external.musicbrainz_db.entities import get_multiple_entities, get_entity_by_id
from critiquebrainz.frontend.forms.comment import CommentEditForm
from critiquebrainz.frontend.forms.log import AdminActionForm
from critiquebrainz.frontend.forms.review import ReviewCreateForm, ReviewEditForm, ReviewReportForm
from critiquebrainz.frontend.login import admin_view
from critiquebrainz.frontend.views import get_avg_rating
from critiquebrainz.utils import side_by_side_diff
review_bp = Blueprint('review', __name__)
RESULTS_LIMIT = 10
def get_review_or_404(review_id):
    """Fetch a review by its ID, raising HTTP 404 if it does not exist."""
    try:
        return db_review.get_by_id(review_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("Can't find a review with ID: %(review_id)s!", review_id=review_id))
@review_bp.route('/')
def browse():
    """Render a paginated grid of published reviews, optionally filtered by entity type."""
    entity_type = request.args.get('entity_type', default=None)
    if entity_type == 'all':
        entity_type = None  # "all" means no type filter

    page = int(request.args.get('page', default=1))
    if page < 1:
        return redirect(url_for('.browse'))

    per_page = 3 * 9  # 9 rows of 3 review cards each
    reviews, count = db_review.list_reviews(
        sort='published_on',
        limit=per_page,
        offset=(page - 1) * per_page,
        entity_type=entity_type,
    )
    if not reviews:
        # Requested page is past the end: bounce back to the last populated page.
        if page - 1 > count / per_page:
            return redirect(url_for('review.browse', page=int(ceil(count / per_page))))
        if not entity_type:
            raise NotFound(gettext("No reviews to display."))

    # Pull MusicBrainz info for every entity that appears on this page.
    entities_info = get_multiple_entities(
        [(str(review["entity_id"]), review["entity_type"]) for review in reviews]
    )
    return render_template('review/browse.html', reviews=reviews, entities=entities_info,
                           page=page, limit=per_page, count=count, entity_type=entity_type)
# TODO(psolanki): Refactor this function to remove PyLint warning.
# pylint: disable=too-many-branches
@review_bp.route('/<uuid:id>/revisions/<int:rev>')
@review_bp.route('/<uuid:id>')
def entity(id, rev=None):
    """Display a single review, optionally at a specific revision number.

    Args:
        id: UUID of the review.
        rev: 1-based revision number to show; defaults to the latest revision.

    Raises 404 for unpublished reviews viewed by non-authors and for
    out-of-range revision numbers; 403 for hidden reviews viewed by non-admins.
    """
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"]:
        if not current_user.is_admin():
            raise Forbidden(gettext("Review has been hidden. "
                                    "You need to be an administrator to view it."))
        flash.warn(gettext("Review has been hidden."))

    # Streaming links only exist for release groups.
    spotify_mappings = None
    soundcloud_url = None
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))

    count = db_revision.get_count(id)
    if not rev:
        rev = count  # no revision requested: show the latest one
    if rev < count:
        flash.info(gettext('You are viewing an old revision, the review has been updated since then.'))
    elif rev > count:
        raise NotFound(gettext("The revision you are looking for does not exist."))

    # Revisions are fetched newest-first, so offset count-rev selects revision `rev`.
    revision = db_revision.get(id, offset=count - rev)[0]
    if not review["is_draft"] and current_user.is_authenticated:
        # if user is logged in, get their vote for this review
        try:
            vote = db_vote.get(user_id=current_user.id, revision_id=revision['id'])
        except db_exceptions.NoDataFoundException:
            vote = None
    else:  # otherwise set vote to None, its value will not be used
        vote = None
    if revision["text"] is None:
        review["text_html"] = None
    else:
        # NOTE(review): `safe_mode` was removed in python-markdown 3.0 — confirm
        # the pinned markdown version still supports it.
        review["text_html"] = markdown(revision['text'], safe_mode="escape")
    # Present the requested revision's rating, not necessarily the latest one.
    review["rating"] = revision["rating"]
    user_all_reviews, _ = db_review.list_reviews(
        user_id=review["user_id"],
        sort="random",
        exclude=[review["id"]],
    )
    other_reviews = user_all_reviews[:3]  # "more from this author" teaser
    avg_rating = get_avg_rating(review["entity_id"], review["entity_type"])
    comments, count = db_comment.list_comments(review_id=id)
    for comment in comments:
        comment["text_html"] = markdown(comment["last_revision"]["text"], safe_mode="escape")
    comment_form = CommentEditForm(review_id=id)
    return render_template('review/entity/%s.html' % review["entity_type"], review=review,
                           spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url,
                           vote=vote, other_reviews=other_reviews, avg_rating=avg_rating,
                           comment_count=count, comments=comments, comment_form=comment_form)
@review_bp.route('/<uuid:review_id>/revision/<int:revision_id>')
def redirect_to_entity(review_id, revision_id):
    """Translate a revision's database ID into its ordinal number and redirect to it."""
    try:
        rev_number = db_revision.get_revision_number(review_id, revision_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    return redirect(url_for('.entity', id=review_id, rev=rev_number))
@review_bp.route('/<uuid:id>/revisions/compare')
def compare(id):
    """Show a side-by-side diff between two revisions of a review.

    Revision numbers come from the `old`/`new` query parameters; by default
    the latest revision is compared against the one before it.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    total = db_revision.get_count(id)
    older = int(request.args.get('old') or total - 1)
    newer = int(request.args.get('new') or total)
    if older > total or newer > total:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    # Normalize the pair so the older revision always ends up on the left.
    if older > newer:
        return redirect(url_for('.compare', id=id, old=newer, new=older))

    # Revisions are stored newest-first, hence the total-N offsets.
    left = db_revision.get(id, offset=total - older)[0]
    right = db_revision.get(id, offset=total - newer)[0]
    left['number'] = older
    right['number'] = newer
    left['text'], right['text'] = side_by_side_diff(left['text'], right['text'])

    return render_template('review/compare.html', review=review, left=left, right=right)
@review_bp.route('/<uuid:id>/revisions')
def revisions(id):
    """List the most recent revisions of a review with their vote tallies."""
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound("Can't find a review with the specified ID.")
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    try:
        count = db_revision.get_count(id)
        rev_list = db_revision.get(id, limit=RESULTS_LIMIT)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))

    votes = db_revision.get_all_votes(id)
    # Pair each revision (newest first) with its descending revision number.
    numbering = reversed(range(count - RESULTS_LIMIT, count))
    results = list(zip(numbering, rev_list))
    return render_template('review/revisions.html', review=review, results=results,
                           count=count, limit=RESULTS_LIMIT, votes=votes)
@review_bp.route('/<uuid:id>/revisions/more')
def revisions_more(id):
    """AJAX endpoint returning one more page of revision history as JSON."""
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound("Can't find a review with the specified ID.")
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    page = int(request.args.get('page', default=0))
    offset = page * RESULTS_LIMIT
    try:
        count = db_revision.get_count(id)
        rev_page = db_revision.get(id, limit=RESULTS_LIMIT, offset=offset)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))

    votes = db_revision.get_all_votes(id)
    # Descending revision numbers for this slice of the history.
    numbering = reversed(range(count - offset - RESULTS_LIMIT, count - offset))
    results = list(zip(numbering, rev_page))
    rendered = render_template('review/revision_results.html', review=review,
                               results=results, votes=votes, count=count)
    return jsonify(results=rendered, more=(count - offset - RESULTS_LIMIT) > 0)
@review_bp.route('/write/<entity_type>/<entity_id>/', methods=('GET', 'POST'))
@review_bp.route('/write/')
@login_required
def create(entity_type=None, entity_id=None):
    """Write a new review for a MusicBrainz entity.

    Without an entity in the URL, tries to pick one from query parameters
    (legacy links like ``?release_group=<mbid>``) and otherwise sends the user
    to the entity search selector.  Blocked users, users who already reviewed
    the entity, and users over their daily review limit are redirected away.
    On a valid POST the review is created (draft or published) and the user is
    redirected to it; on GET (or invalid POST) the write form is rendered.
    """
    if not (entity_id or entity_type):
        # Legacy/alternate entry point: ?<entity_type>=<mbid> query args.
        for allowed_type in ENTITY_TYPES:
            if mbid := request.args.get(allowed_type):
                entity_type = allowed_type
                entity_id = mbid
                break
        if entity_type:
            return redirect(url_for('.create', entity_type=entity_type, entity_id=entity_id))
        flash.info(gettext("Please choose an entity to review."))
        return redirect(url_for('search.selector', next=url_for('.create')))

    if entity_type not in ENTITY_TYPES:
        raise BadRequest("You can't write reviews about this type of entity.")

    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to write new reviews because your "
                            "account has been blocked by a moderator."))
        return redirect(url_for('user.reviews', user_id=current_user.id))

    # Checking if the user already wrote a review for this entity
    reviews, count = db_review.list_reviews(user_id=current_user.id, entity_id=entity_id, inc_drafts=True, inc_hidden=True)
    review = reviews[0] if count != 0 else None
    if review:
        # One review per user per entity: resume drafts, otherwise redirect
        # to the existing review.
        if review['is_draft']:
            return redirect(url_for('review.edit', id=review['id']))
        elif review['is_hidden']:
            return redirect(url_for('review.entity', id=review['id']))
        else:
            flash.error(gettext("You have already published a review for this entity"))
            return redirect(url_for('review.entity', id=review["id"]))

    if current_user.is_review_limit_exceeded:
        flash.error(gettext("You have exceeded your limit of reviews per day."))
        return redirect(url_for('user.reviews', user_id=current_user.id))

    form = ReviewCreateForm(default_license_id=current_user.license_choice, default_language=get_locale())

    if form.validate_on_submit():
        is_draft = form.state.data == 'draft'
        if form.text.data == '':
            form.text.data = None  # store rating-only reviews with NULL text
        review = db_review.create(user_id=current_user.id, entity_id=entity_id, entity_type=entity_type,
                                  text=form.text.data, rating=form.rating.data, license_id=form.license_choice.data,
                                  language=form.language.data, is_draft=is_draft)
        if form.remember_license.data:
            # Persist the chosen license as the user's default for next time.
            db_users.update(current_user.id, user_new_info={
                "license_choice": form.license_choice.data,
            })
        if is_draft:
            flash.success(gettext("Review has been saved!"))
        else:
            flash.success(gettext("Review has been published!"))
        return redirect(url_for('.entity', id=review['id']))

    try:
        entity = get_entity_by_id(entity_id, entity_type)
    except NoDataFoundException:
        raise NotFound(gettext("Sorry, we couldn't find a %s with that MusicBrainz ID." % entity_type))

    if not entity:
        flash.error(gettext("You can only write a review for an entity that exists on MusicBrainz!"))
        return redirect(url_for('search.selector', next=url_for('.create')))

    if entity_type == 'release_group':
        # Release groups additionally get streaming links on the write page.
        spotify_mappings = mbspotify.mappings(entity_id)
        soundcloud_url = soundcloud.get_url(entity_id)
        if not form.errors:
            flash.info(gettext("Please provide some text or a rating for this review."))
        return render_template('review/modify/write.html', form=form, entity_type=entity_type, entity=entity,
                               spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)

    # Other entity types expose their display name under either key.
    entity_title = None
    if 'title' in entity:
        entity_title = entity['title']
    elif 'name' in entity:
        entity_title = entity['name']

    if not form.errors:
        flash.info(gettext("Please provide some text or a rating for this review."))
    return render_template('review/modify/write.html', form=form, entity_type=entity_type,
                           entity_title=entity_title, entity=entity)
@review_bp.route('/<uuid:id>/edit', methods=('GET', 'POST'))
@login_required
def edit(id):
    """Edit an existing review (author only).

    Drafts may change their license and be published; published reviews may
    not change license or revert to draft (the DB layer enforces this via
    BadDataException).  A submission that changes neither text nor rating is
    rejected with a form error.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and current_user != review["user"]:
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["user"] != current_user:
        raise Unauthorized(gettext("Only author can edit this review."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))

    form = ReviewEditForm(default_license_id=review["license_id"], default_language=review["language"])
    if not review["is_draft"]:
        # Can't change license if review is published.
        del form.license_choice

    # Check if contents of the review are updated
    if form.text.data == review['text'] and form.rating.data == review['rating']:
        form.errors['edit'] = ["You must edit either text or rating to update the review."]
    elif form.validate_on_submit():
        if review["is_draft"]:
            license_choice = form.license_choice.data
        else:
            license_choice = None  # field was deleted above for published reviews
        if form.text.data == '':
            form.text.data = None
        try:
            db_review.update(
                review_id=review["id"],
                drafted=review["is_draft"],
                text=form.text.data,
                rating=form.rating.data,
                is_draft=(form.state.data == 'draft'),
                license_id=license_choice,
                language=form.language.data,
            )
        except db_exceptions.BadDataException:
            raise BadRequest(lazy_gettext("Changing license of a published review\
                or converting a published review back to drafts is not allowed."))
        flash.success(gettext("Review has been updated."))
        return redirect(url_for('.entity', id=review["id"]))
    else:
        # GET (or failed validation): pre-fill the form with the stored review.
        form.text.data = review["text"]
        form.rating.data = review["rating"]

    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
        # NOTE(review): `entity` here resolves to the `entity` view function
        # defined above, not an entity record — looks like a bug; verify what
        # the template actually expects in this kwarg.
        return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"],
                               entity=entity, spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"])
@review_bp.route('/write/get_language', methods=['POST'])
@login_required
def get_language():
    """Detect and return the most likely language code of the posted text."""
    text = request.form['text']
    return detect(text)
@review_bp.route('/<uuid:id>/delete', methods=['GET', 'POST'])
@login_required
def delete(id):
    """Delete a review. GET shows a confirmation page; POST performs the deletion."""
    review = get_review_or_404(id)
    if review["user"] != current_user and not current_user.is_admin():
        raise Unauthorized(gettext("Only the author or an admin can delete this review."))

    # GET: ask for confirmation before doing anything destructive.
    if request.method != 'POST':
        return render_template('review/delete.html', review=review)

    db_review.delete(review["id"])
    flash.success(gettext("Review has been deleted."))
    return redirect(url_for('user.reviews', user_id=current_user.id))
@review_bp.route('/<uuid:review_id>/vote', methods=['POST'])
@login_required
def vote_submit(review_id):
    """Record the current user's vote (yes/no/neutral) on a review's latest revision."""
    review_id = str(review_id)

    # Which button was pressed? Neither key present means a neutral vote.
    vote = None
    if 'yes' in request.form:
        vote = True
    elif 'no' in request.form:
        vote = False

    review = get_review_or_404(review_id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot rate your own review."))
        return redirect(url_for('.entity', id=review_id))
    # Changing an existing vote doesn't count against the daily limit.
    if current_user.is_vote_limit_exceeded and not db_users.has_voted(current_user.id, review_id):
        flash.error(gettext("You have exceeded your limit of votes per day."))
        return redirect(url_for('.entity', id=review_id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to rate this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=review_id))

    db_vote.submit(
        user_id=current_user.id,
        revision_id=review["last_revision"]["id"],
        vote=vote,  # overwrites an existing vote, if needed
    )
    flash.success(gettext("You have rated this review!"))
    return redirect(url_for('.entity', id=review_id))
@review_bp.route('/<uuid:id>/vote/delete', methods=['GET'])
@login_required
def vote_delete(id):
    """Remove the current user's vote from a review's latest revision.

    Flashes an error (without failing) when the user has not voted on the
    review, then redirects back to the review page either way.
    """
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        vote = db_vote.get(user_id=current_user.id, revision_id=review["last_revision"]["id"])
        db_vote.delete(user_id=vote["user_id"], revision_id=vote["revision_id"])
        # Fix: confirm success only *after* the delete actually ran; previously
        # the success message was flashed before db_vote.delete was called, so
        # a failing delete would still have queued "You have deleted your vote".
        flash.success(gettext("You have deleted your vote for this review!"))
    except db_exceptions.NoDataFoundException:
        flash.error(gettext("This review is not rated yet."))
    return redirect(url_for('.entity', id=id))
@review_bp.route('/<uuid:id>/report', methods=['GET', 'POST'])
@login_required
def report(id):
    """Report a review as spam. GET shows the report form; POST files the report."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot report your own review."))
        return redirect(url_for('.entity', id=id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to report this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=id))

    # Each user may report a given revision at most once.
    last_revision_id = review["last_revision"]["id"]
    existing_report = db_spam_report.get(current_user.id, last_revision_id)
    if existing_report:
        flash.error(gettext("You have already reported this review."))
        return redirect(url_for('.entity', id=id))

    form = ReviewReportForm()
    if form.validate_on_submit():
        db_spam_report.create(last_revision_id, current_user.id, form.reason.data)
        flash.success(gettext("Review has been reported."))
        return redirect(url_for('.entity', id=id))

    return render_template('review/report.html', review=review, form=form)
@review_bp.route('/<uuid:id>/hide', methods=['GET', 'POST'])
@login_required
@admin_view
def hide(id):
    """Admin action: hide a review, log the action, and archive its spam reports."""
    review = get_review_or_404(id)
    if review["is_hidden"]:
        flash.info(gettext("Review is already hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    form = AdminActionForm()
    if form.validate_on_submit():
        db_review.set_hidden_state(review["id"], is_hidden=True)
        db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_HIDE_REVIEW,
                                 reason=form.reason.data, review_id=review["id"])
        # Outstanding spam reports are resolved by hiding, so archive them all.
        pending_reports, _ = db_spam_report.list_reports(review_id=review["id"])
        for pending in pending_reports:
            db_spam_report.archive(pending["user_id"], pending["revision_id"])
        flash.success(gettext("Review has been hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_HIDE_REVIEW.value)
@review_bp.route('/<uuid:id>/unhide', methods=['GET', 'POST'])
@login_required
@admin_view
def unhide(id):
    """Admin action: make a hidden review visible again and log the action."""
    review = get_review_or_404(id)
    if not review["is_hidden"]:
        flash.info(gettext("Review is not hidden."))
        return redirect(url_for('.entity', id=review["id"]))

    form = AdminActionForm()
    if not form.validate_on_submit():
        return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_UNHIDE_REVIEW.value)

    db_review.set_hidden_state(review["id"], is_hidden=False)
    db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_UNHIDE_REVIEW,
                             reason=form.reason.data, review_id=review["id"])
    flash.success(gettext("Review is not hidden anymore."))
    return redirect(url_for('.entity', id=review["id"]))
| 45.202041
| 123
| 0.678676
|
from math import ceil
from brainzutils.musicbrainz_db.exceptions import NoDataFoundException
from flask import Blueprint, render_template, request, redirect, url_for, jsonify
from flask_babel import gettext, get_locale, lazy_gettext
from flask_login import login_required, current_user
from langdetect import detect
from markdown import markdown
from werkzeug.exceptions import Unauthorized, NotFound, Forbidden, BadRequest
import critiquebrainz.db.comment as db_comment
import critiquebrainz.db.moderation_log as db_moderation_log
import critiquebrainz.db.review as db_review
import critiquebrainz.db.spam_report as db_spam_report
import critiquebrainz.db.users as db_users
from critiquebrainz.db import vote as db_vote, exceptions as db_exceptions, revision as db_revision
from critiquebrainz.db.moderation_log import AdminActions
from critiquebrainz.db.review import ENTITY_TYPES
from critiquebrainz.frontend import flash
from critiquebrainz.frontend.external import mbspotify, soundcloud
from critiquebrainz.frontend.external.musicbrainz_db.entities import get_multiple_entities, get_entity_by_id
from critiquebrainz.frontend.forms.comment import CommentEditForm
from critiquebrainz.frontend.forms.log import AdminActionForm
from critiquebrainz.frontend.forms.review import ReviewCreateForm, ReviewEditForm, ReviewReportForm
from critiquebrainz.frontend.login import admin_view
from critiquebrainz.frontend.views import get_avg_rating
from critiquebrainz.utils import side_by_side_diff
review_bp = Blueprint('review', __name__)
RESULTS_LIMIT = 10
def get_review_or_404(review_id):
    """Get a review using review ID or raise error 404."""
    try:
        review = db_review.get_by_id(review_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("Can't find a review with ID: %(review_id)s!", review_id=review_id))
    return review
@review_bp.route('/')
def browse():
    """Render a paginated grid of published reviews, optionally filtered by entity type."""
    entity_type = request.args.get('entity_type', default=None)
    if entity_type == 'all':
        # "all" means no type filter.
        entity_type = None
    page = int(request.args.get('page', default=1))
    if page < 1:
        return redirect(url_for('.browse'))
    limit = 3 * 9  # 9 rows
    offset = (page - 1) * limit
    reviews, count = db_review.list_reviews(sort='published_on', limit=limit, offset=offset, entity_type=entity_type)
    if not reviews:
        # Requested page is past the end: bounce back to the last populated page.
        if page - 1 > count / limit:
            return redirect(url_for('review.browse', page=int(ceil(count / limit))))
        if not entity_type:
            raise NotFound(gettext("No reviews to display."))
    # Loading info about entities for reviews
    entities = [(str(review["entity_id"]), review["entity_type"]) for review in reviews]
    entities_info = get_multiple_entities(entities)
    return render_template('review/browse.html', reviews=reviews, entities=entities_info,
                           page=page, limit=limit, count=count, entity_type=entity_type)
# TODO(psolanki): Refactor this function to remove PyLint warning.
# pylint: disable=too-many-branches
@review_bp.route('/<uuid:id>/revisions/<int:rev>')
@review_bp.route('/<uuid:id>')
def entity(id, rev=None):
    """Display a single review, optionally at a specific revision number.

    Drafts are visible only to their author; hidden reviews only to admins.
    """
    review = get_review_or_404(id)
    # Not showing review if it isn't published yet and not viewed by author.
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"]:
        if not current_user.is_admin():
            raise Forbidden(gettext("Review has been hidden. "
                                    "You need to be an administrator to view it."))
        flash.warn(gettext("Review has been hidden."))
    # External streaming links are fetched only for release groups.
    spotify_mappings = None
    soundcloud_url = None
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
    count = db_revision.get_count(id)
    # Default to the latest revision when none is requested.
    if not rev:
        rev = count
    if rev < count:
        flash.info(gettext('You are viewing an old revision, the review has been updated since then.'))
    elif rev > count:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    # Revisions come back newest-first, so offset count - rev selects revision `rev`.
    revision = db_revision.get(id, offset=count - rev)[0]
    if not review["is_draft"] and current_user.is_authenticated:
        # if user is logged in, get their vote for this review
        try:
            vote = db_vote.get(user_id=current_user.id, revision_id=revision['id'])
        except db_exceptions.NoDataFoundException:
            vote = None
    else:  # otherwise set vote to None, its value will not be used
        vote = None
    if revision["text"] is None:
        review["text_html"] = None
    else:
        # Escape any raw HTML in the markdown source before rendering.
        review["text_html"] = markdown(revision['text'], safe_mode="escape")
    review["rating"] = revision["rating"]
    # A few random other reviews by the same author, excluding this one.
    user_all_reviews, _ = db_review.list_reviews(
        user_id=review["user_id"],
        sort="random",
        exclude=[review["id"]],
    )
    other_reviews = user_all_reviews[:3]
    avg_rating = get_avg_rating(review["entity_id"], review["entity_type"])
    comments, count = db_comment.list_comments(review_id=id)
    for comment in comments:
        comment["text_html"] = markdown(comment["last_revision"]["text"], safe_mode="escape")
    comment_form = CommentEditForm(review_id=id)
    return render_template('review/entity/%s.html' % review["entity_type"], review=review,
                           spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url,
                           vote=vote, other_reviews=other_reviews, avg_rating=avg_rating,
                           comment_count=count, comments=comments, comment_form=comment_form)
@review_bp.route('/<uuid:review_id>/revision/<int:revision_id>')
def redirect_to_entity(review_id, revision_id):
    """Resolve a revision's database ID to its ordinal number and redirect."""
    try:
        rev_no = db_revision.get_revision_number(review_id, revision_id)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision you are looking for does not exist."))
    else:
        return redirect(url_for('.entity', id=review_id, rev=rev_no))
@review_bp.route('/<uuid:id>/revisions/compare')
def compare(id):
    """Render a side-by-side diff of two revisions of a review.

    Revision numbers come from the ?old= and ?new= query parameters and
    default to the two most recent revisions.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    count = db_revision.get_count(id)
    old, new = int(request.args.get('old') or count - 1), int(request.args.get('new') or count)
    if old > count or new > count:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    # Normalize the order so the older revision always renders on the left.
    if old > new:
        return redirect(url_for('.compare', id=id, old=new, new=old))
    # Revisions come back newest-first; offset count - n selects revision n.
    left = db_revision.get(id, offset=count - old)[0]
    right = db_revision.get(id, offset=count - new)[0]
    left['number'], right['number'] = old, new
    left['text'], right['text'] = side_by_side_diff(left['text'], right['text'])
    return render_template('review/compare.html', review=review, left=left, right=right)
@review_bp.route('/<uuid:id>/revisions')
def revisions(id):
    """Show the first page of a review's revision history."""
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        # Fix: wrap the message in gettext() so it is translatable,
        # matching every other view in this module.
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        count = db_revision.get_count(id)
        revisions = db_revision.get(id, limit=RESULTS_LIMIT)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    votes = db_revision.get_all_votes(id)
    # Pair each fetched revision (newest first) with its revision number.
    results = list(zip(reversed(range(count - RESULTS_LIMIT, count)), revisions))
    return render_template('review/revisions.html', review=review, results=results,
                           count=count, limit=RESULTS_LIMIT, votes=votes)
@review_bp.route('/<uuid:id>/revisions/more')
def revisions_more(id):
    """AJAX endpoint returning subsequent pages of a review's revision history."""
    review = get_review_or_404(id)
    if review["is_draft"] and not (current_user.is_authenticated and
                                   current_user == review["user"]):
        # Fix: wrap the message in gettext() so it is translatable,
        # matching every other view in this module.
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    page = int(request.args.get('page', default=0))
    offset = page * RESULTS_LIMIT
    try:
        count = db_revision.get_count(id)
        revisions = db_revision.get(id, limit=RESULTS_LIMIT, offset=offset)
    except db_exceptions.NoDataFoundException:
        raise NotFound(gettext("The revision(s) you are looking for does not exist."))
    votes = db_revision.get_all_votes(id)
    # Pair each fetched revision with its revision number for this page.
    results = list(zip(reversed(range(count - offset - RESULTS_LIMIT, count - offset)), revisions))
    template = render_template('review/revision_results.html', review=review, results=results, votes=votes, count=count)
    return jsonify(results=template, more=(count - offset - RESULTS_LIMIT) > 0)
@review_bp.route('/write/<entity_type>/<entity_id>/', methods=('GET', 'POST'))
@review_bp.route('/write/')
@login_required
def create(entity_type=None, entity_id=None):
    """Create a new review for a MusicBrainz entity.

    Without an entity in the URL, tries to pick one from the query string
    (e.g. ?release_group=<mbid>) and redirects; otherwise sends the user
    to the entity search selector.
    """
    if not (entity_id or entity_type):
        # Look for any supported entity type passed as a query parameter.
        for allowed_type in ENTITY_TYPES:
            if mbid := request.args.get(allowed_type):
                entity_type = allowed_type
                entity_id = mbid
                break
        if entity_type:
            return redirect(url_for('.create', entity_type=entity_type, entity_id=entity_id))
        flash.info(gettext("Please choose an entity to review."))
        return redirect(url_for('search.selector', next=url_for('.create')))
    if entity_type not in ENTITY_TYPES:
        raise BadRequest("You can't write reviews about this type of entity.")
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to write new reviews because your "
                            "account has been blocked by a moderator."))
        return redirect(url_for('user.reviews', user_id=current_user.id))
    # Checking if the user already wrote a review for this entity
    reviews, count = db_review.list_reviews(user_id=current_user.id, entity_id=entity_id, inc_drafts=True, inc_hidden=True)
    review = reviews[0] if count != 0 else None
    if review:
        # Resume drafts, route hidden reviews to their page, refuse duplicates.
        if review['is_draft']:
            return redirect(url_for('review.edit', id=review['id']))
        elif review['is_hidden']:
            return redirect(url_for('review.entity', id=review['id']))
        else:
            flash.error(gettext("You have already published a review for this entity"))
            return redirect(url_for('review.entity', id=review["id"]))
    if current_user.is_review_limit_exceeded:
        flash.error(gettext("You have exceeded your limit of reviews per day."))
        return redirect(url_for('user.reviews', user_id=current_user.id))
    form = ReviewCreateForm(default_license_id=current_user.license_choice, default_language=get_locale())
    if form.validate_on_submit():
        is_draft = form.state.data == 'draft'
        # Store an empty review body as NULL rather than an empty string.
        if form.text.data == '':
            form.text.data = None
        review = db_review.create(user_id=current_user.id, entity_id=entity_id, entity_type=entity_type,
                                  text=form.text.data, rating=form.rating.data, license_id=form.license_choice.data,
                                  language=form.language.data, is_draft=is_draft)
        if form.remember_license.data:
            # Remember the chosen license as the user's default for next time.
            db_users.update(current_user.id, user_new_info={
                "license_choice": form.license_choice.data,
            })
        if is_draft:
            flash.success(gettext("Review has been saved!"))
        else:
            flash.success(gettext("Review has been published!"))
        return redirect(url_for('.entity', id=review['id']))
    try:
        entity = get_entity_by_id(entity_id, entity_type)
    except NoDataFoundException:
        raise NotFound(gettext("Sorry, we couldn't find a %s with that MusicBrainz ID." % entity_type))
    if not entity:
        flash.error(gettext("You can only write a review for an entity that exists on MusicBrainz!"))
        return redirect(url_for('search.selector', next=url_for('.create')))
    # Release groups get streaming links and a dedicated render path.
    if entity_type == 'release_group':
        spotify_mappings = mbspotify.mappings(entity_id)
        soundcloud_url = soundcloud.get_url(entity_id)
        if not form.errors:
            flash.info(gettext("Please provide some text or a rating for this review."))
        return render_template('review/modify/write.html', form=form, entity_type=entity_type, entity=entity,
                               spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    # Other entity types: pick a display title from whichever key exists.
    entity_title = None
    if 'title' in entity:
        entity_title = entity['title']
    elif 'name' in entity:
        entity_title = entity['name']
    if not form.errors:
        flash.info(gettext("Please provide some text or a rating for this review."))
    return render_template('review/modify/write.html', form=form, entity_type=entity_type,
                           entity_title=entity_title, entity=entity)
@review_bp.route('/<uuid:id>/edit', methods=('GET', 'POST'))
@login_required
def edit(id):
    """Edit an existing review; only its author may do so.

    Published reviews cannot change their license, and a submission that
    changes neither text nor rating is rejected with a form-level error.
    """
    review = get_review_or_404(id)
    if review["is_draft"] and current_user != review["user"]:
        raise NotFound(gettext("Can't find a review with the specified ID."))
    if review["user"] != current_user:
        raise Unauthorized(gettext("Only author can edit this review."))
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    form = ReviewEditForm(default_license_id=review["license_id"], default_language=review["language"])
    if not review["is_draft"]:
        # Can't change license if review is published.
        del form.license_choice
    if form.text.data == review['text'] and form.rating.data == review['rating']:
        # Nothing changed; surface a form-level error instead of saving.
        form.errors['edit'] = ["You must edit either text or rating to update the review."]
    elif form.validate_on_submit():
        if review["is_draft"]:
            license_choice = form.license_choice.data
        else:
            license_choice = None
        # Store an empty review body as NULL rather than an empty string.
        if form.text.data == '':
            form.text.data = None
        try:
            db_review.update(
                review_id=review["id"],
                drafted=review["is_draft"],
                text=form.text.data,
                rating=form.rating.data,
                is_draft=(form.state.data == 'draft'),
                license_id=license_choice,
                language=form.language.data,
            )
        except db_exceptions.BadDataException:
            raise BadRequest(lazy_gettext("Changing license of a published review\
                or converting a published review back to drafts is not allowed."))
        flash.success(gettext("Review has been updated."))
        return redirect(url_for('.entity', id=review["id"]))
    else:
        # GET (or invalid submission): pre-fill the form with current values.
        form.text.data = review["text"]
        form.rating.data = review["rating"]
    if review["entity_type"] == 'release_group':
        spotify_mappings = mbspotify.mappings(str(review["entity_id"]))
        soundcloud_url = soundcloud.get_url(str(review["entity_id"]))
        # NOTE(review): `entity` below refers to the module-level `entity` view
        # function, not an entity record -- looks unintentional; confirm what
        # the template actually expects before changing it.
        return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"],
                               entity=entity, spotify_mappings=spotify_mappings, soundcloud_url=soundcloud_url)
    return render_template('review/modify/edit.html', form=form, review=review, entity_type=review["entity_type"])
@review_bp.route('/write/get_language', methods=['POST'])
@login_required
def get_language():
    """Detect and return the language code of the posted review text."""
    text = request.form['text']
    return detect(text)
@review_bp.route('/<uuid:id>/delete', methods=['GET', 'POST'])
@login_required
def delete(id):
    """Confirmation page (GET) and handler (POST) for deleting a review."""
    review = get_review_or_404(id)
    is_author = review["user"] == current_user
    if not (is_author or current_user.is_admin()):
        raise Unauthorized(gettext("Only the author or an admin can delete this review."))
    if request.method != 'POST':
        # GET: show the confirmation page.
        return render_template('review/delete.html', review=review)
    db_review.delete(review["id"])
    flash.success(gettext("Review has been deleted."))
    return redirect(url_for('user.reviews', user_id=current_user.id))
@review_bp.route('/<uuid:review_id>/vote', methods=['POST'])
@login_required
def vote_submit(review_id):
    """Record the current user's vote (yes/no) on a review."""
    review_id = str(review_id)
    # The submit button name encodes the vote; anything else means no vote.
    if 'yes' in request.form:
        vote = True
    elif 'no' in request.form:
        vote = False
    else:
        vote = None
    review = get_review_or_404(review_id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot rate your own review."))
        return redirect(url_for('.entity', id=review_id))
    # Changing an existing vote does not count against the daily limit.
    if current_user.is_vote_limit_exceeded and not db_users.has_voted(current_user.id, review_id):
        flash.error(gettext("You have exceeded your limit of votes per day."))
        return redirect(url_for('.entity', id=review_id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to rate this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=review_id))
    # Votes are attached to the review's latest revision.
    db_vote.submit(
        user_id=current_user.id,
        revision_id=review["last_revision"]["id"],
        vote=vote, )
    flash.success(gettext("You have rated this review!"))
    return redirect(url_for('.entity', id=review_id))
@review_bp.route('/<uuid:id>/vote/delete', methods=['GET'])
@login_required
def vote_delete(id):
    """Remove the current user's vote from a review, then return to it."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    try:
        vote = db_vote.get(user_id=current_user.id, revision_id=review["last_revision"]["id"])
        db_vote.delete(user_id=vote["user_id"], revision_id=vote["revision_id"])
        # Fix: flash success only after the vote has actually been removed;
        # previously the message was emitted before the delete was attempted.
        flash.success(gettext("You have deleted your vote for this review!"))
    except db_exceptions.NoDataFoundException:
        flash.error(gettext("This review is not rated yet."))
    return redirect(url_for('.entity', id=id))
@review_bp.route('/<uuid:id>/report', methods=['GET', 'POST'])
@login_required
def report(id):
    """Let a user report a review as spam; one report per user per revision."""
    review = get_review_or_404(id)
    if review["is_hidden"] and not current_user.is_admin():
        raise NotFound(gettext("Review has been hidden."))
    if review["user"] == current_user:
        flash.error(gettext("You cannot report your own review."))
        return redirect(url_for('.entity', id=id))
    if current_user.is_blocked:
        flash.error(gettext("You are not allowed to report this review because "
                            "your account has been blocked by a moderator."))
        return redirect(url_for('.entity', id=id))
    # Reports are attached to the latest revision; reject duplicates.
    last_revision_id = review["last_revision"]["id"]
    report = db_spam_report.get(current_user.id, last_revision_id)
    if report:
        flash.error(gettext("You have already reported this review."))
        return redirect(url_for('.entity', id=id))
    form = ReviewReportForm()
    if form.validate_on_submit():
        db_spam_report.create(last_revision_id, current_user.id, form.reason.data)
        flash.success(gettext("Review has been reported."))
        return redirect(url_for('.entity', id=id))
    return render_template('review/report.html', review=review, form=form)
@review_bp.route('/<uuid:id>/hide', methods=['GET', 'POST'])
@login_required
@admin_view
def hide(id):
    """Admin action: hide a review and archive its outstanding spam reports.

    GET shows the admin action form; a valid POST hides the review, writes a
    moderation-log entry, and archives every spam report against it.
    """
    review = get_review_or_404(id)
    if review["is_hidden"]:
        flash.info(gettext("Review is already hidden."))
        return redirect(url_for('.entity', id=review["id"]))
    form = AdminActionForm()
    if form.validate_on_submit():
        db_review.set_hidden_state(review["id"], is_hidden=True)
        db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_HIDE_REVIEW,
                                 reason=form.reason.data, review_id=review["id"])
        # Fix: the fetch and the loop below were fused onto one syntactically
        # invalid line; split them so the reports are actually archived.
        review_reports, count = db_spam_report.list_reports(review_id=review["id"])
        for report in review_reports:
            db_spam_report.archive(report["user_id"], report["revision_id"])
        flash.success(gettext("Review has been hidden."))
        return redirect(url_for('.entity', id=review["id"]))
    return render_template('log/action.html', review=review, form=form, action=AdminActions.ACTION_HIDE_REVIEW.value)
@review_bp.route('/<uuid:id>/unhide', methods=['GET', 'POST'])
@login_required
@admin_view
def unhide(id):
    """Admin action: make a previously hidden review visible again."""
    review = get_review_or_404(id)
    if not review["is_hidden"]:
        flash.info(gettext("Review is not hidden."))
        return redirect(url_for('.entity', id=review["id"]))
    form = AdminActionForm()
    if not form.validate_on_submit():
        # GET request (or invalid submission): show the admin action form.
        return render_template('log/action.html', review=review, form=form,
                               action=AdminActions.ACTION_UNHIDE_REVIEW.value)
    db_review.set_hidden_state(review["id"], is_hidden=False)
    db_moderation_log.create(admin_id=current_user.id, action=AdminActions.ACTION_UNHIDE_REVIEW,
                             reason=form.reason.data, review_id=review["id"])
    flash.success(gettext("Review is not hidden anymore."))
    return redirect(url_for('.entity', id=review["id"]))
| true
| true
|
1c496954c9ff5125c6093492798868e790e4c9d0
| 1,004
|
py
|
Python
|
framework/modelhublib/imageconverters/sitkToNumpyConverter.py
|
modelhub-ai/modelhub-engine
|
81e893fb7669ee9912178346efbf828dd8c0410b
|
[
"MIT"
] | 6
|
2018-10-13T10:11:51.000Z
|
2022-02-21T08:28:10.000Z
|
framework/modelhublib/imageconverters/sitkToNumpyConverter.py
|
modelhub-ai/modelhub-docker
|
81e893fb7669ee9912178346efbf828dd8c0410b
|
[
"MIT"
] | 34
|
2018-03-06T16:25:10.000Z
|
2018-06-26T21:55:13.000Z
|
framework/modelhublib/imageconverters/sitkToNumpyConverter.py
|
modelhub-ai/modelhub-engine
|
81e893fb7669ee9912178346efbf828dd8c0410b
|
[
"MIT"
] | 3
|
2019-08-15T18:09:32.000Z
|
2022-02-16T07:55:27.000Z
|
import SimpleITK
import numpy as np
from .imageConverter import ImageConverter
class SitkToNumpyConverter(ImageConverter):
    """
    Image converter turning SimpleITK.Image objects into numpy arrays.
    """

    def _convert(self, image):
        """
        Args:
            image (SimpleITK.Image): Image object to convert.

        Returns:
            Input image object converted to numpy array with 4 dimensions [batchsize, z/color, height, width]

        Raises:
            IOError if input is not of type SimpleITK.Image or cannot be converted for other reasons.
        """
        if not isinstance(image, SimpleITK.Image):
            raise IOError("Image is not of type \"SimpleITK.Image\".")
        return self.__toBatchArray(image)

    def __toBatchArray(self, image):
        # GetArrayFromImage yields (z, y, x) for volumes or (y, x) for 2D slices.
        arr = SimpleITK.GetArrayFromImage(image)
        if arr.ndim == 2:
            # Promote a plain 2D slice to a single-slice volume.
            arr = arr[np.newaxis, :]
        # Prepend the batch dimension and normalize the dtype to float32.
        return arr[np.newaxis, :].astype(np.float32)
| 27.888889
| 109
| 0.615538
|
import SimpleITK
import numpy as np
from .imageConverter import ImageConverter
class SitkToNumpyConverter(ImageConverter):
def _convert(self, image):
if isinstance(image, SimpleITK.Image):
return self.__convertToNumpy(image)
else:
raise IOError("Image is not of type \"SimpleITK.Image\".")
def __convertToNumpy(self, image):
npArr = SimpleITK.GetArrayFromImage(image)
if npArr.ndim == 2:
npArr = npArr[np.newaxis,:]
npArr = npArr[np.newaxis,:].astype(np.float32)
return npArr
| true
| true
|
1c49697d08ff8fc6969f3ffb49a5cca3fa09e575
| 6,405
|
py
|
Python
|
code/05-soz_subgraph.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
code/05-soz_subgraph.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
code/05-soz_subgraph.py
|
akashpattnaik/pre-ictal-similarity
|
85f963aa0c6d2d0a6e971ffa005c400e136a0a76
|
[
"MIT"
] | null | null | null |
# %%
# %load_ext autoreload
# %autoreload 2
# Imports and environment setup
import numpy as np
import sys
import os
from numpy.core.fromnumeric import sort
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
from scipy.stats import mannwhitneyu
code_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(ospj(code_path, 'tools'))
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
# Get paths from config file and metadata
with open(ospj(code_path, "config.json")) as f:
    config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes = config['electrodes']  # electrode-selection tag interpolated into data file names
bands = config['bands']  # frequency-band tag interpolated into data file names
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
# Patient-level clinical metadata (used for seizure start times below).
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
    metadata = json.load(f)['PATIENTS']
seizure_metadata = pd.read_excel(ospj(data_path, "seizure_metadata.xlsx"))
# flags
SAVE_PLOT = True
NMF_FLAG = True
# Pre-ictal analysis window sizes, in seconds.
FIXED_PREICTAL_SEC = 60 * 30
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15) # 15 min buffer
def soz_state(H, soz_electrodes, metric="max_all", is_zscore=False, n_iter=10000):
    '''
    Identify which NMF component (state) is most sensitive to the seizure
    onset zone (SOZ), by two criteria computed side by side.

    Args:
        H: NMF component matrix, shape (n_components, n_bands * n_electrodes)
        soz_electrodes: boolean array; True for SOZ electrodes, False otherwise
        metric: retained for API compatibility; currently unused
        is_zscore: if True, z-score each component across electrodes first
        n_iter: number of random electrode resamples for the null distribution
            (new parameter; default preserves the previous hard-coded value)

    Returns:
        Tuple of (state index by resampling z-score,
                  state index by Mann-Whitney U statistic,
                  fraction of zero entries in the U-selected component,
                  max variance across bands of the U-selected component)
    '''
    n_components = H.shape[0]
    n_electrodes = soz_electrodes.shape[0]

    # reshape to (component, frequency band, electrode)
    component_arr = np.reshape(H, (n_components, -1, n_electrodes))

    if is_zscore:
        component_z = np.zeros(component_arr.shape)
        for i_comp in range(n_components):
            component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
        component_arr = component_z

    n_soz = np.sum(soz_electrodes)

    u_stats = np.zeros(n_components)
    null_z = np.zeros(n_components)
    for i_comp in range(n_components):
        # Null distribution: mean bandpower of random electrode subsets of
        # size n_soz (sampled with replacement, as before).
        means = np.zeros(n_iter)
        for it in range(n_iter):  # renamed from `iter`, which shadowed the builtin
            means[it] = np.mean(component_arr[i_comp, :, np.random.choice(n_electrodes, n_soz)])
        # Append the true SOZ mean and take its z-score against the null.
        means = np.append(means, np.mean(component_arr[i_comp, :, soz_electrodes]))
        null_z[i_comp] = zscore(means)[-1]

        # Per-band Mann-Whitney U comparing SOZ vs non-SOZ electrodes.
        sz_u_stats = np.zeros(component_arr.shape[1])
        for i_band in range(component_arr.shape[1]):
            stat, p = mannwhitneyu(component_arr[i_comp][i_band, soz_electrodes],
                                   component_arr[i_comp][i_band, ~soz_electrodes])
            sz_u_stats[i_band] = stat
        u_stats[i_comp] = np.max(sz_u_stats)

    pt_soz_state_resamp = np.argmax(np.abs(null_z))
    pt_soz_state_u = np.argmax(u_stats)

    # Sparsity and cross-band variance diagnostics of the U-selected state.
    pct_non_zero = np.sum(component_arr[pt_soz_state_u, :, :] == 0) / np.size(component_arr[pt_soz_state_u, :, :])
    var = np.max(np.var(component_arr[pt_soz_state_u, :, :], axis=1))
    return pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var
# Load electrode/region localization for every patient in the cohort.
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))

# For each lead seizure, find the NMF state most sensitive to the SOZ and
# record it (plus sparsity/variance diagnostics) in the metadata table.
for index, row in seizure_metadata.iterrows():
    pt = row["Patient"]
    pt_data_path = ospj(data_path, pt)
    sz_num = row["Seizure number"]

    # Skip seizures filtered out earlier in the pipeline, and non-categorized ones.
    remaining_sz_ids = np.load(ospj(pt_data_path, "remaining_sz_ids.npy"))
    if sz_num not in remaining_sz_ids:
        continue
    if row["Seizure category"] == "Other":
        continue
    print("Calculating dissimilarity for seizure {}, {}".format(sz_num, pt))

    t_sec = np.load(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(bands, electrodes)))
    sz_id = np.load(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(bands, electrodes)))
    W = np.load(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(bands, electrodes, sz_num)))
    H = np.load(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz_{}.npy".format(bands, electrodes, sz_num)))
    n_components = H.shape[0]

    # pull and format electrode metadata
    electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes)))
    target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]

    pt_index = patients.index(pt)
    sz_starts = pull_sz_starts(pt, metadata)

    # find seizure onset zone and state with most seizure onset zone
    soz_electrodes = np.array(np.squeeze(soz[pt_index][target_electrode_region_inds, :]), dtype=bool)
    pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var = soz_state(H, soz_electrodes)

    seizure_metadata.at[index, 'SOZ Sensitive State (resampling)'] = pt_soz_state_resamp
    # Fix: the Mann-Whitney column was assigned twice on consecutive,
    # identical lines; the duplicate has been removed.
    seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
    seizure_metadata.at[index, 'Ratio of non-zero component entries'] = pct_non_zero
    seizure_metadata.at[index, 'Maximum variance across bands'] = var

    np.save(ospj(pt_data_path, "soz_electrodes_band-{}_elec-{}.npy".format(bands, electrodes)), soz_electrodes)

seizure_metadata.to_excel(ospj(data_path, "seizure_metadata_with_soz_subgraph.xlsx"))
# %%
| 38.584337
| 143
| 0.735207
|
import numpy as np
import sys
import os
from numpy.core.fromnumeric import sort
import pandas as pd
import json
from scipy.io import loadmat
import matplotlib.pyplot as plt
from tqdm import tqdm
from os.path import join as ospj
from scipy.stats import zscore
import time
from kneed import KneeLocator
from scipy.stats import mannwhitneyu
code_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(ospj(code_path, 'tools'))
from plot_spectrogram import plot_spectrogram
from movmean import movmean
from pull_sz_starts import pull_sz_starts
from pull_patient_localization import pull_patient_localization
from mpl_toolkits.axes_grid1 import make_axes_locatable
from time2ind import time2ind
from fastdtw import fastdtw
from scipy.spatial.distance import euclidean
from sklearn.decomposition import NMF
from sklearn.metrics.cluster import adjusted_rand_score
import warnings
from sklearn.exceptions import ConvergenceWarning
warnings.filterwarnings(action='ignore', category=ConvergenceWarning)
with open(ospj(code_path, "config.json")) as f:
config = json.load(f)
repo_path = config['repositoryPath']
metadata_path = config['metadataPath']
palette = config['lightColors']
DTW_FLAG = config['flags']["DTW_FLAG"]
electrodes = config['electrodes']
bands = config['bands']
data_path = ospj(repo_path, 'data')
figure_path = ospj(repo_path, 'figures')
metadata_fname = ospj(metadata_path, "DATA_MASTER.json")
with open(metadata_fname) as f:
metadata = json.load(f)['PATIENTS']
seizure_metadata = pd.read_excel(ospj(data_path, "seizure_metadata.xlsx"))
SAVE_PLOT = True
NMF_FLAG = True
FIXED_PREICTAL_SEC = 60 * 30
LEAD_SZ_WINDOW_SEC = (FIXED_PREICTAL_SEC + 60 * 15)
def soz_state(H, soz_electrodes, metric="max_all", is_zscore=False):
n_components = H.shape[0]
n_electrodes = soz_electrodes.shape[0]
component_arr = np.reshape(H, (n_components, -1, n_electrodes))
if is_zscore:
component_z = np.zeros(component_arr.shape)
for i_comp in range(n_components):
component_z[i_comp, :, :] = zscore(component_arr[i_comp, :, :], axis=1)
component_arr = component_z
sort_soz_inds = np.argsort(soz_electrodes)
n_soz = np.sum(soz_electrodes)
n_non_soz = n_electrodes - n_soz
n_iter = 10000
u_stats = np.zeros(n_components)
null_z = np.zeros(n_components)
for i_comp in range(n_components):
means = np.zeros(n_iter)
for iter in range(n_iter):
means[iter] = np.mean(component_arr[i_comp, :, np.random.choice(n_electrodes, n_soz)])
means = np.append(means, np.mean(component_arr[i_comp, :, soz_electrodes]))
null_z[i_comp] = zscore(means)[-1]
sz_u_stats = np.zeros(component_arr.shape[1])
for i in range(component_arr.shape[1]):
stat, p = mannwhitneyu(component_arr[i_comp][i, soz_electrodes], component_arr[i_comp][i, ~soz_electrodes])
sz_u_stats[i] = stat
u_stats[i_comp] = np.max(sz_u_stats)
pt_soz_state_resamp = np.argmax(np.abs(null_z))
pt_soz_state_u = np.argmax(u_stats)
pct_non_zero = np.sum(component_arr[pt_soz_state_u,:,:] == 0) / np.size(component_arr[pt_soz_state_u,:,:])
var = np.max(np.var(component_arr[pt_soz_state_u,:,:], axis=1))
return pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var
patient_localization_mat = loadmat(ospj(metadata_path, 'patient_localization_final.mat'))['patient_localization']
patients, labels, ignore, resect, gm_wm, coords, region, soz = pull_patient_localization(ospj(metadata_path, 'patient_localization_final.mat'))
for index, row in seizure_metadata.iterrows():
pt = row["Patient"]
pt_data_path = ospj(data_path, pt)
sz_num = row["Seizure number"]
remaining_sz_ids = np.load(ospj(pt_data_path, "remaining_sz_ids.npy"))
if sz_num not in remaining_sz_ids:
continue
if row["Seizure category"] == "Other":
continue
print("Calculating dissimilarity for seizure {}, {}".format(sz_num, pt))
t_sec = np.load(ospj(pt_data_path, "lead_sz_t_sec_band-{}_elec-{}.npy".format(bands, electrodes)))
sz_id = np.load(ospj(pt_data_path, "lead_sz_sz_id_band-{}_elec-{}.npy".format(bands, electrodes)))
W = np.load(ospj(pt_data_path, "nmf_expression_band-{}_elec-{}_sz-{}.npy".format(bands, electrodes, sz_num)))
H = np.load(ospj(pt_data_path, "nmf_components_band-{}_elec-{}_sz_{}.npy".format(bands, electrodes, sz_num)))
n_components = H.shape[0]
electrodes_mat = loadmat(ospj(pt_data_path, "selected_electrodes_elec-{}.mat".format(electrodes)))
target_electrode_region_inds = electrodes_mat['targetElectrodesRegionInds'][0]
pt_index = patients.index(pt)
sz_starts = pull_sz_starts(pt, metadata)
soz_electrodes = np.array(np.squeeze(soz[pt_index][target_electrode_region_inds, :]), dtype=bool)
pt_soz_state_resamp, pt_soz_state_u, pct_non_zero, var = soz_state(H, soz_electrodes)
seizure_metadata.at[index, 'SOZ Sensitive State (resampling)'] = pt_soz_state_resamp
seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
seizure_metadata.at[index, 'SOZ Sensitive State (mann-whitney)'] = pt_soz_state_u
seizure_metadata.at[index, 'Ratio of non-zero component entries'] = pct_non_zero
seizure_metadata.at[index, 'Maximum variance across bands'] = var
np.save(ospj(pt_data_path, "soz_electrodes_band-{}_elec-{}.npy".format(bands, electrodes)), soz_electrodes)
seizure_metadata.to_excel(ospj(data_path, "seizure_metadata_with_soz_subgraph.xlsx"))
| true
| true
|
1c4969f2e22ab20faedf093583573663bfaa39a7
| 2,013
|
py
|
Python
|
services/backend/thiamsu/forms.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 10
|
2020-08-25T08:57:36.000Z
|
2021-12-31T01:04:18.000Z
|
services/backend/thiamsu/forms.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 13
|
2020-04-26T08:41:30.000Z
|
2021-06-10T17:34:25.000Z
|
services/backend/thiamsu/forms.py
|
LKKTGB/thiamsu
|
f08d453c6b35c801c57f2501e42565da56900814
|
[
"MIT"
] | 1
|
2020-09-06T17:54:13.000Z
|
2020-09-06T17:54:13.000Z
|
from django import forms
from django.forms import formset_factory
from django.forms.formsets import BaseFormSet
from django.forms.widgets import HiddenInput
from thiamsu.utils import get_youtube_id_from_url
class SongAdminForm(forms.ModelForm):
    """Admin form for songs; validates that the YouTube URL is well-formed."""

    def clean_youtube_url(self):
        url = self.cleaned_data["youtube_url"]
        if not get_youtube_id_from_url(url):
            raise forms.ValidationError(
                "Invalid URL: %(url)s",
                code="invalid youtube url",
                params={"url": url},
            )
        return url
class TranslationForm(forms.Form):
    """One translated lyric line; line_no and lang are fixed hidden fields."""

    line_no = forms.IntegerField(widget=forms.HiddenInput)
    lang = forms.CharField(max_length=5, widget=forms.HiddenInput)
    content = forms.CharField(max_length=1000, required=False)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # The hidden bookkeeping fields must not be edited by the client.
        for field_name in ("line_no", "lang"):
            self.fields[field_name].widget.attrs["readonly"] = True
class BaseTranslationFormSet(BaseFormSet):
    """Formset that labels each translation form with the original lyric line."""

    def __init__(self, original_lyrics=None, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Only label lines when the lyrics align one-to-one with the forms.
        if original_lyrics and len(original_lyrics) == len(self.forms):
            for lyric, form in zip(original_lyrics, self.forms):
                form.fields["content"].label = lyric
# No extra blank forms: each form corresponds to exactly one lyric line.
TranslationFormSet = formset_factory(
    TranslationForm, formset=BaseTranslationFormSet, extra=0
)
class SongReadonlyForm(forms.Form):
    # Toggle indicating whether the song should be treated as read-only.
    readonly = forms.BooleanField(required=False)
class UserFavoriteSongForm(forms.Form):
    """Add (POST) or remove (DELETE) a song from the user's favorites."""

    method = forms.ChoiceField(choices=[(m, m) for m in ("POST", "DELETE")])
    song_id = forms.IntegerField()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Both fields are filled programmatically, never shown to the user.
        for field_name in ("method", "song_id"):
            self.fields[field_name].widget = HiddenInput()
| 33.55
| 78
| 0.682067
|
from django import forms
from django.forms import formset_factory
from django.forms.formsets import BaseFormSet
from django.forms.widgets import HiddenInput
from thiamsu.utils import get_youtube_id_from_url
class SongAdminForm(forms.ModelForm):
def clean_youtube_url(self):
youtube_id = get_youtube_id_from_url(self.cleaned_data["youtube_url"])
if not youtube_id:
raise forms.ValidationError(
"Invalid URL: %(url)s",
code="invalid youtube url",
params={"url": self.cleaned_data["youtube_url"]},
)
return self.cleaned_data["youtube_url"]
class TranslationForm(forms.Form):
line_no = forms.IntegerField(widget=forms.HiddenInput)
lang = forms.CharField(max_length=5, widget=forms.HiddenInput)
content = forms.CharField(max_length=1000, required=False)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["line_no"].widget.attrs["readonly"] = True
self.fields["lang"].widget.attrs["readonly"] = True
class BaseTranslationFormSet(BaseFormSet):
def __init__(self, original_lyrics=None, *args, **kwargs):
super().__init__(*args, **kwargs)
if not original_lyrics or len(original_lyrics) != len(self.forms):
return
for i, form in enumerate(self.forms):
form.fields["content"].label = original_lyrics[i]
TranslationFormSet = formset_factory(
TranslationForm, formset=BaseTranslationFormSet, extra=0
)
class SongReadonlyForm(forms.Form):
readonly = forms.BooleanField(required=False)
class UserFavoriteSongForm(forms.Form):
method = forms.ChoiceField(choices=[(m, m) for m in ("POST", "DELETE")])
song_id = forms.IntegerField()
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields["method"].widget = HiddenInput()
self.fields["song_id"].widget = HiddenInput()
| true
| true
|
1c496b2598cdfd5fc69e5d28a1e867bb4e332220
| 2,682
|
py
|
Python
|
tests/test_16_cc_oauth2_service.py
|
peppelinux/JWTConnect-Python-OidcService
|
af979f45666bc47b62c69ddcbb199a15c7b96597
|
[
"Apache-2.0"
] | 1
|
2020-09-30T13:07:46.000Z
|
2020-09-30T13:07:46.000Z
|
tests/test_16_cc_oauth2_service.py
|
peppelinux/JWTConnect-Python-OidcService
|
af979f45666bc47b62c69ddcbb199a15c7b96597
|
[
"Apache-2.0"
] | null | null | null |
tests/test_16_cc_oauth2_service.py
|
peppelinux/JWTConnect-Python-OidcService
|
af979f45666bc47b62c69ddcbb199a15c7b96597
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from oidcservice.service_factory import service_factory
from oidcservice.service_context import ServiceContext
from oidcservice.state_interface import InMemoryStateDataBase
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
@pytest.fixture(autouse=True)
def create_service(self):
client_config = {
'client_id': 'client_id',
'client_secret': 'another password'
}
service_context = ServiceContext(config=client_config)
db = InMemoryStateDataBase()
self.service = {
'token': service_factory("CCAccessToken",
['oauth2/client_credentials', 'oauth2'],
state_db=db,
service_context=service_context),
'refresh_token': service_factory("CCRefreshAccessToken",
['oauth2/client_credentials',
'oauth2'],
state_db=db,
service_context=service_context)
}
self.service['token'].endpoint = 'https://example.com/token'
self.service['refresh_token'].endpoint = 'https://example.com/token'
def test_token_get_request(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.service['token']
_info = _srv.get_request_parameters(request_args=request_args)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info['body'] == 'grant_type=client_credentials'
assert _info['headers'] == {
'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_refresh_token_get_request(self):
_srv = self.service['token']
_srv.update_service_context({
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_srv = self.service['refresh_token']
_info = _srv.get_request_parameters()
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info[
'body'] == 'grant_type=refresh_token'
assert _info['headers'] == {
'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
'Content-Type': 'application/x-www-form-urlencoded'
}
| 41.90625
| 77
| 0.568978
|
import pytest
from oidcservice.service_factory import service_factory
from oidcservice.service_context import ServiceContext
from oidcservice.state_interface import InMemoryStateDataBase
KEYDEF = [{"type": "EC", "crv": "P-256", "use": ["sig"]}]
class TestRP():
@pytest.fixture(autouse=True)
def create_service(self):
client_config = {
'client_id': 'client_id',
'client_secret': 'another password'
}
service_context = ServiceContext(config=client_config)
db = InMemoryStateDataBase()
self.service = {
'token': service_factory("CCAccessToken",
['oauth2/client_credentials', 'oauth2'],
state_db=db,
service_context=service_context),
'refresh_token': service_factory("CCRefreshAccessToken",
['oauth2/client_credentials',
'oauth2'],
state_db=db,
service_context=service_context)
}
self.service['token'].endpoint = 'https://example.com/token'
self.service['refresh_token'].endpoint = 'https://example.com/token'
def test_token_get_request(self):
request_args = {'grant_type': 'client_credentials'}
_srv = self.service['token']
_info = _srv.get_request_parameters(request_args=request_args)
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info['body'] == 'grant_type=client_credentials'
assert _info['headers'] == {
'Authorization': 'Basic Y2xpZW50X2lkOmFub3RoZXIrcGFzc3dvcmQ=',
'Content-Type': 'application/x-www-form-urlencoded'
}
def test_refresh_token_get_request(self):
_srv = self.service['token']
_srv.update_service_context({
"access_token": "2YotnFZFEjr1zCsicMWpAA",
"token_type": "example",
"expires_in": 3600,
"refresh_token": "tGzv3JOkF0XG5Qx2TlKWIA",
"example_parameter": "example_value"
})
_srv = self.service['refresh_token']
_info = _srv.get_request_parameters()
assert _info['method'] == 'POST'
assert _info['url'] == 'https://example.com/token'
assert _info[
'body'] == 'grant_type=refresh_token'
assert _info['headers'] == {
'Authorization': 'Bearer tGzv3JOkF0XG5Qx2TlKWIA',
'Content-Type': 'application/x-www-form-urlencoded'
}
| true
| true
|
1c496c2139c67302856260e7708094386979d059
| 1,100
|
py
|
Python
|
src/txamqp/test/test_heartbeat.py
|
sbraz/txamqp
|
10caf998dd8c05a7321cd10c24a83832bf58bd0c
|
[
"Apache-2.0"
] | 17
|
2016-12-20T13:21:18.000Z
|
2021-09-22T07:44:15.000Z
|
src/txamqp/test/test_heartbeat.py
|
sbraz/txamqp
|
10caf998dd8c05a7321cd10c24a83832bf58bd0c
|
[
"Apache-2.0"
] | 13
|
2017-07-05T07:52:33.000Z
|
2022-03-25T10:14:15.000Z
|
src/txamqp/test/test_heartbeat.py
|
sbraz/txamqp
|
10caf998dd8c05a7321cd10c24a83832bf58bd0c
|
[
"Apache-2.0"
] | 12
|
2017-06-27T18:48:20.000Z
|
2021-02-15T12:22:11.000Z
|
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from txamqp.testlib import TestBase
from txamqp.protocol import AMQClient
class SpyAMQClient(AMQClient):
called_reschedule_check = 0
called_send_hb = 0
def reschedule_check_heartbeat(self, dummy=None):
AMQClient.reschedule_check_heartbeat(self)
self.called_reschedule_check += 1
def send_heartbeat(self):
AMQClient.send_heartbeat(self)
self.called_send_hb += 1
class HeartbeatTests(TestBase):
"""
Tests handling of heartbeat frames
"""
heartbeat = 1
clientClass = SpyAMQClient
def test_heartbeat(self):
"""
Test that heartbeat frames are sent and received
"""
d = Deferred()
def check_pulse(_):
self.assertTrue(self.client.called_send_hb, "A heartbeat frame was recently sent")
self.assertTrue(self.client.called_reschedule_check, "A heartbeat frame was recently received")
d.addCallback(check_pulse)
reactor.callLater(3, d.callback, None)
return d
| 27.5
| 107
| 0.690909
|
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from txamqp.testlib import TestBase
from txamqp.protocol import AMQClient
class SpyAMQClient(AMQClient):
called_reschedule_check = 0
called_send_hb = 0
def reschedule_check_heartbeat(self, dummy=None):
AMQClient.reschedule_check_heartbeat(self)
self.called_reschedule_check += 1
def send_heartbeat(self):
AMQClient.send_heartbeat(self)
self.called_send_hb += 1
class HeartbeatTests(TestBase):
heartbeat = 1
clientClass = SpyAMQClient
def test_heartbeat(self):
d = Deferred()
def check_pulse(_):
self.assertTrue(self.client.called_send_hb, "A heartbeat frame was recently sent")
self.assertTrue(self.client.called_reschedule_check, "A heartbeat frame was recently received")
d.addCallback(check_pulse)
reactor.callLater(3, d.callback, None)
return d
| true
| true
|
1c496c2c582376bc0e7ee6a044286bdeda0d3676
| 25,695
|
py
|
Python
|
tools/management/commands/upload_excel_bias.py
|
protwis/protwis
|
da9a455499343ab4e12902b99dcc259cda4a8d38
|
[
"Apache-2.0"
] | 21
|
2016-01-20T09:33:14.000Z
|
2021-12-20T19:19:45.000Z
|
tools/management/commands/upload_excel_bias.py
|
protwis/protwis
|
da9a455499343ab4e12902b99dcc259cda4a8d38
|
[
"Apache-2.0"
] | 75
|
2016-02-26T16:29:58.000Z
|
2022-03-21T12:35:13.000Z
|
tools/management/commands/upload_excel_bias.py
|
protwis/protwis
|
da9a455499343ab4e12902b99dcc259cda4a8d38
|
[
"Apache-2.0"
] | 77
|
2016-01-22T08:44:26.000Z
|
2022-02-01T15:54:56.000Z
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from decimal import Decimal
from build.management.commands.base_build import Command as BaseBuild
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from residue.models import Residue
from protein.models import Protein, ProteinCouplings
from ligand.models import BiasedExperiment, ExperimentAssay, BiasedExperimentVendors, AnalyzedExperiment, ExperimentAssayAuthors, Ligand, LigandProperities, LigandType, LigandVendorLink
from mutation.models import Mutation
from ligand.functions import get_or_make_ligand
from common.models import WebLink, WebResource, Publication
from django.db import connection
import queue
import logging
import os
from datetime import datetime
import xlrd
import operator
import traceback
import time
import math
import pytz
import re
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Reads bias data and imports it'
structure_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'bias_data'])
publication_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
# delete any existing structure data
if options['purge']:
try:
print('Started purging bias data')
self.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
self.logger.error(msg)
# import the structure data
self.prepare_all_data(options['filename'])
try:
print('CREATING BIAS DATA')
print(options['filename'])
# self.prepare_all_data(options['filename'])
self.logger.info('COMPLETED CREATING BIAS')
except Exception as msg:
print('--error--', msg, '\n')
self.logger.info("The error appeared in def handle")
def purge_bias_data(self):
delete_bias_excel = BiasedExperiment.objects.all()
delete_bias_excel.delete()
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
self.logger.info("Bias data purgedAk47aspirine1Ak47aspirine1Ak47aspirine1Ak47aspirine1")
def loaddatafromexcel(self, excelpath):
'''
Reads excel file (require specific excel sheet)
'''
num_rows = 0
try:
workbook = xlrd.open_workbook(excelpath)
worksheets = workbook.sheet_names()
temp = []
for worksheet_name in worksheets:
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0 # skip first, otherwise -1
while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
temprow = []
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
cell_type = worksheet.cell_type(curr_row, curr_cell)
# fix wrong spaced cells
if cell_value == " ":
cell_value = ""
temprow.append(cell_value)
temp.append(temprow)
# if curr_row>10: break
return temp
except:
self.logger.info(
"The error appeared during reading the excel", num_rows)
def initialize_return_row(self,excel_row):
d = dict()
d['submitting_group'] = None
d['reference'] = None
d['ligand_name'] = None
d['ligand_type'] = None
d['ligand_id'] = None
d['ligand_reference'] = None
d['emax_ligand_name'] = None
d['emax_ligand_type'] = None
d['emax_ligand_id'] = None
d['receptor'] = None
d['receptor_uniprot_id'] = None
d['cell_line'] = None
d['protein'] = None
d['protein_assay'] = None
d['protein_assay_method'] = None
d['protein_time_resolved'] = None
d['protein_ligand_function'] = None
d['protein_mtype'] = None
d['protein_relation'] = None
d['protein_activity_quantity'] = None
d['protein_activity_quantity_unit'] = None
d['protein_activity_quality'] = None
d['protein_efficacy_measure'] = None
d['protein_efficacy_relation'] = None
d['protein_efficacy_quantity'] = 0.0
d['protein_efficacy_quantity_unit'] = None
d['pathway_bias_initial'] = None
d['pathway_bias'] = None
d['protein_activity_equation'] = None
d['protein_efficacy_equation'] = None
d['auxiliary_protein'] = None
d['source_file'] = excel_row
self.logger.info("empty dict created error")
return d
def return_row(self, r,excel_row):
d = self.initialize_return_row(excel_row)
d['submitting_group'] = r[0]
d['reference'] = r[1]
try:
d['ligand_name'] = str(int(r[4]))
except:
d['ligand_name'] = r[4]
d['ligand_type'] = r[5]
try:
d['ligand_id'] = int(r[6])
except:
d['ligand_id'] = r[6]
d['ligand_reference'] = r[7]
d['emax_ligand_name'] = r[8]
d['emax_ligand_type'] = r[9]
try:
d['emax_ligand_id'] = int(r[10])
except:
d['emax_ligand_id'] = r[10]
d['receptor'] = r[11].lower().strip()
d['receptor_uniprot_id'] = r[12]
d['cell_line'] = r[13]
d['protein'] = r[14].strip().replace('α','a').replace('β','B').replace('g','G').lower()
d['protein_assay'] = r[15].strip()
d['protein_assay_method'] = r[16]
d['protein_time_resolved'] = r[17]
d['protein_ligand_function'] = r[18]
d['protein_mtype'] = r[19]
d['protein_relation'] = r[20]
d['protein_activity_quantity_unit'] = r[22]
d['protein_activity_quality'] = r[23]
d['protein_efficacy_measure'] = r[24]
d['protein_efficacy_relation'] = r[25]
d['protein_efficacy_quantity_unit'] = r[27]
if r[21] is not None and r[21] != '':
d['protein_activity_quantity'] = r[21]
if r[26] is not None and r[26] != '':
d['protein_efficacy_quantity'] = r[26]
if r[28] is not None and r[28] != '':
try:
d['pathway_bias_initial'] = float(r[28])
except:
try:
d['pathway_bias_initial'] = float(r[28].replace('\U00002013', '-'))
except:
d['pathway_bias_initial'] = r[28]
self.logger.info("pathway_bias_initial error")
if r[29] is not None and r[29] != '':
try:
d['pathway_bias'] = float(r[29])
except:
try:
d['pathway_bias'] = float(r[29].replace('\U00002013', '-'))
except:
d['pathway_bias'] = None
d['auxiliary_protein'] = r[30]
d['source_file'] = excel_row
return d
def analyse_rows(self, rows, source_file):
"""
Reads excel rows one by one
"""
skipped = list()
# Analyse the rows from excel and assign the right headers
temp = []
for i, r in enumerate(rows, 1):
d = dict()
# code to skip rows in excel for faster testing
# if i < 7609:
# continue
# if i > 838:
# break
if i % 100 == 0:
print(i)
d = self.return_row(r=r,excel_row=i)
try:
d['protein_activity_quantity'] = re.sub('[^\d\.,]', '', d['protein_activity_quantity'])
d['protein_activity_quantity'] = round(float(d['protein_activity_quantity']),2)
except:
d['protein_activity_quantity'] = d['protein_activity_quantity']
try:
d['protein_efficacy_quantity'] = round(d['protein_efficacy_quantity'],0)
except:
d['protein_efficacy_quantity'] = d['protein_efficacy_quantity']
d['protein_activity_quantity'], d['protein_mtype'] = self.fetch_measurements(d['protein_activity_quantity'],
d['protein_mtype'],
d['protein_activity_quantity_unit'])
if (d['protein'] == '' or
d['protein'] == None):
if d['protein_assay'] == 'pERK1/2 activation' or d['protein_assay'] =="pERK1-2":
d['protein'] = 'pERK1-2'
family = self.define_g_family(d['protein'].lower(), d['protein_assay'])
pub = self.fetch_publication(d['reference'])
l = self.fetch_ligand(
d['ligand_id'], d['ligand_type'], d['ligand_name'], d['source_file'])
#fetch endogenous ligand
protein = self.fetch_protein(d['receptor'], d['source_file'])
# fetch reference_ligand
reference_ligand = self.fetch_ligand(
d['emax_ligand_id'], d['emax_ligand_type'], d['emax_ligand_name'], d['source_file'])
# fetch protein
protein = self.fetch_protein(d['receptor'], d['source_file'])
if protein == None:
skipped.append(d)
continue
end_ligand = self.fetch_endogenous(protein)
auxiliary_protein = self.fetch_protein(d['auxiliary_protein'], d['source_file'])
if l == None:
print('*************error row',d,l)
## TODO: check if it was already uploaded
experiment_entry = BiasedExperiment(submission_author=d['submitting_group'],
publication=pub,
ligand=l,
receptor=protein,
auxiliary_protein = auxiliary_protein,
endogenous_ligand = end_ligand,
ligand_source_id = d['ligand_id'],
ligand_source_type = d['ligand_type'],
)
# try:
experiment_entry.save()
self.fetch_vendor(l,experiment_entry)
# except:
# print('skipping line', d)
# continue
experiment_assay = ExperimentAssay(biased_experiment=experiment_entry,
signalling_protein=d['protein'],
family = family,
cell_line=d['cell_line'],
assay_type=d['protein_assay'],
assay_measure=d['protein_assay_method'],
assay_time_resolved=d['protein_time_resolved'],
ligand_function=d['protein_ligand_function'],
quantitive_measure_type=d['protein_mtype'],
quantitive_activity=d['protein_activity_quantity'],
quantitive_activity_sign=d['protein_activity_equation'],
quantitive_unit=d['protein_activity_quantity_unit'],
qualitative_activity=d['protein_activity_quality'],
quantitive_efficacy=d['protein_efficacy_quantity'],
efficacy_measure_type=d['protein_efficacy_measure'],
efficacy_sign=d['protein_efficacy_equation'],
efficacy_unit=d['protein_efficacy_quantity_unit'],
bias_reference=d['ligand_reference'],
bias_value=d['pathway_bias'],
bias_value_initial=d['pathway_bias_initial'],
emax_ligand_reference=reference_ligand
)
experiment_assay.save()
#fetch authors
self.fetch_publication_authors(pub,experiment_assay)
temp.append(d)
return temp
def fetch_publication_authors(self,publication, experiment_assay):
counter = 0
author_list = list()
if publication.authors != None:
for authors in publication.authors.split(','):
author_list.append(authors.strip())
author_list.reverse()
for i in author_list:
if counter < 3:
assay_author = ExperimentAssayAuthors(experiment = experiment_assay,
author=i)
assay_author.save()
counter=counter+1
# assay_author = ExperimentAssayAuthors(experiment = experiment_assay,
def fetch_measurements(self, potency, p_type, unit):
if potency is not None:
if p_type.lower() == 'pec50':
potency = 10**(potency*(-1))
p_type = 'EC50'
elif p_type.lower() == 'logec50':
potency = 10**(potency)
p_type = 'EC50'
elif p_type.lower() == 'pic50':
potency = 10**(potency*(-1))
p_type = 'IC50'
elif p_type.lower() == 'logic50':
potency = 10**(potency)
p_type = 'IC50'
if potency is not None:
if p_type.lower() == 'ec50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
if p_type.lower() == 'ic50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
return potency,p_type
else:
self.logger.info("potency convertion error")
return None, None
def define_g_family(self, protein, assay_type):
family = None
if (protein == 'b-arrestin' or
protein == 'b-arrestin-1 (non-visual arrestin-2)' or
protein == 'b-arrestin-2 (non-visual arrestin-3)'):
family = 'B-arr'
elif (protein == 'gi/o-family' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gai' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gai1/2' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gaoB' or
protein == 'gao1' or
protein == 'gat1' or
protein == 'gat2' or
protein == 'gat3' or
protein == 'gaz' or
protein == 'gaob'):
family = 'Gi/o'
elif (protein == 'gq-family' or
protein == 'ga12' or
protein==' gaq' or
protein=='gaq/11' or
protein=='gaq/14' or
protein=='gaq/15' or
protein=='gaq/16'):
family = 'Gq/11'
elif (protein == 'g12/13-family' or
protein == 'ga12' or
protein == 'ga13'):
family = 'G12/13'
elif (protein == 'gs-family' or
protein == 'gas' or
protein == 'gaolf'):
family = 'Gs'
elif (protein == 'pERK1/2 activation' or
protein =="perk1-2"):
family = 'pERK1-2'
elif (protein == '' or protein is None):
if assay_type == 'Ca2+ accumulation':
family = 'CA2'
def fetch_receptor_trunsducers(self, receptor):
primary = set()
temp = list()
try:
gprotein = ProteinCouplings.objects.filter(protein=receptor)
for x in gprotein:
if x.transduction and x.transduction == 'primary':
primary.add(x.g_protein.name)
for i in primary:
temp.append(str(i))
return temp
except:
self.logger.info('receptor not found error')
return None
def fetch_endogenous(self, protein):
try:
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM protein_endogenous_ligands WHERE protein_id =%s", [protein.pk])
row = cursor.fetchone()
end_ligand = Ligand.objects.filter(id=row[2])
test = end_ligand.get()
return test
except:
self.logger.info("The error appeared in def fetch_endogenous")
return None
def fetch_vendor(self, ligand,experiment_entry):
temp = ligand
links = LigandVendorLink.objects.filter(lp=ligand.properities.id)
# vendor_count = 0
for x in links:
if x.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
ligand_vendor = BiasedExperimentVendors(experiment=experiment_entry,
vendor=x)
ligand_vendor.save()
self.logger.info("ligand_vendor saved")
def fetch_protein(self,protein_from_excel, source):
"""
fetch receptor with Protein model
requires: protein id, source
"""
test = None
if Protein.objects.filter(entry_name=protein_from_excel):
protein = Protein.objects.filter(entry_name=protein_from_excel)
test = protein.get()
elif Protein.objects.filter(web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot'):
protein1 = Protein.objects.filter(
web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot')
test = protein1[0]
if test == None:
self.logger.info("fetch_protein error")
return test
def fetch_ligand(self, ligand_id, ligand_type, ligand_name, source_file):
"""
fetch ligands with Ligand model
requires: ligand id, ligand id type, ligand name
requires: source_file name
"""
l = None
try:
if ligand_id in self.ligand_cache:
l = self.ligand_cache[ligand_id]
else:
l = get_or_make_ligand(ligand_id, ligand_type, ligand_name)
self.ligand_cache[ligand_id] = l
if l == None:
l = self.create_empty_ligand(ligand_name)
except:
web_resource = WebResource.objects.get(slug='pubchem')
try:
l = Ligand.objects.get(properities__web_links__web_resource=web_resource,
properities__web_links__index=ligand_id)
except:
l = self.create_empty_ligand(ligand_name)
# print('null ligand', l)
return l
def fetch_publication(self, publication_doi):
"""
fetch publication with Publication model
requires: publication doi or pmid
"""
try:
float(publication_doi)
publication_doi = str(int(publication_doi))
except ValueError:
pass
if publication_doi.isdigit(): # assume pubmed
pub_type = 'pubmed'
else: # assume doi
pub_type = 'doi'
if publication_doi not in self.publication_cache:
pub = False
if pub_type == 'doi':
pub = Publication.get_or_create_from_doi(publication_doi)
elif pub_type == 'pubmed':
pub = Publication.get_or_create_from_pubmed(publication_doi)
if not pub:
self.mylog.debug(
"publication fetching error | module: fetch_publication. Row # is : " + str(publication_doi) + ' ' + pub_type)
self.publication_cache[publication_doi] = pub
else:
pub = self.publication_cache[publication_doi]
return pub
def fetch_experiment(self, publication, ligand, receptor, source):
"""
fetch receptor with Protein model
requires: protein id, source
"""
try:
experiment = AnalyzedExperiment.objects.filter(
publication=publication, ligand=ligand, receptor=receptor, source=source)
experiment = experiment.get()
return True
except Exception as msg:
experiment = None
self.mylog.exception(
"Experiment AnalyzedExperiment error | module: AnalyzedExperiment.")
return False
def prepare_all_data(self, filenames):
if not filenames:
filenames = os.listdir(self.structure_data_dir)
for source_file in filenames:
source_file_path = os.sep.join(
[self.structure_data_dir, source_file]).replace('//', '/')
if os.path.isfile(source_file_path) and source_file[0] != '.':
self.logger.info('Reading file {}'.format(source_file_path))
print('Reading file {}'.format(source_file_path))
# read the yaml file
rows = []
if source_file[-4:] == 'xlsx' or source_file[-3:] == 'xls':
if "~$" in source_file:
# ignore open excel files
continue
rows = self.loaddatafromexcel(source_file_path)
rows = self.analyse_rows(rows, source_file)
else:
self.mylog.debug('unknown format'.source_file)
continue
self.data_all += rows
print(len(self.data_all), " total data points")
print("Finished")
def create_empty_ligand(self, ligand_name):
# gtoplig webresource
lp = self.build_ligand_properties()
ligand = Ligand()
ligand.properities = lp
ligand.name = ligand_name
ligand.canonical = True
ligand.ambigious_alias = False
ligand.pdbe = None
try:
ligand.save()
except IntegrityError:
self.logger.info("empty ligand found")
return Ligand.objects.get(name=ligand_name, canonical=True)
return ligand
def build_ligand_properties(self):
lp = LigandProperities()
lt = LigandType.objects.get(name = 'small molecule')
lp.ligand_type = lt
lp.smiles = None
lp.inchikey = None
lp.sequence= None
lp.mw = None
lp.rotatable_bonds = None
lp.hacc = None
lp.hdon = None
lp.logp = None
lp.save()
self.logger.info("Could not create ligand, empty is returned")
return lp
| 39.96112
| 185
| 0.521502
|
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from django.db import connection
from django.db import IntegrityError
from django.utils.text import slugify
from django.http import HttpResponse, JsonResponse
from decimal import Decimal
from build.management.commands.base_build import Command as BaseBuild
from common.tools import fetch_from_cache, save_to_cache, fetch_from_web_api
from residue.models import Residue
from protein.models import Protein, ProteinCouplings
from ligand.models import BiasedExperiment, ExperimentAssay, BiasedExperimentVendors, AnalyzedExperiment, ExperimentAssayAuthors, Ligand, LigandProperities, LigandType, LigandVendorLink
from mutation.models import Mutation
from ligand.functions import get_or_make_ligand
from common.models import WebLink, WebResource, Publication
from django.db import connection
import queue
import logging
import os
from datetime import datetime
import xlrd
import operator
import traceback
import time
import math
import pytz
import re
MISSING_PROTEINS = {}
SKIPPED = 0
class Command(BaseBuild):
mylog = logging.getLogger(__name__)
mylog.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')
file_handler = logging.FileHandler('biasDataTest.log')
file_handler.setLevel(logging.ERROR)
file_handler.setFormatter(formatter)
mylog.addHandler(file_handler)
help = 'Reads bias data and imports it'
structure_data_dir = os.sep.join([settings.DATA_DIR, 'ligand_data', 'bias_data'])
publication_cache = {}
ligand_cache = {}
data_all = []
def add_arguments(self, parser):
parser.add_argument('-p', '--proc',
type=int,
action='store',
dest='proc',
default=1,
help='Number of processes to run')
parser.add_argument('-f', '--filename',
action='append',
dest='filename',
help='Filename to import. Can be used multiple times')
parser.add_argument('-u', '--purge',
action='store_true',
dest='purge',
default=False,
help='Purge existing bias records')
parser.add_argument('--test_run', action='store_true', help='Skip this during a test run',
default=False)
def handle(self, *args, **options):
if options['test_run']:
print('Skipping in test run')
return
if options['purge']:
try:
print('Started purging bias data')
self.purge_bias_data()
print('Ended purging bias data')
except Exception as msg:
print(msg)
self.logger.error(msg)
self.prepare_all_data(options['filename'])
try:
print('CREATING BIAS DATA')
print(options['filename'])
self.logger.info('COMPLETED CREATING BIAS')
except Exception as msg:
print('--error--', msg, '\n')
self.logger.info("The error appeared in def handle")
def purge_bias_data(self):
delete_bias_excel = BiasedExperiment.objects.all()
delete_bias_excel.delete()
delete_bias_experiment = AnalyzedExperiment.objects.all()
delete_bias_experiment.delete()
self.logger.info("Bias data purgedAk47aspirine1Ak47aspirine1Ak47aspirine1Ak47aspirine1")
def loaddatafromexcel(self, excelpath):
num_rows = 0
try:
workbook = xlrd.open_workbook(excelpath)
worksheets = workbook.sheet_names()
temp = []
for worksheet_name in worksheets:
worksheet = workbook.sheet_by_name(worksheet_name)
num_rows = worksheet.nrows - 1
num_cells = worksheet.ncols - 1
curr_row = 0 while curr_row < num_rows:
curr_row += 1
row = worksheet.row(curr_row)
curr_cell = -1
temprow = []
while curr_cell < num_cells:
curr_cell += 1
cell_value = worksheet.cell_value(curr_row, curr_cell)
cell_type = worksheet.cell_type(curr_row, curr_cell)
if cell_value == " ":
cell_value = ""
temprow.append(cell_value)
temp.append(temprow)
return temp
except:
self.logger.info(
"The error appeared during reading the excel", num_rows)
def initialize_return_row(self,excel_row):
d = dict()
d['submitting_group'] = None
d['reference'] = None
d['ligand_name'] = None
d['ligand_type'] = None
d['ligand_id'] = None
d['ligand_reference'] = None
d['emax_ligand_name'] = None
d['emax_ligand_type'] = None
d['emax_ligand_id'] = None
d['receptor'] = None
d['receptor_uniprot_id'] = None
d['cell_line'] = None
d['protein'] = None
d['protein_assay'] = None
d['protein_assay_method'] = None
d['protein_time_resolved'] = None
d['protein_ligand_function'] = None
d['protein_mtype'] = None
d['protein_relation'] = None
d['protein_activity_quantity'] = None
d['protein_activity_quantity_unit'] = None
d['protein_activity_quality'] = None
d['protein_efficacy_measure'] = None
d['protein_efficacy_relation'] = None
d['protein_efficacy_quantity'] = 0.0
d['protein_efficacy_quantity_unit'] = None
d['pathway_bias_initial'] = None
d['pathway_bias'] = None
d['protein_activity_equation'] = None
d['protein_efficacy_equation'] = None
d['auxiliary_protein'] = None
d['source_file'] = excel_row
self.logger.info("empty dict created error")
return d
def return_row(self, r,excel_row):
d = self.initialize_return_row(excel_row)
d['submitting_group'] = r[0]
d['reference'] = r[1]
try:
d['ligand_name'] = str(int(r[4]))
except:
d['ligand_name'] = r[4]
d['ligand_type'] = r[5]
try:
d['ligand_id'] = int(r[6])
except:
d['ligand_id'] = r[6]
d['ligand_reference'] = r[7]
d['emax_ligand_name'] = r[8]
d['emax_ligand_type'] = r[9]
try:
d['emax_ligand_id'] = int(r[10])
except:
d['emax_ligand_id'] = r[10]
d['receptor'] = r[11].lower().strip()
d['receptor_uniprot_id'] = r[12]
d['cell_line'] = r[13]
d['protein'] = r[14].strip().replace('α','a').replace('β','B').replace('g','G').lower()
d['protein_assay'] = r[15].strip()
d['protein_assay_method'] = r[16]
d['protein_time_resolved'] = r[17]
d['protein_ligand_function'] = r[18]
d['protein_mtype'] = r[19]
d['protein_relation'] = r[20]
d['protein_activity_quantity_unit'] = r[22]
d['protein_activity_quality'] = r[23]
d['protein_efficacy_measure'] = r[24]
d['protein_efficacy_relation'] = r[25]
d['protein_efficacy_quantity_unit'] = r[27]
if r[21] is not None and r[21] != '':
d['protein_activity_quantity'] = r[21]
if r[26] is not None and r[26] != '':
d['protein_efficacy_quantity'] = r[26]
if r[28] is not None and r[28] != '':
try:
d['pathway_bias_initial'] = float(r[28])
except:
try:
d['pathway_bias_initial'] = float(r[28].replace('\U00002013', '-'))
except:
d['pathway_bias_initial'] = r[28]
self.logger.info("pathway_bias_initial error")
if r[29] is not None and r[29] != '':
try:
d['pathway_bias'] = float(r[29])
except:
try:
d['pathway_bias'] = float(r[29].replace('\U00002013', '-'))
except:
d['pathway_bias'] = None
d['auxiliary_protein'] = r[30]
d['source_file'] = excel_row
return d
def analyse_rows(self, rows, source_file):
skipped = list()
temp = []
for i, r in enumerate(rows, 1):
d = dict()
if i % 100 == 0:
print(i)
d = self.return_row(r=r,excel_row=i)
try:
d['protein_activity_quantity'] = re.sub('[^\d\.,]', '', d['protein_activity_quantity'])
d['protein_activity_quantity'] = round(float(d['protein_activity_quantity']),2)
except:
d['protein_activity_quantity'] = d['protein_activity_quantity']
try:
d['protein_efficacy_quantity'] = round(d['protein_efficacy_quantity'],0)
except:
d['protein_efficacy_quantity'] = d['protein_efficacy_quantity']
d['protein_activity_quantity'], d['protein_mtype'] = self.fetch_measurements(d['protein_activity_quantity'],
d['protein_mtype'],
d['protein_activity_quantity_unit'])
if (d['protein'] == '' or
d['protein'] == None):
if d['protein_assay'] == 'pERK1/2 activation' or d['protein_assay'] =="pERK1-2":
d['protein'] = 'pERK1-2'
family = self.define_g_family(d['protein'].lower(), d['protein_assay'])
pub = self.fetch_publication(d['reference'])
l = self.fetch_ligand(
d['ligand_id'], d['ligand_type'], d['ligand_name'], d['source_file'])
protein = self.fetch_protein(d['receptor'], d['source_file'])
reference_ligand = self.fetch_ligand(
d['emax_ligand_id'], d['emax_ligand_type'], d['emax_ligand_name'], d['source_file'])
protein = self.fetch_protein(d['receptor'], d['source_file'])
if protein == None:
skipped.append(d)
continue
end_ligand = self.fetch_endogenous(protein)
auxiliary_protein = self.fetch_protein(d['auxiliary_protein'], d['source_file'])
if l == None:
print('*************error row',d,l)
experiment_entry = BiasedExperiment(submission_author=d['submitting_group'],
publication=pub,
ligand=l,
receptor=protein,
auxiliary_protein = auxiliary_protein,
endogenous_ligand = end_ligand,
ligand_source_id = d['ligand_id'],
ligand_source_type = d['ligand_type'],
)
experiment_entry.save()
self.fetch_vendor(l,experiment_entry)
experiment_assay = ExperimentAssay(biased_experiment=experiment_entry,
signalling_protein=d['protein'],
family = family,
cell_line=d['cell_line'],
assay_type=d['protein_assay'],
assay_measure=d['protein_assay_method'],
assay_time_resolved=d['protein_time_resolved'],
ligand_function=d['protein_ligand_function'],
quantitive_measure_type=d['protein_mtype'],
quantitive_activity=d['protein_activity_quantity'],
quantitive_activity_sign=d['protein_activity_equation'],
quantitive_unit=d['protein_activity_quantity_unit'],
qualitative_activity=d['protein_activity_quality'],
quantitive_efficacy=d['protein_efficacy_quantity'],
efficacy_measure_type=d['protein_efficacy_measure'],
efficacy_sign=d['protein_efficacy_equation'],
efficacy_unit=d['protein_efficacy_quantity_unit'],
bias_reference=d['ligand_reference'],
bias_value=d['pathway_bias'],
bias_value_initial=d['pathway_bias_initial'],
emax_ligand_reference=reference_ligand
)
experiment_assay.save()
self.fetch_publication_authors(pub,experiment_assay)
temp.append(d)
return temp
def fetch_publication_authors(self,publication, experiment_assay):
counter = 0
author_list = list()
if publication.authors != None:
for authors in publication.authors.split(','):
author_list.append(authors.strip())
author_list.reverse()
for i in author_list:
if counter < 3:
assay_author = ExperimentAssayAuthors(experiment = experiment_assay,
author=i)
assay_author.save()
counter=counter+1
def fetch_measurements(self, potency, p_type, unit):
if potency is not None:
if p_type.lower() == 'pec50':
potency = 10**(potency*(-1))
p_type = 'EC50'
elif p_type.lower() == 'logec50':
potency = 10**(potency)
p_type = 'EC50'
elif p_type.lower() == 'pic50':
potency = 10**(potency*(-1))
p_type = 'IC50'
elif p_type.lower() == 'logic50':
potency = 10**(potency)
p_type = 'IC50'
if potency is not None:
if p_type.lower() == 'ec50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
if p_type.lower() == 'ic50':
if unit.lower() == 'nm':
potency = potency* 10**(-9)
elif unit.lower() == 'µm':
potency = potency* 10**(-6)
elif unit.lower() == 'pm':
potency = potency* 10**(-12)
elif unit.lower() == 'mm':
potency = potency* 10**(-3)
return potency,p_type
else:
self.logger.info("potency convertion error")
return None, None
def define_g_family(self, protein, assay_type):
family = None
if (protein == 'b-arrestin' or
protein == 'b-arrestin-1 (non-visual arrestin-2)' or
protein == 'b-arrestin-2 (non-visual arrestin-3)'):
family = 'B-arr'
elif (protein == 'gi/o-family' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gai' or
protein == 'gai1' or
protein == 'gai2' or
protein == 'gai3' or
protein == 'gai1/2' or
protein == 'gao' or
protein == 'gaoA' or
protein == 'gaoB' or
protein == 'gao1' or
protein == 'gat1' or
protein == 'gat2' or
protein == 'gat3' or
protein == 'gaz' or
protein == 'gaob'):
family = 'Gi/o'
elif (protein == 'gq-family' or
protein == 'ga12' or
protein==' gaq' or
protein=='gaq/11' or
protein=='gaq/14' or
protein=='gaq/15' or
protein=='gaq/16'):
family = 'Gq/11'
elif (protein == 'g12/13-family' or
protein == 'ga12' or
protein == 'ga13'):
family = 'G12/13'
elif (protein == 'gs-family' or
protein == 'gas' or
protein == 'gaolf'):
family = 'Gs'
elif (protein == 'pERK1/2 activation' or
protein =="perk1-2"):
family = 'pERK1-2'
elif (protein == '' or protein is None):
if assay_type == 'Ca2+ accumulation':
family = 'CA2'
def fetch_receptor_trunsducers(self, receptor):
primary = set()
temp = list()
try:
gprotein = ProteinCouplings.objects.filter(protein=receptor)
for x in gprotein:
if x.transduction and x.transduction == 'primary':
primary.add(x.g_protein.name)
for i in primary:
temp.append(str(i))
return temp
except:
self.logger.info('receptor not found error')
return None
def fetch_endogenous(self, protein):
try:
with connection.cursor() as cursor:
cursor.execute("SELECT * FROM protein_endogenous_ligands WHERE protein_id =%s", [protein.pk])
row = cursor.fetchone()
end_ligand = Ligand.objects.filter(id=row[2])
test = end_ligand.get()
return test
except:
self.logger.info("The error appeared in def fetch_endogenous")
return None
def fetch_vendor(self, ligand,experiment_entry):
temp = ligand
links = LigandVendorLink.objects.filter(lp=ligand.properities.id)
for x in links:
if x.vendor.name not in ['ZINC', 'ChEMBL', 'BindingDB', 'SureChEMBL', 'eMolecules', 'MolPort', 'PubChem']:
ligand_vendor = BiasedExperimentVendors(experiment=experiment_entry,
vendor=x)
ligand_vendor.save()
self.logger.info("ligand_vendor saved")
def fetch_protein(self,protein_from_excel, source):
test = None
if Protein.objects.filter(entry_name=protein_from_excel):
protein = Protein.objects.filter(entry_name=protein_from_excel)
test = protein.get()
elif Protein.objects.filter(web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot'):
protein1 = Protein.objects.filter(
web_links__index=protein_from_excel, web_links__web_resource__slug='uniprot')
test = protein1[0]
if test == None:
self.logger.info("fetch_protein error")
return test
def fetch_ligand(self, ligand_id, ligand_type, ligand_name, source_file):
l = None
try:
if ligand_id in self.ligand_cache:
l = self.ligand_cache[ligand_id]
else:
l = get_or_make_ligand(ligand_id, ligand_type, ligand_name)
self.ligand_cache[ligand_id] = l
if l == None:
l = self.create_empty_ligand(ligand_name)
except:
web_resource = WebResource.objects.get(slug='pubchem')
try:
l = Ligand.objects.get(properities__web_links__web_resource=web_resource,
properities__web_links__index=ligand_id)
except:
l = self.create_empty_ligand(ligand_name)
return l
def fetch_publication(self, publication_doi):
try:
float(publication_doi)
publication_doi = str(int(publication_doi))
except ValueError:
pass
if publication_doi.isdigit(): pub_type = 'pubmed'
else: pub_type = 'doi'
if publication_doi not in self.publication_cache:
pub = False
if pub_type == 'doi':
pub = Publication.get_or_create_from_doi(publication_doi)
elif pub_type == 'pubmed':
pub = Publication.get_or_create_from_pubmed(publication_doi)
if not pub:
self.mylog.debug(
"publication fetching error | module: fetch_publication. Row # is : " + str(publication_doi) + ' ' + pub_type)
self.publication_cache[publication_doi] = pub
else:
pub = self.publication_cache[publication_doi]
return pub
def fetch_experiment(self, publication, ligand, receptor, source):
try:
experiment = AnalyzedExperiment.objects.filter(
publication=publication, ligand=ligand, receptor=receptor, source=source)
experiment = experiment.get()
return True
except Exception as msg:
experiment = None
self.mylog.exception(
"Experiment AnalyzedExperiment error | module: AnalyzedExperiment.")
return False
def prepare_all_data(self, filenames):
if not filenames:
filenames = os.listdir(self.structure_data_dir)
for source_file in filenames:
source_file_path = os.sep.join(
[self.structure_data_dir, source_file]).replace('//', '/')
if os.path.isfile(source_file_path) and source_file[0] != '.':
self.logger.info('Reading file {}'.format(source_file_path))
print('Reading file {}'.format(source_file_path))
rows = []
if source_file[-4:] == 'xlsx' or source_file[-3:] == 'xls':
if "~$" in source_file:
continue
rows = self.loaddatafromexcel(source_file_path)
rows = self.analyse_rows(rows, source_file)
else:
self.mylog.debug('unknown format'.source_file)
continue
self.data_all += rows
print(len(self.data_all), " total data points")
print("Finished")
def create_empty_ligand(self, ligand_name):
lp = self.build_ligand_properties()
ligand = Ligand()
ligand.properities = lp
ligand.name = ligand_name
ligand.canonical = True
ligand.ambigious_alias = False
ligand.pdbe = None
try:
ligand.save()
except IntegrityError:
self.logger.info("empty ligand found")
return Ligand.objects.get(name=ligand_name, canonical=True)
return ligand
def build_ligand_properties(self):
lp = LigandProperities()
lt = LigandType.objects.get(name = 'small molecule')
lp.ligand_type = lt
lp.smiles = None
lp.inchikey = None
lp.sequence= None
lp.mw = None
lp.rotatable_bonds = None
lp.hacc = None
lp.hdon = None
lp.logp = None
lp.save()
self.logger.info("Could not create ligand, empty is returned")
return lp
| true
| true
|
1c496c984a5305a874109d556f037a1da44afd9d
| 363
|
py
|
Python
|
Day01-15/code/Day15/pdf2.py
|
EngrSaad2/Python-100-Days
|
ab0b26714b1df50d02a1433dc82f2a3fb025be5c
|
[
"Apache-2.0"
] | 6
|
2020-04-22T14:07:51.000Z
|
2021-09-07T12:55:23.000Z
|
Day01-15/code/Day15/pdf2.py
|
2462612540/Python-Language
|
a676d1274a04ff03f1aea0de9c58019d6ef8f5fe
|
[
"Apache-2.0"
] | null | null | null |
Day01-15/code/Day15/pdf2.py
|
2462612540/Python-Language
|
a676d1274a04ff03f1aea0de9c58019d6ef8f5fe
|
[
"Apache-2.0"
] | 4
|
2019-08-25T05:51:00.000Z
|
2021-04-16T08:14:16.000Z
|
"""
读取PDF文件
Version: 0.1
Author: 骆昊
Date: 2018-03-26
"""
from PyPDF2 import PdfFileReader
with open('./res/Python课程大纲.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| 19.105263
| 45
| 0.680441
|
from PyPDF2 import PdfFileReader
with open('./res/Python课程大纲.pdf', 'rb') as f:
reader = PdfFileReader(f, strict=False)
print(reader.numPages)
if reader.isEncrypted:
reader.decrypt('')
current_page = reader.getPage(5)
print(current_page)
print(current_page.extractText())
| true
| true
|
1c496ca75c47d276175856efd760bf5ff55c3465
| 547
|
py
|
Python
|
augment/aug_insert_junk_chars.py
|
biubiubiiu/SpamClassification
|
c7159c77baf5f1ba09ce1af9fc0f7e0c10332864
|
[
"Apache-2.0"
] | null | null | null |
augment/aug_insert_junk_chars.py
|
biubiubiiu/SpamClassification
|
c7159c77baf5f1ba09ce1af9fc0f7e0c10332864
|
[
"Apache-2.0"
] | null | null | null |
augment/aug_insert_junk_chars.py
|
biubiubiiu/SpamClassification
|
c7159c77baf5f1ba09ce1af9fc0f7e0c10332864
|
[
"Apache-2.0"
] | 1
|
2022-03-01T13:10:46.000Z
|
2022-03-01T13:10:46.000Z
|
import random
from resources import list_junk_charaters
from .base_operation import BaseOperation
class InsertJunkCharacters(BaseOperation):
"""Insert meaningless a character into text"""
def __init__(self):
super(InsertJunkCharacters, self).__init__()
self.junk_chars = list_junk_charaters()
def can_replace(self, s):
return True
def transform(self, s):
idx = random.randint(0, len(s))
char_to_insert = random.choice(self.junk_chars)
return s[:idx] + char_to_insert + s[idx:]
| 26.047619
| 55
| 0.694698
|
import random
from resources import list_junk_charaters
from .base_operation import BaseOperation
class InsertJunkCharacters(BaseOperation):
def __init__(self):
super(InsertJunkCharacters, self).__init__()
self.junk_chars = list_junk_charaters()
def can_replace(self, s):
return True
def transform(self, s):
idx = random.randint(0, len(s))
char_to_insert = random.choice(self.junk_chars)
return s[:idx] + char_to_insert + s[idx:]
| true
| true
|
1c496dfc9ef80a210ba798d35c3fe379edc60e8a
| 5,072
|
py
|
Python
|
server/server.py
|
TwistedSim/CoupIO
|
f517fb52b0b1050066d60fd0b389238e247cc90f
|
[
"MIT"
] | 3
|
2020-12-07T00:03:26.000Z
|
2020-12-07T01:51:27.000Z
|
server/server.py
|
TwistedSim/CoupIO
|
f517fb52b0b1050066d60fd0b389238e247cc90f
|
[
"MIT"
] | null | null | null |
server/server.py
|
TwistedSim/CoupIO
|
f517fb52b0b1050066d60fd0b389238e247cc90f
|
[
"MIT"
] | 1
|
2020-12-05T17:35:16.000Z
|
2020-12-05T17:35:16.000Z
|
import asyncio
import inspect
import socketio
import random
from typing import Type
from games.game_interface import GameInterface, Game
class Server(socketio.AsyncNamespace):
current_games = {}
game_class = None
sio = None
start_lock = asyncio.Lock()
@classmethod
def configure(cls, sio: socketio.Server, game: Type[GameInterface]):
cls.game_class = game
cls.sio = sio
server_methods = [m[0] for m in inspect.getmembers(cls, predicate=inspect.isfunction) if m[0].startswith('on_')]
for method in inspect.getmembers(cls.game_class, predicate=inspect.ismethod):
if method[0] in server_methods:
raise NameError(f'A event handler for {method[0]} already exists in the server interface.')
if method[0].startswith('on_'):
cls.sio.on(method[0][3:], handler=method[1])
async def on_connect(self, sid, environ):
print(f'Client {sid} connected')
await self.sio.send(f'Connected to {Server.game_class.__name__} server', room=sid)
async def on_create_game(self, sid):
new_game = self.game_class(self.sio, sid)
self.current_games[new_game.uuid] = new_game
await self.sio.send(f'New game created', room=sid)
print(f'Client {sid} create a new game {new_game.uuid}')
return new_game.uuid
async def on_find_random_game(self, sid):
available_games = [
game for game in self.current_games.values() if game.is_valid]
if available_games:
return random.choice(available_games).uuid
else:
await self.sio.send(f'No game available')
async def on_join_game(self, sid, game_uuid):
game = self.current_games[game_uuid]
if len(self.sio.rooms(sid)) > 1:
await self.sio.send(f'You already are in game {self.sio.rooms(sid)[1]}', room=sid)
elif game_uuid not in self.current_games:
await self.sio.send(f'Game {game_uuid} does not exists', room=sid)
elif not game.is_valid:
await self.sio.send(f'Game {game_uuid} is not available', room=sid)
elif game.is_full:
await self.sio.send(f'Game {game_uuid} is full', room=sid)
else:
await game.add_player(sid)
self.sio.enter_room(sid, game_uuid)
await self.sio.send(f'Game {game_uuid} joined', room=sid)
await self.sio.send(f'A new player joined the game', room=game_uuid, skip_sid=sid)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, False), room=game_uuid, skip_sid=game.owner)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, True), room=game.owner)
print(f'Client {sid} join the game {game_uuid}')
async def leave(self, sid, game_uuid):
self.sio.leave_room(sid, game_uuid)
await self.current_games[game_uuid].remove_player(sid)
print(f'Client {sid} left game {game_uuid}')
await self.sio.send(f'Left room {game_uuid}', room=sid)
await self.sio.send('A player left the game', room=game_uuid)
if self.current_games[game_uuid].status == Game.Status.Running:
self.current_games[game_uuid].status = Game.Status.Aborted
elif sid == self.current_games[game_uuid].owner:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was closed by the owner')
await self.sio.send(f'Game {game_uuid} was close by owner', room=game_uuid)
elif self.current_games[game_uuid].nb_player == 0:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was removed since there is no player left')
if self.current_games[game_uuid].status == Game.Status.Aborted:
await self.sio.send(f'Game was aborted', room=game_uuid)
await self.sio.emit('game_aborted', game_uuid, room=game_uuid)
await self.sio.close_room(game_uuid)
async def on_disconnect(self, sid):
for game in self.sio.rooms(sid):
if game != sid:
await self.leave(sid, game)
print(f'Client {sid} disconnected')
async def on_start_game(self, sid, game_uuid):
async with self.start_lock:
game = self.current_games[game_uuid]
if game.owner != sid:
await self.sio.send(f'Only the owner of the game can start the game', room=sid)
elif not game.is_ready:
await self.sio.send(f'The game cannot start until it is ready', room=sid)
else:
await self.sio.send(f'Game {game.uuid} started', room=game_uuid)
print(f'Client {sid} started the game {game.uuid}')
# TODO start the game in another process
# TODO use different socket.io namespace according to the game
await game.start()
print(f'Game {game.uuid} is completed.')
await self.sio.close_room(game.uuid)
| 44.104348
| 126
| 0.638604
|
import asyncio
import inspect
import socketio
import random
from typing import Type
from games.game_interface import GameInterface, Game
class Server(socketio.AsyncNamespace):
current_games = {}
game_class = None
sio = None
start_lock = asyncio.Lock()
@classmethod
def configure(cls, sio: socketio.Server, game: Type[GameInterface]):
cls.game_class = game
cls.sio = sio
server_methods = [m[0] for m in inspect.getmembers(cls, predicate=inspect.isfunction) if m[0].startswith('on_')]
for method in inspect.getmembers(cls.game_class, predicate=inspect.ismethod):
if method[0] in server_methods:
raise NameError(f'A event handler for {method[0]} already exists in the server interface.')
if method[0].startswith('on_'):
cls.sio.on(method[0][3:], handler=method[1])
async def on_connect(self, sid, environ):
print(f'Client {sid} connected')
await self.sio.send(f'Connected to {Server.game_class.__name__} server', room=sid)
async def on_create_game(self, sid):
new_game = self.game_class(self.sio, sid)
self.current_games[new_game.uuid] = new_game
await self.sio.send(f'New game created', room=sid)
print(f'Client {sid} create a new game {new_game.uuid}')
return new_game.uuid
async def on_find_random_game(self, sid):
available_games = [
game for game in self.current_games.values() if game.is_valid]
if available_games:
return random.choice(available_games).uuid
else:
await self.sio.send(f'No game available')
async def on_join_game(self, sid, game_uuid):
game = self.current_games[game_uuid]
if len(self.sio.rooms(sid)) > 1:
await self.sio.send(f'You already are in game {self.sio.rooms(sid)[1]}', room=sid)
elif game_uuid not in self.current_games:
await self.sio.send(f'Game {game_uuid} does not exists', room=sid)
elif not game.is_valid:
await self.sio.send(f'Game {game_uuid} is not available', room=sid)
elif game.is_full:
await self.sio.send(f'Game {game_uuid} is full', room=sid)
else:
await game.add_player(sid)
self.sio.enter_room(sid, game_uuid)
await self.sio.send(f'Game {game_uuid} joined', room=sid)
await self.sio.send(f'A new player joined the game', room=game_uuid, skip_sid=sid)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, False), room=game_uuid, skip_sid=game.owner)
await self.sio.emit('player_joined_game', (game_uuid, game.nb_player, True), room=game.owner)
print(f'Client {sid} join the game {game_uuid}')
async def leave(self, sid, game_uuid):
self.sio.leave_room(sid, game_uuid)
await self.current_games[game_uuid].remove_player(sid)
print(f'Client {sid} left game {game_uuid}')
await self.sio.send(f'Left room {game_uuid}', room=sid)
await self.sio.send('A player left the game', room=game_uuid)
if self.current_games[game_uuid].status == Game.Status.Running:
self.current_games[game_uuid].status = Game.Status.Aborted
elif sid == self.current_games[game_uuid].owner:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was closed by the owner')
await self.sio.send(f'Game {game_uuid} was close by owner', room=game_uuid)
elif self.current_games[game_uuid].nb_player == 0:
self.current_games[game_uuid].status = Game.Status.Aborted
print(f'Game {game_uuid} was removed since there is no player left')
if self.current_games[game_uuid].status == Game.Status.Aborted:
await self.sio.send(f'Game was aborted', room=game_uuid)
await self.sio.emit('game_aborted', game_uuid, room=game_uuid)
await self.sio.close_room(game_uuid)
async def on_disconnect(self, sid):
for game in self.sio.rooms(sid):
if game != sid:
await self.leave(sid, game)
print(f'Client {sid} disconnected')
async def on_start_game(self, sid, game_uuid):
async with self.start_lock:
game = self.current_games[game_uuid]
if game.owner != sid:
await self.sio.send(f'Only the owner of the game can start the game', room=sid)
elif not game.is_ready:
await self.sio.send(f'The game cannot start until it is ready', room=sid)
else:
await self.sio.send(f'Game {game.uuid} started', room=game_uuid)
print(f'Client {sid} started the game {game.uuid}')
await game.start()
print(f'Game {game.uuid} is completed.')
await self.sio.close_room(game.uuid)
| true
| true
|
1c496fe67e0d21c3e64a5837cd6d0721b4b6ee09
| 1,189
|
py
|
Python
|
tests/gogs_tools_tests/test_gogs_utils.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 3
|
2017-03-17T02:25:21.000Z
|
2017-05-18T22:18:20.000Z
|
tests/gogs_tools_tests/test_gogs_utils.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 184
|
2016-10-13T02:56:16.000Z
|
2021-03-25T21:27:20.000Z
|
tests/gogs_tools_tests/test_gogs_utils.py
|
mondele/tx-manager
|
ddbbeeae5990a327ffc14b42c478d3ea435c0533
|
[
"MIT"
] | 16
|
2016-09-15T23:34:19.000Z
|
2019-07-25T07:06:32.000Z
|
from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
from libraries.gogs_tools.gogs_handler import GogsHandler
class GogsHandlerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.handler = GogsHandler("https://www.example.com/")
cls.handler.gogs_api = mock.MagicMock()
def setUp(self):
"""Runs before each test."""
self.handler.gogs_api.reset_mock()
def test_authenticate_user_token(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
self.assertTrue(self.handler.authenticate_user_token("valid"))
self.assertFalse(self.handler.authenticate_user_token("invalid"))
def test_get_user(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
mock_user = mock.MagicMock()
self.handler.gogs_api.authenticated_user.return_value = mock_user
self.assertIs(self.handler.get_user("valid"), mock_user)
self.assertIsNone(self.handler.get_user("invalid"))
| 36.030303
| 73
| 0.707317
|
from __future__ import absolute_import, unicode_literals, print_function
import mock
import unittest
from libraries.gogs_tools.gogs_handler import GogsHandler
class GogsHandlerTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.handler = GogsHandler("https://www.example.com/")
cls.handler.gogs_api = mock.MagicMock()
def setUp(self):
self.handler.gogs_api.reset_mock()
def test_authenticate_user_token(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
self.assertTrue(self.handler.authenticate_user_token("valid"))
self.assertFalse(self.handler.authenticate_user_token("invalid"))
def test_get_user(self):
def valid_auth(token):
return token.token == "valid"
self.handler.gogs_api.valid_authentication = valid_auth
mock_user = mock.MagicMock()
self.handler.gogs_api.authenticated_user.return_value = mock_user
self.assertIs(self.handler.get_user("valid"), mock_user)
self.assertIsNone(self.handler.get_user("invalid"))
| true
| true
|
1c49703f28f036f4d4ac9547a92dd0ad4100c1c4
| 1,229
|
py
|
Python
|
lib/lockfile.py
|
kaolin/rigor
|
c3489bf36088282368daee8fd71e9a64344145de
|
[
"BSD-2-Clause"
] | 5
|
2018-03-28T08:43:08.000Z
|
2021-10-30T15:47:07.000Z
|
lib/lockfile.py
|
blindsightcorp/rigor
|
d4176afed5b82cef3daf778ed00fe9be66d231fb
|
[
"BSD-2-Clause"
] | 2
|
2016-10-10T19:10:26.000Z
|
2017-05-03T23:01:37.000Z
|
lib/lockfile.py
|
kaolin/rigor
|
c3489bf36088282368daee8fd71e9a64344145de
|
[
"BSD-2-Clause"
] | 7
|
2016-05-25T00:15:43.000Z
|
2017-06-26T17:32:45.000Z
|
""" File used to synchronize operations between processes """
import os
class LockFile(object):
"""
Use this to lock operations that need to occur only once, even if several
processes try to run the operation. It works by getting an exclusive lock on
the listed file. It will fail with an exception if the lock already is held
by some other process. Note that the lock is reentrant for any code sharing
the same instance of this class.
Usage:
>>> with LockFile('/tmp/rigor-foo.lock') as lock:
... # do critical stuff...
... pass
"""
def __init__(self, path):
self._path = path
self._lock = None
def acquire(self):
"""
Acquires a reentrant lock. If the lock already exists in this method, it
will simply return; otherwise, it will acquire the lock. It will throw an
exception if the lock cannot be acquired.
"""
if not self._lock:
self._lock = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
def release(self):
"""
Releases the lock and removes the file from disk.
"""
if self._lock:
os.close(self._lock)
os.unlink(self._path)
def __enter__(self):
self.acquire()
return self
def __exit__(self, _exc_type, _exc_value, _exc_traceback):
self.release()
| 26.717391
| 78
| 0.703824
|
import os
class LockFile(object):
def __init__(self, path):
self._path = path
self._lock = None
def acquire(self):
if not self._lock:
self._lock = os.open(self._path, os.O_CREAT | os.O_EXCL | os.O_RDWR)
def release(self):
if self._lock:
os.close(self._lock)
os.unlink(self._path)
def __enter__(self):
self.acquire()
return self
def __exit__(self, _exc_type, _exc_value, _exc_traceback):
self.release()
| true
| true
|
1c4970531f6fba9cef04cbc507d9376efaba246c
| 416
|
py
|
Python
|
products/migrations/0004_auto_20180914_2257.py
|
bubaic/e-shop
|
d0156d02d6e74e35d115f8742b55809466126513
|
[
"MIT"
] | 1
|
2022-02-21T18:00:48.000Z
|
2022-02-21T18:00:48.000Z
|
products/migrations/0004_auto_20180914_2257.py
|
bubaic/e-shop
|
d0156d02d6e74e35d115f8742b55809466126513
|
[
"MIT"
] | null | null | null |
products/migrations/0004_auto_20180914_2257.py
|
bubaic/e-shop
|
d0156d02d6e74e35d115f8742b55809466126513
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.1 on 2018-09-14 17:27
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_image'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.FileField(blank=True, null=True, upload_to='get_image_path'),
),
]
| 21.894737
| 86
| 0.610577
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0003_product_image'),
]
operations = [
migrations.AlterField(
model_name='product',
name='image',
field=models.FileField(blank=True, null=True, upload_to='get_image_path'),
),
]
| true
| true
|
1c49712da3c586e84e204bc68db748b83fe51cbd
| 164
|
py
|
Python
|
01_Day_Introduction/euclidian_distance.py
|
fernandovicentinpavanello/30-days-of-Python
|
3e04ef64a0997bb71eeac57911e47f2f6414ae75
|
[
"MIT"
] | 1
|
2022-03-08T07:08:39.000Z
|
2022-03-08T07:08:39.000Z
|
01_Day_Introduction/euclidian_distance.py
|
luizpavanello/30-days-of-Python
|
3c727a76b6185a5ba684c393c5cdfc759c3c4b01
|
[
"MIT"
] | null | null | null |
01_Day_Introduction/euclidian_distance.py
|
luizpavanello/30-days-of-Python
|
3c727a76b6185a5ba684c393c5cdfc759c3c4b01
|
[
"MIT"
] | null | null | null |
# Python Euclidian Distance using math.dist
from math import dist
point_1 = (2, 3)
point_2 = (10, 8)
print(dist(point_1, point_2))
# Result: 9.433981132056603
| 14.909091
| 43
| 0.719512
|
from math import dist
point_1 = (2, 3)
point_2 = (10, 8)
print(dist(point_1, point_2))
| true
| true
|
1c4972132ffd5f30ca850ea943fd539eece66d4f
| 4,004
|
py
|
Python
|
wheat/wallet/util/trade_utils.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 15
|
2021-07-12T14:27:42.000Z
|
2022-02-09T04:32:44.000Z
|
wheat/wallet/util/trade_utils.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 21
|
2021-07-12T23:25:36.000Z
|
2021-10-29T23:19:55.000Z
|
wheat/wallet/util/trade_utils.py
|
grayfallstown/wheat-blockchain
|
f391cdd30a0cbcdb2adf4439a25581fd28b42c1f
|
[
"Apache-2.0"
] | 8
|
2021-07-12T13:15:19.000Z
|
2022-03-15T08:41:18.000Z
|
from typing import Dict, Optional, Tuple
from wheat.types.blockchain_format.program import Program, INFINITE_COST
from wheat.types.condition_opcodes import ConditionOpcode
from wheat.types.spend_bundle import SpendBundle
from wheat.util.condition_tools import conditions_dict_for_solution
from wheat.wallet.cc_wallet import cc_utils
from wheat.wallet.trade_record import TradeRecord
from wheat.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
if status is TradeStatus.PENDING_CONFIRM:
return "Pending Confirmation"
elif status is TradeStatus.CANCELED:
return "Canceled"
elif status is TradeStatus.CONFIRMED:
return "Confirmed"
elif status is TradeStatus.PENDING_CANCEL:
return "Pending Cancellation"
elif status is TradeStatus.FAILED:
return "Failed"
elif status is TradeStatus.PENDING_ACCEPT:
return "Pending"
def trade_record_to_dict(record: TradeRecord) -> Dict:
"""Convenience function to return only part of trade record we care about and show correct status to the ui"""
result = {}
result["trade_id"] = record.trade_id.hex()
result["sent"] = record.sent
result["my_offer"] = record.my_offer
result["created_at_time"] = record.created_at_time
result["accepted_at_time"] = record.accepted_at_time
result["confirmed_at_index"] = record.confirmed_at_index
result["status"] = trade_status_ui_string(TradeStatus(record.status))
success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
if success is False or offer_dict is None:
raise ValueError(error)
result["offer_dict"] = offer_dict
return result
# Returns the relative difference in value between the amount outputted by a puzzle and solution and a coin's amount
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
return discrepancy
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
total = 0
if conditions:
for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
total += Program.to(_.vars[1]).as_int()
return total
def get_discrepancies_for_spend_bundle(
trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
try:
cc_discrepancies: Dict[str, int] = dict()
for coinsol in trade_offer.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
colour = bytes(genesis_checker).hex()
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
else:
coin_amount = coinsol.coin.amount
out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
diff = coin_amount - out_amount
if "wheat" in cc_discrepancies:
cc_discrepancies["wheat"] = cc_discrepancies["wheat"] + diff
else:
cc_discrepancies["wheat"] = diff
return True, cc_discrepancies, None
except Exception as e:
return False, None, e
| 42.595745
| 116
| 0.699051
|
from typing import Dict, Optional, Tuple
from wheat.types.blockchain_format.program import Program, INFINITE_COST
from wheat.types.condition_opcodes import ConditionOpcode
from wheat.types.spend_bundle import SpendBundle
from wheat.util.condition_tools import conditions_dict_for_solution
from wheat.wallet.cc_wallet import cc_utils
from wheat.wallet.trade_record import TradeRecord
from wheat.wallet.trading.trade_status import TradeStatus
def trade_status_ui_string(status: TradeStatus):
if status is TradeStatus.PENDING_CONFIRM:
return "Pending Confirmation"
elif status is TradeStatus.CANCELED:
return "Canceled"
elif status is TradeStatus.CONFIRMED:
return "Confirmed"
elif status is TradeStatus.PENDING_CANCEL:
return "Pending Cancellation"
elif status is TradeStatus.FAILED:
return "Failed"
elif status is TradeStatus.PENDING_ACCEPT:
return "Pending"
def trade_record_to_dict(record: TradeRecord) -> Dict:
result = {}
result["trade_id"] = record.trade_id.hex()
result["sent"] = record.sent
result["my_offer"] = record.my_offer
result["created_at_time"] = record.created_at_time
result["accepted_at_time"] = record.accepted_at_time
result["confirmed_at_index"] = record.confirmed_at_index
result["status"] = trade_status_ui_string(TradeStatus(record.status))
success, offer_dict, error = get_discrepancies_for_spend_bundle(record.spend_bundle)
if success is False or offer_dict is None:
raise ValueError(error)
result["offer_dict"] = offer_dict
return result
def get_output_discrepancy_for_puzzle_and_solution(coin, puzzle, solution):
discrepancy = coin.amount - get_output_amount_for_puzzle_and_solution(puzzle, solution)
return discrepancy
# Returns the amount of value outputted by a puzzle and solution
def get_output_amount_for_puzzle_and_solution(puzzle: Program, solution: Program) -> int:
error, conditions, cost = conditions_dict_for_solution(puzzle, solution, INFINITE_COST)
total = 0
if conditions:
for _ in conditions.get(ConditionOpcode.CREATE_COIN, []):
total += Program.to(_.vars[1]).as_int()
return total
def get_discrepancies_for_spend_bundle(
trade_offer: SpendBundle,
) -> Tuple[bool, Optional[Dict], Optional[Exception]]:
try:
cc_discrepancies: Dict[str, int] = dict()
for coinsol in trade_offer.coin_spends:
puzzle: Program = Program.from_bytes(bytes(coinsol.puzzle_reveal))
solution: Program = Program.from_bytes(bytes(coinsol.solution))
# work out the deficits between coin amount and expected output for each
r = cc_utils.uncurry_cc(puzzle)
if r:
# Calculate output amounts
mod_hash, genesis_checker, inner_puzzle = r
innersol = solution.first()
total = get_output_amount_for_puzzle_and_solution(inner_puzzle, innersol)
colour = bytes(genesis_checker).hex()
if colour in cc_discrepancies:
cc_discrepancies[colour] += coinsol.coin.amount - total
else:
cc_discrepancies[colour] = coinsol.coin.amount - total
else:
coin_amount = coinsol.coin.amount
out_amount = get_output_amount_for_puzzle_and_solution(puzzle, solution)
diff = coin_amount - out_amount
if "wheat" in cc_discrepancies:
cc_discrepancies["wheat"] = cc_discrepancies["wheat"] + diff
else:
cc_discrepancies["wheat"] = diff
return True, cc_discrepancies, None
except Exception as e:
return False, None, e
| true
| true
|
1c4973a004a9278329b4a2ea713e7f3e1c39f8cc
| 10,727
|
py
|
Python
|
venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py
|
dasxran/seleniumMachineLearning
|
3098f836913a89847cb9e308189383a4ea981139
|
[
"MIT"
] | 64
|
2020-07-22T06:24:18.000Z
|
2022-03-27T10:48:15.000Z
|
venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py
|
dasxran/seleniumMachineLearning
|
3098f836913a89847cb9e308189383a4ea981139
|
[
"MIT"
] | 51
|
2021-04-08T11:39:59.000Z
|
2021-05-07T12:01:27.000Z
|
venv/Lib/site-packages/selenium/webdriver/firefox/webdriver.py
|
dasxran/seleniumMachineLearning
|
3098f836913a89847cb9e308189383a4ea981139
|
[
"MIT"
] | 21
|
2019-03-11T04:25:23.000Z
|
2022-02-03T08:54:33.000Z
|
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import warnings
try:
basestring
except NameError: # Python 3.x
basestring = str
import shutil
import sys
from contextlib import contextmanager
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .extension_connection import ExtensionConnection
from .firefox_binary import FirefoxBinary
from .firefox_profile import FirefoxProfile
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from .service import Service
from .webelement import FirefoxWebElement
class WebDriver(RemoteWebDriver):
# There is no native event support on Mac
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
CONTEXT_CHROME = "chrome"
CONTEXT_CONTENT = "content"
_web_element_cls = FirefoxWebElement
def __init__(self, firefox_profile=None, firefox_binary=None,
timeout=30, capabilities=None, proxy=None,
executable_path="geckodriver", options=None,
service_log_path="geckodriver.log", firefox_options=None,
service_args=None, desired_capabilities=None, log_path=None,
keep_alive=True):
"""Starts a new local session of Firefox.
Based on the combination and specificity of the various keyword
arguments, a capabilities dictionary will be constructed that
is passed to the remote end.
The keyword arguments given to this constructor are helpers to
more easily allow Firefox WebDriver sessions to be customised
with different options. They are mapped on to a capabilities
dictionary that is passed on to the remote end.
As some of the options, such as `firefox_profile` and
`options.profile` are mutually exclusive, precedence is
given from how specific the setting is. `capabilities` is the
least specific keyword argument, followed by `options`,
followed by `firefox_binary` and `firefox_profile`.
In practice this means that if `firefox_profile` and
`options.profile` are both set, the selected profile
instance will always come from the most specific variable.
In this case that would be `firefox_profile`. This will result in
`options.profile` to be ignored because it is considered
a less specific setting than the top-level `firefox_profile`
keyword argument. Similarily, if you had specified a
`capabilities["moz:firefoxOptions"]["profile"]` Base64 string,
this would rank below `options.profile`.
:param firefox_profile: Instance of ``FirefoxProfile`` object
or a string. If undefined, a fresh profile will be created
in a temporary location on the system.
:param firefox_binary: Instance of ``FirefoxBinary`` or full
path to the Firefox binary. If undefined, the system default
Firefox installation will be used.
:param timeout: Time to wait for Firefox to launch when using
the extension connection.
:param capabilities: Dictionary of desired capabilities.
:param proxy: The proxy settings to us when communicating with
Firefox via the extension connection.
:param executable_path: Full path to override which geckodriver
binary to use for Firefox 47.0.1 and greater, which
defaults to picking up the binary from the system path.
:param options: Instance of ``options.Options``.
:param service_log_path: Where to log information from the driver.
:param firefox_options: Deprecated argument for options
:param service_args: List of args to pass to the driver service
:param desired_capabilities: alias of capabilities. In future
versions of this library, this will replace 'capabilities'.
This will make the signature consistent with RemoteWebDriver.
:param log_path: Deprecated argument for service_log_path
:param keep_alive: Whether to configure remote_connection.RemoteConnection to use
HTTP keep-alive.
"""
if log_path:
warnings.warn('use service_log_path instead of log_path',
DeprecationWarning, stacklevel=2)
service_log_path = log_path
if firefox_options:
warnings.warn('use options instead of firefox_options',
DeprecationWarning, stacklevel=2)
options = firefox_options
self.binary = None
self.profile = None
self.service = None
# If desired capabilities is set, alias it to capabilities.
# If both are set ignore desired capabilities.
if capabilities is None and desired_capabilities:
capabilities = desired_capabilities
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX.copy()
if options is None:
options = Options()
capabilities = dict(capabilities)
if capabilities.get("binary"):
self.binary = capabilities["binary"]
# options overrides capabilities
if options is not None:
if options.binary is not None:
self.binary = options.binary
if options.profile is not None:
self.profile = options.profile
# firefox_binary and firefox_profile
# override options
if firefox_binary is not None:
if isinstance(firefox_binary, basestring):
firefox_binary = FirefoxBinary(firefox_binary)
self.binary = firefox_binary
options.binary = firefox_binary
if firefox_profile is not None:
if isinstance(firefox_profile, basestring):
firefox_profile = FirefoxProfile(firefox_profile)
self.profile = firefox_profile
options.profile = firefox_profile
# W3C remote
# TODO(ato): Perform conformance negotiation
if capabilities.get("marionette"):
capabilities.pop("marionette")
self.service = Service(
executable_path,
service_args=service_args,
log_path=service_log_path)
self.service.start()
capabilities.update(options.to_capabilities())
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
# Selenium remote
else:
if self.binary is None:
self.binary = FirefoxBinary()
if self.profile is None:
self.profile = FirefoxProfile()
# disable native events if globally disabled
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if proxy is not None:
proxy.add_to_capabilities(capabilities)
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=keep_alive)
self._is_remote = False
def quit(self):
"""Quits the driver and close every associated window."""
try:
RemoteWebDriver.quit(self)
except Exception:
# We don't care about the message because something probably has gone wrong
pass
if self.w3c:
self.service.stop()
else:
self.binary.kill()
if self.profile is not None:
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
# Extension commands:
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
@contextmanager
def context(self, context):
"""Sets the context that Selenium commands are running in using
a `with` statement. The state of the context on the server is
saved before entering the block, and restored upon exiting it.
:param context: Context, may be one of the class properties
`CONTEXT_CHROME` or `CONTEXT_CONTENT`.
Usage example::
with selenium.context(selenium.CONTEXT_CHROME):
# chrome scope
... do stuff ...
"""
initial_context = self.execute('GET_CONTEXT').pop('value')
self.set_context(context)
try:
yield
finally:
self.set_context(initial_context)
def install_addon(self, path, temporary=None):
"""
Installs Firefox addon.
Returns identifier of installed addon. This identifier can later
be used to uninstall addon.
:param path: Absolute path to the addon that will be installed.
:Usage:
driver.install_addon('/path/to/firebug.xpi')
"""
payload = {"path": path}
if temporary is not None:
payload["temporary"] = temporary
return self.execute("INSTALL_ADDON", payload)["value"]
def uninstall_addon(self, identifier):
"""
Uninstalls Firefox addon using its identifier.
:Usage:
driver.uninstall_addon('addon@foo.com')
"""
self.execute("UNINSTALL_ADDON", {"id": identifier})
| 38.725632
| 89
| 0.648271
|
import warnings
try:
basestring
except NameError: basestring = str
import shutil
import sys
from contextlib import contextmanager
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.remote.webdriver import WebDriver as RemoteWebDriver
from .extension_connection import ExtensionConnection
from .firefox_binary import FirefoxBinary
from .firefox_profile import FirefoxProfile
from .options import Options
from .remote_connection import FirefoxRemoteConnection
from .service import Service
from .webelement import FirefoxWebElement
class WebDriver(RemoteWebDriver):
NATIVE_EVENTS_ALLOWED = sys.platform != "darwin"
CONTEXT_CHROME = "chrome"
CONTEXT_CONTENT = "content"
_web_element_cls = FirefoxWebElement
def __init__(self, firefox_profile=None, firefox_binary=None,
timeout=30, capabilities=None, proxy=None,
executable_path="geckodriver", options=None,
service_log_path="geckodriver.log", firefox_options=None,
service_args=None, desired_capabilities=None, log_path=None,
keep_alive=True):
if log_path:
warnings.warn('use service_log_path instead of log_path',
DeprecationWarning, stacklevel=2)
service_log_path = log_path
if firefox_options:
warnings.warn('use options instead of firefox_options',
DeprecationWarning, stacklevel=2)
options = firefox_options
self.binary = None
self.profile = None
self.service = None
if capabilities is None and desired_capabilities:
capabilities = desired_capabilities
if capabilities is None:
capabilities = DesiredCapabilities.FIREFOX.copy()
if options is None:
options = Options()
capabilities = dict(capabilities)
if capabilities.get("binary"):
self.binary = capabilities["binary"]
if options is not None:
if options.binary is not None:
self.binary = options.binary
if options.profile is not None:
self.profile = options.profile
if firefox_binary is not None:
if isinstance(firefox_binary, basestring):
firefox_binary = FirefoxBinary(firefox_binary)
self.binary = firefox_binary
options.binary = firefox_binary
if firefox_profile is not None:
if isinstance(firefox_profile, basestring):
firefox_profile = FirefoxProfile(firefox_profile)
self.profile = firefox_profile
options.profile = firefox_profile
if capabilities.get("marionette"):
capabilities.pop("marionette")
self.service = Service(
executable_path,
service_args=service_args,
log_path=service_log_path)
self.service.start()
capabilities.update(options.to_capabilities())
executor = FirefoxRemoteConnection(
remote_server_addr=self.service.service_url)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=True)
else:
if self.binary is None:
self.binary = FirefoxBinary()
if self.profile is None:
self.profile = FirefoxProfile()
self.profile.native_events_enabled = (
self.NATIVE_EVENTS_ALLOWED and self.profile.native_events_enabled)
if proxy is not None:
proxy.add_to_capabilities(capabilities)
executor = ExtensionConnection("127.0.0.1", self.profile,
self.binary, timeout)
RemoteWebDriver.__init__(
self,
command_executor=executor,
desired_capabilities=capabilities,
keep_alive=keep_alive)
self._is_remote = False
def quit(self):
try:
RemoteWebDriver.quit(self)
except Exception:
pass
if self.w3c:
self.service.stop()
else:
self.binary.kill()
if self.profile is not None:
try:
shutil.rmtree(self.profile.path)
if self.profile.tempfolder is not None:
shutil.rmtree(self.profile.tempfolder)
except Exception as e:
print(str(e))
@property
def firefox_profile(self):
return self.profile
# Extension commands:
def set_context(self, context):
self.execute("SET_CONTEXT", {"context": context})
@contextmanager
def context(self, context):
initial_context = self.execute('GET_CONTEXT').pop('value')
self.set_context(context)
try:
yield
finally:
self.set_context(initial_context)
def install_addon(self, path, temporary=None):
payload = {"path": path}
if temporary is not None:
payload["temporary"] = temporary
return self.execute("INSTALL_ADDON", payload)["value"]
def uninstall_addon(self, identifier):
self.execute("UNINSTALL_ADDON", {"id": identifier})
| true
| true
|
1c497475a53a4c0124cb3f312edcf589a9dd4c1d
| 14,550
|
py
|
Python
|
models/variation/pix2pix_tm2_mc_full_in2_model.py
|
tkuri/irradiance_estimation
|
3f7e0e8d4772222faad7257a70a8dec0198e4810
|
[
"BSD-3-Clause"
] | 1
|
2020-07-22T18:06:40.000Z
|
2020-07-22T18:06:40.000Z
|
models/variation/pix2pix_tm2_mc_full_in2_model.py
|
tkuri/irradiance_estimation
|
3f7e0e8d4772222faad7257a70a8dec0198e4810
|
[
"BSD-3-Clause"
] | null | null | null |
models/variation/pix2pix_tm2_mc_full_in2_model.py
|
tkuri/irradiance_estimation
|
3f7e0e8d4772222faad7257a70a8dec0198e4810
|
[
"BSD-3-Clause"
] | null | null | null |
import torch
from .base_model import BaseModel
from . import networks
from torch.nn import functional as F
class Pix2PixTm2McFullIn2Model(BaseModel):
""" This class implements the pix2pix model, for learning a mapping from input images to output images given paired data.
The model training requires '--dataset_mode aligned' dataset.
By default, it uses a '--netG unet256' U-Net generator,
a '--netD basic' discriminator (PatchGAN),
and a '--gan_mode' vanilla GAN loss (the cross-entropy objective used in the orignal GAN paper).
pix2pix paper: https://arxiv.org/pdf/1611.07004.pdf
"""
@staticmethod
def modify_commandline_options(parser, is_train=True):
"""Add new dataset-specific options, and rewrite default values for existing options.
Parameters:
parser -- original option parser
is_train (bool) -- whether training phase or test phase. You can use this flag to add training-specific or test-specific options.
Returns:
the modified parser.
For pix2pix, we do not use image buffer
The training objective is: GAN Loss + lambda_L1 * ||G(A)-B||_1
By default, we use vanilla GAN loss, UNet with batchnorm, and aligned datasets.
"""
# changing the default values to match the pix2pix paper (https://phillipi.github.io/pix2pix/)
parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')
if is_train:
parser.set_defaults(pool_size=0, gan_mode='vanilla')
parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
return parser
def __init__(self, opt):
"""Initialize the pix2pix class.
Parameters:
opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
"""
BaseModel.__init__(self, opt)
# specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
# specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C', 'real_C_itp', 'ltm_slice00', 'ltm_slice12', 'ltm_slice24', 'matrix_1_0', 'matrix_1_1', 'matrix_1_2', 'matrix_1_3', 'matrix_2_0', 'matrix_2_1', 'matrix_2_2', 'matrix_2_3']
# specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
if self.isTrain:
# self.model_names = ['G', 'D']
self.model_names = ['G', 'G2', 'D']
else: # during test time, only load G
self.model_names = ['G', 'G2']
# define networks (both generator and discriminator)
self.output_nc = opt.output_nc
self.light_res = opt.light_res
self.intermediate_nc = opt.intermediate_nc
print('opt.output_nc', opt.output_nc)
print('light_res', self.light_res)
print('intermediate_nc', self.intermediate_nc)
self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc*self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
self.netG2 = networks.define_G(opt.input_nc + opt.input2_nc, self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain: # define a discriminator; conditional GANs need to take both input and output images; Therefore, #channels for D is input_nc + output_nc
self.netD = networks.define_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,
opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
if self.isTrain:
# define loss functions
self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
self.criterionL1 = torch.nn.L1Loss()
# initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
self.optimizers.append(self.optimizer_G)
self.optimizers.append(self.optimizer_G2)
self.optimizers.append(self.optimizer_D)
def set_input(self, input):
"""Unpack input data from the dataloader and perform necessary pre-processing steps.
Parameters:
input (dict): include the data itself and its metadata information.
The option 'direction' can be used to swap images in domain A and domain B.
"""
AtoB = self.opt.direction == 'AtoB'
self.real_A = input['A' if AtoB else 'B'].to(self.device)
self.real_B = input['B' if AtoB else 'A'].to(self.device)
self.real_C = input['C'].to(self.device)
# self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bicubic', align_corners=False)
self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.real_C_itp_flat = self.real_C_itp.view(-1, self.light_res**2, 1) # [1, lsxls, 1]
self.real_C_itp = torch.clamp((F.interpolate(self.real_C_itp, (self.real_C.size(-2), self.real_C.size(-1)), mode='nearest')-0.5)/0.5, min=-1.0, max=1.0)
self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)
self.image_paths = input['A_paths' if AtoB else 'B_paths']
def forward(self):
# print("test")
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
sub_matrix1 = self.netG(self.real_AC) # [1, 3xmc, 256, 256]
sub_matrix2 = self.netG2(self.real_AC) # [1, mc, 256, 256]
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)# [1, mc, ls, ls]
self.sub_matrix_1 = sub_matrix1.clone()
self.sub_matrix_2 = sub_matrix2.clone()
self.matrix_1 = torch.clamp((sub_matrix1*self.matrix_1_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_1_0 = self.matrix_1[:, [0, self.intermediate_nc, self.intermediate_nc*2], :, :]
self.matrix_1_1 = self.matrix_1[:, [1, 1 + self.intermediate_nc, 1 + self.intermediate_nc*2], :, :]
self.matrix_1_2 = self.matrix_1[:, [2, 2 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_1_3 = self.matrix_1[:, [3, 3 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
self.matrix_2 = torch.clamp((F.interpolate(sub_matrix2, (self.real_B.size(-2), self.real_B.size(-1)), mode='nearest')*self.matrix_2_gain-0.5)/0.5, min=-1.0, max=1.0)
self.matrix_2_0 = torch.unsqueeze(self.matrix_2[:, 0, :, :], 1)
self.matrix_2_1 = torch.unsqueeze(self.matrix_2[:, 1, :, :], 1)
self.matrix_2_2 = torch.unsqueeze(self.matrix_2[:, 2, :, :], 1)
self.matrix_2_3 = torch.unsqueeze(self.matrix_2[:, 3, :, :], 1)
sub_matrix1 = sub_matrix1.view(-1, sub_matrix1.size(1), sub_matrix1.size(2)*sub_matrix1.size(3)) # [1, 3xmc, 256x256]
sub_matrix2 = sub_matrix2.view(-1, sub_matrix2.size(1), sub_matrix2.size(2)*sub_matrix2.size(3)) # [1, mc, lsxls]
sub_matrix1 = torch.transpose(sub_matrix1, 1, 2) # [1, 256x256, 3xmc]
sm1R = sub_matrix1[:, :, 0:self.intermediate_nc] # [1, 256x256, mc]
sm1G = sub_matrix1[:, :, self.intermediate_nc:self.intermediate_nc*2]
sm1B = sub_matrix1[:, :, self.intermediate_nc*2:self.intermediate_nc*3]
bufR = torch.matmul(sm1R, sub_matrix2) # [1, 256x256, lsxls]
bufG = torch.matmul(sm1G, sub_matrix2)
bufB = torch.matmul(sm1B, sub_matrix2)
trans_matrix = torch.cat([bufR, bufG, bufB], dim=1) # [1, 3x256x256, lsxls]
ltm = torch.transpose(trans_matrix, 1, 2) #[25, 25, 3x256x256]
ltm = ltm.reshape(ltm.size(0), ltm.size(1)*self.real_B.size(1), self.real_B.size(2)*self.real_B.size(3)) #[25, 25x3, 256x256]
ltm = ltm.reshape(ltm.size(0), ltm.size(1), self.real_B.size(2), self.real_B.size(3)) #[25, 25x3, 256, 256]
self.ltm_slice00 = torch.clamp((ltm[:, 0:3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
self.ltm_slice12 = torch.clamp((ltm[:, 3*12:3*12+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
self.ltm_slice24 = torch.clamp((ltm[:, 3*24:3*24+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0) # [25, 3, 256, 256]
# trans_matrix = torch.matmul(sub_matrix1, sub_matrix2) #[1, 3x256x256, lsxls]
# print('trans_matrix:', trans_matrix.size())
tmR = trans_matrix[:, 0:256**2, :] # [1, 256x256, lsxls]
tmG = trans_matrix[:, 256**2:(256**2)*2, :]
tmB = trans_matrix[:, (256**2)*2:(256**2)*3, :]
# print('tmR:', tmR.size())
bufR = torch.matmul(tmR, self.real_C_itp_flat) # [1, 256x256, 1]
bufG = torch.matmul(tmG, self.real_C_itp_flat)
bufB = torch.matmul(tmB, self.real_C_itp_flat)
# print('bufR:', bufR.size())
buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256x256, 3]
buf = torch.transpose(buf, 1, 2) # [1, 3, 256x256]
buf = (buf - 0.5) / 0.5
buf = torch.clamp(buf, min=-1.0, max=1.0)
# print('buf:', buf.size())
self.fake_B = buf.view(self.real_B.size()) # [1, 3, 256, 256]
def forward_linebuf(self):
"""Run forward pass; called by both functions <optimize_parameters> and <test>."""
sub_matrix1 = self.netG(self.real_AC) # [1, 3, 256, 256]
sub_matrix2 = self.netG2(self.real_AC) # [1, 1, 256, 256]
sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
self.fake_B = torch.zeros_like(self.real_B)
sub_matrix2 = sub_matrix2.view(-1, 1, sub_matrix2.size(-2)*sub_matrix2.size(-1)) * 0.5 + 0.5 # [1, 1, 256x256]
for l in range(sub_matrix1.size(2)):
sub_matrix1_buf = sub_matrix1[:, :, l, :].reshape(-1, sub_matrix1.size(1)*sub_matrix1.size(3), 1) * 0.5 + 0.5 # [1, 3x256, 1]
trans_matrix = torch.matmul(sub_matrix1_buf, sub_matrix2) #[1, 3x256, 256x256]
# print('trans_matrix:', trans_matrix.size())
tmR = trans_matrix[:, 0:256, :] # [1, 256, 256x256]
tmG = trans_matrix[:, 256:256*2, :]
tmB = trans_matrix[:, 256*2:256*3, :]
# print('self.real_C_itp_flat:', self.real_C_itp_flat.size())
# print('tmR:', tmR.size())
bufR = torch.matmul(tmR, self.real_C_itp_flat * 10.0) # [1, 256, 1]
bufG = torch.matmul(tmG, self.real_C_itp_flat * 10.0)
bufB = torch.matmul(tmB, self.real_C_itp_flat * 10.0)
# print('bufR:', bufR.size())
buf = torch.cat([bufR, bufG, bufB], dim=2) # [1, 256, 3]
buf = torch.transpose(buf, 1, 2) # [1, 3, 256]
buf = (buf - 0.5) / 0.5
buf = buf.reshape(self.fake_B.size(0), self.fake_B.size(1), self.fake_B.size(3))
# print('buf:', buf.size())
# print('fake_B:', self.fake_B.size())
self.fake_B[:, :, l, :] = buf # [1, 3, 1, 256] <- [1,3,256]
def backward_D(self):
"""Calculate GAN loss for the discriminator"""
# Fake; stop backprop to the generator by detaching fake_B
# fake_AB = torch.cat((self.real_A, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
# pred_fake = self.netD(fake_AB.detach())
fake_ACB = torch.cat((self.real_AC, self.fake_B), 1) # we use conditional GANs; we need to feed both input and output to the discriminator
pred_fake = self.netD(fake_ACB.detach())
self.loss_D_fake = self.criterionGAN(pred_fake, False)
# Real
# real_AB = torch.cat((self.real_A, self.real_B), 1)
# pred_real = self.netD(real_AB)
real_ACB = torch.cat((self.real_AC, self.real_B), 1)
pred_real = self.netD(real_ACB)
self.loss_D_real = self.criterionGAN(pred_real, True)
# combine loss and calculate gradients
self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
self.loss_D.backward()
def backward_G(self):
"""Calculate GAN and L1 loss for the generator"""
# First, G(A) should fake the discriminator
# fake_AB = torch.cat((self.real_A, self.fake_B), 1)
# pred_fake = self.netD(fake_AB)
fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)
pred_fake = self.netD(fake_ACB)
self.loss_G_GAN = self.criterionGAN(pred_fake, True)
# Second, G(A) = B
self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
# combine loss and calculate gradients
self.loss_G = self.loss_G_GAN + self.loss_G_L1
self.loss_G.backward()
def optimize_parameters(self):
self.forward() # compute fake images: G(A)
# update D
self.set_requires_grad(self.netD, True) # enable backprop for D
self.optimizer_D.zero_grad() # set D's gradients to zero
self.backward_D() # calculate gradients for D
self.optimizer_D.step() # update D's weights
# update G
self.set_requires_grad(self.netD, False) # D requires no gradients when optimizing G
# self.optimizer_G.zero_grad() # set G's gradients to zero
# self.backward_G() # calculate graidents for G
# self.optimizer_G.step() # udpate G's weights
self.optimizer_G.zero_grad() # set G's gradients to zero
self.optimizer_G2.zero_grad() # set G's gradients to zero
self.backward_G() # calculate graidents for G
self.optimizer_G.step() # udpate G's weights
self.optimizer_G2.step() # udpate G's weights
| 58.669355
| 239
| 0.620619
|
import torch
from .base_model import BaseModel
from . import networks
from torch.nn import functional as F
class Pix2PixTm2McFullIn2Model(BaseModel):
    """Pix2pix variant that predicts a low-rank light-transport matrix (LTM).

    Two U-Net generators are trained jointly: ``netG`` predicts a per-pixel
    factor (``sub_matrix1``) and ``netG2`` a per-light factor
    (``sub_matrix2``).  Their product is a rank-``intermediate_nc`` transport
    matrix mapping the light pattern ``real_C`` to the relit image
    ``fake_B``.  A conditional discriminator judges (A, C, image) stacks.

    NOTE(review): the original source had several statements fused onto
    single lines (lost newlines); they are split here preserving order.
    """

    @staticmethod
    def modify_commandline_options(parser, is_train=True):
        """Add model-specific CLI options / defaults and return the parser."""
        parser.set_defaults(norm='batch', netG='unet_256', dataset_mode='aligned3')
        if is_train:
            parser.set_defaults(pool_size=0, gan_mode='vanilla')
            parser.add_argument('--lambda_L1', type=float, default=100.0, help='weight for L1 loss')
        return parser

    def __init__(self, opt):
        """Build generators, discriminator, losses and optimizers.

        Parameters
        ----------
        opt
            Parsed option namespace (see ``modify_commandline_options``).
        """
        BaseModel.__init__(self, opt)
        # Losses reported during training and tensors shown by the visualizer.
        self.loss_names = ['G_GAN', 'G_L1', 'D_real', 'D_fake']
        self.visual_names = ['real_A', 'fake_B', 'real_B', 'real_C', 'real_C_itp', 'ltm_slice00', 'ltm_slice12', 'ltm_slice24', 'matrix_1_0', 'matrix_1_1', 'matrix_1_2', 'matrix_1_3', 'matrix_2_0', 'matrix_2_1', 'matrix_2_2', 'matrix_2_3']
        if self.isTrain:
            self.model_names = ['G', 'G2', 'D']
        else:
            self.model_names = ['G', 'G2']
        self.output_nc = opt.output_nc
        self.light_res = opt.light_res              # spatial resolution of the light grid
        self.intermediate_nc = opt.intermediate_nc  # rank of the factorized LTM
        print('opt.output_nc', opt.output_nc)
        print('light_res', self.light_res)
        print('intermediate_nc', self.intermediate_nc)
        # Generator 1: per-pixel factor with output_nc * intermediate_nc channels.
        self.netG = networks.define_G(opt.input_nc + opt.input2_nc, opt.output_nc*self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
                                      not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        # Generator 2: per-light factor with intermediate_nc channels.
        self.netG2 = networks.define_G(opt.input_nc + opt.input2_nc, self.intermediate_nc, opt.ngf, 'unet_256_lastrelu', opt.norm,
                                       not opt.no_dropout, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            # Conditional discriminator over (input, light, output) stacks.
            self.netD = networks.define_D(opt.input_nc + opt.input2_nc + opt.output_nc, opt.ndf, opt.netD,
                                          opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain, self.gpu_ids)
        if self.isTrain:
            self.criterionGAN = networks.GANLoss(opt.gan_mode).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            self.optimizer_G = torch.optim.Adam(self.netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_G2 = torch.optim.Adam(self.netG2.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizers.append(self.optimizer_G)
            self.optimizers.append(self.optimizer_G2)
            self.optimizers.append(self.optimizer_D)

    def set_input(self, input):
        """Unpack a dataloader batch and precompute derived tensors.

        Expects keys 'A'/'B' (images), 'C' (light pattern) and
        'A_paths'/'B_paths' in *input*.
        """
        AtoB = self.opt.direction == 'AtoB'
        self.real_A = input['A' if AtoB else 'B'].to(self.device)
        self.real_B = input['B' if AtoB else 'A'].to(self.device)
        self.real_C = input['C'].to(self.device)
        # Downsample the light pattern to the light-grid resolution...
        self.real_C_itp = F.interpolate(self.real_C, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
        # ...flatten to a (N, light_res^2, 1) column vector for the matmul in forward()...
        self.real_C_itp_flat = self.real_C_itp.view(-1, self.light_res**2, 1)
        # ...and keep an upsampled, [-1, 1] copy purely for visualization.
        self.real_C_itp = torch.clamp((F.interpolate(self.real_C_itp, (self.real_C.size(-2), self.real_C.size(-1)), mode='nearest')-0.5)/0.5, min=-1.0, max=1.0)
        self.real_AC = torch.cat([self.real_A, self.real_C], dim=1)
        self.image_paths = input['A_paths' if AtoB else 'B_paths']

    def forward(self):
        """Predict the two LTM factors and relight: fake_B = (M1 @ M2) @ C."""
        sub_matrix1 = self.netG(self.real_AC)
        sub_matrix2 = self.netG2(self.real_AC)
        sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
        self.sub_matrix_1 = sub_matrix1.clone()
        self.sub_matrix_2 = sub_matrix2.clone()
        # NOTE(review): matrix_1_gain / matrix_2_gain are not set anywhere in
        # this class — presumably provided by BaseModel or options; confirm.
        self.matrix_1 = torch.clamp((sub_matrix1*self.matrix_1_gain-0.5)/0.5, min=-1.0, max=1.0)
        # RGB slices of the first factor, for visualization only.
        self.matrix_1_0 = self.matrix_1[:, [0, self.intermediate_nc, self.intermediate_nc*2], :, :]
        self.matrix_1_1 = self.matrix_1[:, [1, 1 + self.intermediate_nc, 1 + self.intermediate_nc*2], :, :]
        # Fixed: original indexed [2, 2+nc, 3+nc*2]; the 3 breaks the 0/1/2/3
        # pattern and looks like a copy-paste slip (visualization-only slice).
        self.matrix_1_2 = self.matrix_1[:, [2, 2 + self.intermediate_nc, 2 + self.intermediate_nc*2], :, :]
        self.matrix_1_3 = self.matrix_1[:, [3, 3 + self.intermediate_nc, 3 + self.intermediate_nc*2], :, :]
        self.matrix_2 = torch.clamp((F.interpolate(sub_matrix2, (self.real_B.size(-2), self.real_B.size(-1)), mode='nearest')*self.matrix_2_gain-0.5)/0.5, min=-1.0, max=1.0)
        self.matrix_2_0 = torch.unsqueeze(self.matrix_2[:, 0, :, :], 1)
        self.matrix_2_1 = torch.unsqueeze(self.matrix_2[:, 1, :, :], 1)
        self.matrix_2_2 = torch.unsqueeze(self.matrix_2[:, 2, :, :], 1)
        self.matrix_2_3 = torch.unsqueeze(self.matrix_2[:, 3, :, :], 1)
        # Flatten spatial dims and contract the two factors into the full LTM.
        sub_matrix1 = sub_matrix1.view(-1, sub_matrix1.size(1), sub_matrix1.size(2)*sub_matrix1.size(3))
        sub_matrix2 = sub_matrix2.view(-1, sub_matrix2.size(1), sub_matrix2.size(2)*sub_matrix2.size(3))
        sub_matrix1 = torch.transpose(sub_matrix1, 1, 2)
        sm1R = sub_matrix1[:, :, 0:self.intermediate_nc]
        sm1G = sub_matrix1[:, :, self.intermediate_nc:self.intermediate_nc*2]
        sm1B = sub_matrix1[:, :, self.intermediate_nc*2:self.intermediate_nc*3]
        bufR = torch.matmul(sm1R, sub_matrix2)
        bufG = torch.matmul(sm1G, sub_matrix2)
        bufB = torch.matmul(sm1B, sub_matrix2)
        trans_matrix = torch.cat([bufR, bufG, bufB], dim=1)
        # Reshape LTM columns into images to visualize a few of them.
        ltm = torch.transpose(trans_matrix, 1, 2)
        ltm = ltm.reshape(ltm.size(0), ltm.size(1)*self.real_B.size(1), self.real_B.size(2)*self.real_B.size(3))
        ltm = ltm.reshape(ltm.size(0), ltm.size(1), self.real_B.size(2), self.real_B.size(3))
        self.ltm_slice00 = torch.clamp((ltm[:, 0:3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
        self.ltm_slice12 = torch.clamp((ltm[:, 3*12:3*12+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
        self.ltm_slice24 = torch.clamp((ltm[:, 3*24:3*24+3, :, :] - 0.5) / 0.5, min=-1.0, max=1.0)
        # Apply the LTM to the flattened light pattern, per color channel.
        # assumes 256x256 output images (unet_256) — TODO confirm.
        tmR = trans_matrix[:, 0:256**2, :]
        tmG = trans_matrix[:, 256**2:(256**2)*2, :]
        tmB = trans_matrix[:, (256**2)*2:(256**2)*3, :]
        bufR = torch.matmul(tmR, self.real_C_itp_flat)
        bufG = torch.matmul(tmG, self.real_C_itp_flat)
        bufB = torch.matmul(tmB, self.real_C_itp_flat)
        buf = torch.cat([bufR, bufG, bufB], dim=2)
        buf = torch.transpose(buf, 1, 2)
        buf = (buf - 0.5) / 0.5
        buf = torch.clamp(buf, min=-1.0, max=1.0)
        self.fake_B = buf.view(self.real_B.size())

    def forward_linebuf(self):
        """Memory-lean variant of forward(): relight one image row at a time.

        NOTE(review): uses extra 0.5/10.0 rescaling not present in forward();
        presumably an inference-time experiment — confirm before relying on it.
        """
        sub_matrix1 = self.netG(self.real_AC)
        sub_matrix2 = self.netG2(self.real_AC)
        sub_matrix2 = F.interpolate(sub_matrix2, (self.light_res, self.light_res), mode='bilinear', align_corners=False)
        self.fake_B = torch.zeros_like(self.real_B)
        sub_matrix2 = sub_matrix2.view(-1, 1, sub_matrix2.size(-2)*sub_matrix2.size(-1)) * 0.5 + 0.5
        for l in range(sub_matrix1.size(2)):
            # Build the LTM rows for scanline l only, then relight that line.
            sub_matrix1_buf = sub_matrix1[:, :, l, :].reshape(-1, sub_matrix1.size(1)*sub_matrix1.size(3), 1) * 0.5 + 0.5
            trans_matrix = torch.matmul(sub_matrix1_buf, sub_matrix2)
            tmR = trans_matrix[:, 0:256, :]
            tmG = trans_matrix[:, 256:256*2, :]
            tmB = trans_matrix[:, 256*2:256*3, :]
            bufR = torch.matmul(tmR, self.real_C_itp_flat * 10.0)
            bufG = torch.matmul(tmG, self.real_C_itp_flat * 10.0)
            bufB = torch.matmul(tmB, self.real_C_itp_flat * 10.0)
            buf = torch.cat([bufR, bufG, bufB], dim=2)
            buf = torch.transpose(buf, 1, 2)
            buf = (buf - 0.5) / 0.5
            buf = buf.reshape(self.fake_B.size(0), self.fake_B.size(1), self.fake_B.size(3))
            self.fake_B[:, :, l, :] = buf

    def backward_D(self):
        """Discriminator loss: fake (detached) vs real conditional stacks."""
        fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)
        pred_fake = self.netD(fake_ACB.detach())  # detach: no grads into G here
        self.loss_D_fake = self.criterionGAN(pred_fake, False)
        real_ACB = torch.cat((self.real_AC, self.real_B), 1)
        pred_real = self.netD(real_ACB)
        self.loss_D_real = self.criterionGAN(pred_real, True)
        self.loss_D = (self.loss_D_fake + self.loss_D_real) * 0.5
        self.loss_D.backward()

    def backward_G(self):
        """Generator loss: fool D plus weighted L1 reconstruction."""
        fake_ACB = torch.cat((self.real_AC, self.fake_B), 1)
        pred_fake = self.netD(fake_ACB)
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * self.opt.lambda_L1
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()

    def optimize_parameters(self):
        """One training iteration: forward, update D, then update G and G2."""
        self.forward()
        self.set_requires_grad(self.netD, True)
        self.optimizer_D.zero_grad()
        self.backward_D()
        self.optimizer_D.step()
        self.set_requires_grad(self.netD, False)
        self.optimizer_G.zero_grad()
        self.optimizer_G2.zero_grad()
        self.backward_G()
        self.optimizer_G.step()
        self.optimizer_G2.step()
| true
| true
|
1c497491d95957aa66acaa71fdefe22a342c41c1
| 19,473
|
py
|
Python
|
mudata/_core/io.py
|
scverse/mudata
|
fbfc634e8f17bd70ed67bb8a37951564f16b61e6
|
[
"BSD-3-Clause"
] | 12
|
2022-01-10T14:11:23.000Z
|
2022-03-17T13:03:45.000Z
|
mudata/_core/io.py
|
scverse/mudata
|
fbfc634e8f17bd70ed67bb8a37951564f16b61e6
|
[
"BSD-3-Clause"
] | 10
|
2022-01-24T15:09:03.000Z
|
2022-03-29T03:47:28.000Z
|
mudata/_core/io.py
|
scverse/mudata
|
fbfc634e8f17bd70ed67bb8a37951564f16b61e6
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import zarr
from typing import Union
from os import PathLike
import os
from warnings import warn
from collections.abc import MutableMapping
import numpy as np
import h5py
import anndata as ad
from anndata import AnnData
# from anndata.compat import _read_hdf5_attribute # 0.8
from pathlib import Path
from scipy import sparse
from mudata import MuData
from .file_backing import MuDataFileManager, AnnDataFileManager
#
# Saving multimodal data objects
#
def _write_h5mu(file: h5py.File, mdata: MuData, write_data=True, **kwargs):
    """Serialize *mdata* into an open HDF5 file handle.

    Writes global annotations first, then each modality under ``mod/<name>``,
    and finally the MuData encoding metadata.  With ``write_data=False`` the
    per-modality ``X`` matrices are skipped (e.g. for backed objects whose
    data is already on disk).  Extra ``kwargs`` are forwarded to the HDF5
    dataset creation calls.
    """
    from anndata._io.specs.registry import write_elem
    from .. import __version__, __mudataversion__, __anndataversion__
    # Global (multimodal) annotations; string columns become categoricals.
    write_elem(
        file,
        "obs",
        mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(
        file,
        "var",
        mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
    write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
    write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
    write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
    write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
    write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
    write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
    attrs = file.attrs
    attrs["axis"] = mdata.axis
    # One subgroup per modality, each encoded as an AnnData object.
    mod = file.require_group("mod")
    for k, v in mdata.mod.items():
        group = mod.require_group(k)
        adata = mdata.mod[k]
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        if write_data:
            write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
        if adata.raw is not None:
            write_elem(group, "raw", adata.raw)
        write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
        write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
        write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
        write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
        write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
        write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
        write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
        write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
        attrs = group.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
    # Preserve the modality ordering for round-trips.
    mod_attrs = mod.attrs
    mod_attrs["mod-order"] = list(mdata.mod.keys())
    attrs = file.attrs
    attrs["encoding-type"] = "MuData"
    attrs["encoding-version"] = __mudataversion__
    attrs["encoder"] = "mudata"
    attrs["encoder-version"] = __version__
    # Restore top-level annotation
    if not mdata.is_view or not mdata.isbacked:
        mdata.update()
def write_zarr(
    store: Union[MutableMapping, str, Path],
    data: Union[MuData, AnnData],
    chunks=None,
    write_data=True,
    **kwargs,
):
    """
    Write MuData or AnnData object to the Zarr store
    Matrices - sparse or dense - are currently stored as they are.

    AnnData objects are delegated to :func:`anndata._io.zarr.write_zarr`;
    MuData objects are written with global annotations first, then one
    ``mod/<name>`` subgroup per modality.  ``chunks`` applies only to dense
    per-modality ``X`` matrices; ``write_data=False`` skips ``X`` entirely.
    """
    import zarr
    from anndata._io.specs.registry import write_elem
    from anndata._io.zarr import write_zarr as anndata_write_zarr
    from .. import __version__, __mudataversion__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
        anndata_write_zarr(store, adata, chunks=chunks, **kwargs)
    elif isinstance(data, MuData):
        if isinstance(store, Path):
            store = str(store)
        file = zarr.open(store, mode="w")
        mdata = data
        # Global (multimodal) annotations.
        write_elem(
            file,
            "obs",
            mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(
            file,
            "var",
            mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
        write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
        write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
        write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
        write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
        write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
        write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
        attrs = file.attrs
        attrs["axis"] = mdata.axis
        mod = file.require_group("mod")
        for k, v in mdata.mod.items():
            group = mod.require_group(k)
            adata = mdata.mod[k]
            adata.strings_to_categoricals()
            if adata.raw is not None:
                adata.strings_to_categoricals(adata.raw.var)
            if write_data:
                # Chunking is only meaningful for dense arrays.
                if chunks is not None and not isinstance(adata.X, sparse.spmatrix):
                    write_elem(group, "X", adata.X, dataset_kwargs=dict(chunks=chunks, **kwargs))
                else:
                    write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
            if adata.raw is not None:
                write_elem(group, "raw", adata.raw)
            write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
            write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
            write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
            write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
            write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
            write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
            write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
            write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
            attrs = group.attrs
            attrs["encoding-type"] = "anndata"
            attrs["encoding-version"] = __anndataversion__
            attrs["encoder"] = "mudata"
            attrs["encoder-version"] = __version__
        # Preserve the modality ordering for round-trips.
        mod_attrs = mod.attrs
        mod_attrs["mod-order"] = list(mdata.mod.keys())
        attrs = file.attrs
        attrs["encoding-type"] = "MuData"
        attrs["encoding-version"] = __mudataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
        # Restore top-level annotation
        if not mdata.is_view or not mdata.isbacked:
            mdata.update()
def write_h5mu(filename: PathLike, mdata: MuData, **kwargs):
    """
    Write MuData object to the HDF5 file

    Matrices - sparse or dense - are currently stored as they are.
    """
    from .. import __version__, __mudataversion__, __anndataversion__
    # Reserve a 512-byte userblock for the MuData signature, then serialize.
    with h5py.File(filename, "w", userblock_size=512) as f:
        _write_h5mu(f, mdata, **kwargs)
    # Stamp the userblock with the format/creator header and zero-pad it so
    # the HDF5 superblock still starts at offset 512 (append-mode artifact).
    header = (
        f"MuData (format-version={__mudataversion__};"
        f"creator=muon;creator-version={__version__})"
    ).encode("utf-8")
    with open(filename, "br+") as f:
        written = f.write(header)
        f.write(b"\0" * (512 - written))
def write_h5ad(filename: PathLike, mod: str, data: Union[MuData, AnnData]):
    """
    Write AnnData object to the HDF5 file with a MuData container

    Replaces the modality *mod* inside an existing ``.h5mu`` file with *data*
    (an AnnData, or the matching modality of a MuData).

    Currently is based on anndata._io.h5ad.write_h5ad internally.
    Matrices - sparse or dense - are currently stored as they are.
    Ideally this is merged later to anndata._io.h5ad.write_h5ad.
    """
    from anndata._io.specs.registry import write_elem
    from anndata._io.h5ad import write_h5ad
    from .. import __version__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
    elif isinstance(data, MuData):
        adata = data.mod[mod]
    else:
        raise TypeError(f"Expected AnnData or MuData object with {mod} modality")
    with h5py.File(filename, "r+") as f:
        # Check that 'mod' is present
        if not "mod" in f:
            raise ValueError("The .h5mu object has to contain .mod slot")
        fm = f["mod"]
        # Remove the modality if it exists
        if mod in fm:
            del fm[mod]
        fmd = fm.create_group(mod)
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        filepath = Path(filename)
        # Skip rewriting X when the AnnData is backed by this very file —
        # the matrix is already on disk.
        if not (adata.isbacked and Path(adata.filename) == Path(filepath)):
            write_elem(fmd, f"X", adata.X)
        # NOTE: Calling write_elem() does not allow writing .raw into .h5mu modalities
        if adata.raw is not None:
            write_elem(f, f"mod/{mod}/raw", adata.raw)
        write_elem(fmd, "obs", adata.obs)
        write_elem(fmd, "var", adata.var)
        write_elem(fmd, "obsm", dict(adata.obsm))
        write_elem(fmd, "varm", dict(adata.varm))
        write_elem(fmd, "obsp", dict(adata.obsp))
        write_elem(fmd, "varp", dict(adata.varp))
        write_elem(fmd, "layers", dict(adata.layers))
        write_elem(fmd, "uns", dict(adata.uns))
        # Mark the group as an AnnData encoding written by this package.
        attrs = fmd.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "muon"
        attrs["encoder-version"] = __version__
write_anndata = write_h5ad  # backwards-compatible alias for writing a modality
def write(filename: PathLike, data: Union[MuData, AnnData]):
    """
    Write MuData or AnnData to an HDF5 file

    This function is designed to enhance I/O ease of use.
    It recognises the following formats of filename:
      - for MuData
            - FILE.h5mu
      - for AnnData
            - FILE.h5mu/MODALITY
            - FILE.h5mu/mod/MODALITY
            - FILE.h5ad
    """
    import re
    # Accept Path objects, as promised by the PathLike signature.
    filename = os.fspath(filename)
    if filename.endswith(".h5mu") or isinstance(data, MuData):
        assert filename.endswith(".h5mu") and isinstance(
            data, MuData
        ), "Can only save MuData object to .h5mu file"
        write_h5mu(filename, data)
    else:
        assert isinstance(data, AnnData), "Only MuData and AnnData objects are accepted"
        # Split FILE.h5mu[/mod]/MODALITY-style paths into (base, ext, p1, p2).
        m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
        if m is not None:
            m = m.groups()
        else:
            # Not a .h5mu path: fall back to a plain .h5ad file, mirroring
            # read().  (Previously .h5ad names raised ValueError here and the
            # h5ad branch below was unreachable.)
            if filename.endswith(".h5ad"):
                m = [filename[:-5], "h5ad", "", ""]
            else:
                raise ValueError("Expected non-empty .h5ad or .h5mu file name")
        filepath = ".".join([m[0], m[1]])
        if m[1] == "h5mu":
            if m[3] == "":
                # .h5mu/<modality>
                return write_h5ad(filepath, m[2], data)
            elif m[2] == "mod":
                # .h5mu/mod/<modality>
                return write_h5ad(filepath, m[3], data)
            else:
                raise ValueError(
                    "If a single modality to be written from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
                )
        elif m[1] == "h5ad":
            return data.write(filepath)
        else:
            raise ValueError()
#
# Reading from multimodal data objects
#
def read_h5mu(filename: PathLike, backed: Union[str, bool, None] = None):
    """
    Read MuData object from HDF5 file

    With ``backed`` set (True / "r" / "r+") matrices stay on disk and a file
    manager keeps the HDF5 handle open; otherwise everything is loaded into
    memory.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe
    # backed=True means read-only backing; an explicit mode string wins.
    if backed is True or not backed:
        mode = "r"
    else:
        mode = backed
    manager = MuDataFileManager(filename, mode) if backed else MuDataFileManager()
    # Sniff the 512-byte userblock written by write_h5mu for the signature.
    with open(filename, "rb") as f:
        ish5mu = f.read(6) == b"MuData"
    if not ish5mu:
        if h5py.is_hdf5(filename):
            warn(
                "The HDF5 file was not created by muon, we can't guarantee that everything will work correctly"
            )
        else:
            raise ValueError("The file is not an HDF5 file")
    with h5py.File(filename, mode) as f:
        d = {}
        for k in f.keys():
            if k in ["obs", "var"]:
                d[k] = read_dataframe(f[k])
            if k == "mod":
                mods = {}
                gmods = f[k]
                for m in gmods.keys():
                    ad = _read_h5mu_mod(gmods[m], manager, backed not in (None, False))
                    mods[m] = ad
                # Restore the original modality ordering if recorded.
                mod_order = None
                if "mod-order" in gmods.attrs:
                    mod_order = gmods.attrs["mod-order"]
                # TODO: use in v0.8
                # mod_order = _read_hdf5_attribute(k, "mod-order")
                if mod_order is not None and all([m in gmods for m in mod_order]):
                    mods = {k: mods[k] for k in mod_order}
                d[k] = mods
            else:
                # NOTE(review): this `else` pairs with `if k == "mod"`, so for
                # "obs"/"var" the read_dataframe result above is overwritten
                # here by read_elem — confirm this is intended.
                d[k] = read_elem(f[k])
        if "axis" in f.attrs:
            d["axis"] = f.attrs["axis"]
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def read_zarr(store: Union[str, Path, MutableMapping, zarr.Group]):
    """\
    Read from a hierarchical Zarr array store.

    Stores without a "mod" group are treated as plain AnnData and delegated
    to :func:`anndata._io.zarr.read_zarr`.

    Parameters
    ----------
    store
        The filename, a :class:`~typing.MutableMapping`, or a Zarr storage class.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import (
        read_zarr as anndata_read_zarr,
        read_dataframe,
        _read_legacy_raw,
        _clean_uns,
    )
    if isinstance(store, Path):
        store = str(store)
    f = zarr.open(store, mode="r")
    d = {}
    if "mod" not in f.keys():
        return anndata_read_zarr(store)
    manager = MuDataFileManager()
    for k in f.keys():
        if k in {"obs", "var"}:
            d[k] = read_dataframe(f[k])
        if k == "mod":
            mods = {}
            gmods = f[k]
            for m in gmods.keys():
                ad = _read_zarr_mod(gmods[m], manager)
                mods[m] = ad
            d[k] = mods
        else:  # Base case
            # NOTE(review): this `else` pairs with `if k == "mod"`, so for
            # "obs"/"var" the read_dataframe result is overwritten here by
            # read_elem — confirm this is intended.
            d[k] = read_elem(f[k])
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def _read_zarr_mod(g: zarr.Group, manager: MuDataFileManager = None, backed: bool = False) -> AnnData:
    """Read one modality subgroup of a Zarr store into an AnnData object.

    With ``backed=True`` the X matrix is left on disk (only its dtype is
    recorded); a *manager* attaches the AnnData to the parent MuData file.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import read_dataframe, _read_legacy_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse matrices are stored as a group with a "data" array.
            if isinstance(X, zarr.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is reconstructed separately below
            d[k] = read_elem(g[k])
    # NOTE: local name `ad` shadows the module-level anndata alias.
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    raw = _read_legacy_raw(
        g,
        d.get("raw"),
        read_dataframe,
        read_elem,
        attrs=("var", "varm") if backed else ("var", "varm", "X"),
    )
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def _read_h5mu_mod(
    g: "h5py.Group", manager: MuDataFileManager = None, backed: bool = False
) -> AnnData:
    """Read one ``mod/<name>`` group of an .h5mu file into an AnnData object.

    With ``backed=True`` the X matrix stays on disk (only its dtype is
    recorded); a *manager* attaches the AnnData to the parent MuData file.
    """
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse matrices are stored as a group with a "data" dataset.
            if isinstance(X, h5py.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is reconstructed separately below
            d[k] = read_elem(g[k])
    # NOTE: local name `ad` shadows the module-level anndata alias.
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    raw = _read_raw(g, attrs=("var", "varm") if backed else ("var", "varm", "X"))
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def read_h5ad(
    filename: PathLike,
    mod: str,
    backed: Union[str, bool, None] = None,
) -> AnnData:
    """
    Read AnnData object from inside a .h5mu file
    or from a standalone .h5ad file

    Currently replicates and modifies anndata._io.h5ad.read_h5ad.
    Matrices are loaded as they are in the file (sparse or dense).
    Ideally this is merged later to anndata._io.h5ad.read_h5ad.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    # Normalize `backed` into an h5py mode; backed reads get a file manager
    # that keeps the HDF5 handle open.  (Removed an unused `d = {}` local and
    # unused function-scope imports from the original.)
    hdf5_mode = "r"
    if backed not in {None, False}:
        hdf5_mode = backed
        if hdf5_mode is True:
            hdf5_mode = "r+"
        assert hdf5_mode in {"r", "r+"}
        backed = True
        manager = MuDataFileManager(filename, hdf5_mode)
    else:
        backed = False
        manager = None
    with h5py.File(filename, hdf5_mode) as f_root:
        f = f_root["mod"][mod]
        return _read_h5mu_mod(f, manager, backed)
read_anndata = read_h5ad  # backwards-compatible alias for reading a modality
def read(filename: PathLike, **kwargs) -> Union[MuData, AnnData]:
    """
    Read MuData object from HDF5 file
    or AnnData object (a single modality) inside it

    This function is designed to enhance I/O ease of use.
    It recognises the following formats:
      - FILE.h5mu
      - FILE.h5mu/MODALITY
      - FILE.h5mu/mod/MODALITY
      - FILE.h5ad
    """
    import re
    # Accept Path objects, as promised by the PathLike signature
    # (str methods and re.search below require a plain string).
    filename = os.fspath(filename)
    # Split FILE.h5mu[/mod]/MODALITY-style paths into (base, ext, p1, p2).
    m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
    if m is not None:
        m = m.groups()
    else:
        if filename.endswith(".h5ad"):
            m = [filename[:-5], "h5ad", "", ""]
        else:
            raise ValueError("Expected non-empty .h5ad or .h5mu file name")
    filepath = ".".join([m[0], m[1]])
    if m[1] == "h5mu":
        if all(i == 0 for i in map(len, m[2:])):
            # Ends with .h5mu
            return read_h5mu(filepath, **kwargs)
        elif m[3] == "":
            # .h5mu/<modality>
            return read_h5ad(filepath, m[2], **kwargs)
        elif m[2] == "mod":
            # .h5mu/mod/<modality>
            return read_h5ad(filepath, m[3], **kwargs)
        else:
            raise ValueError(
                "If a single modality to be read from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
            )
    elif m[1] == "h5ad":
        return ad.read_h5ad(filepath, **kwargs)
    else:
        raise ValueError("The file format is not recognised, expected to be an .h5mu or .h5ad file")
| 31.612013
| 111
| 0.583629
|
from __future__ import annotations
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import zarr
from typing import Union
from os import PathLike
import os
from warnings import warn
from collections.abc import MutableMapping
import numpy as np
import h5py
import anndata as ad
from anndata import AnnData
from pathlib import Path
from scipy import sparse
from mudata import MuData
from .file_backing import MuDataFileManager, AnnDataFileManager
def _write_h5mu(file: h5py.File, mdata: MuData, write_data=True, **kwargs):
    """Serialize *mdata* into an open HDF5 file handle.

    Global annotations are written first, then each modality under
    ``mod/<name>``, then the MuData encoding metadata.  ``write_data=False``
    skips the per-modality ``X`` matrices; ``kwargs`` go to dataset creation.
    """
    from anndata._io.specs.registry import write_elem
    from .. import __version__, __mudataversion__, __anndataversion__
    # Global (multimodal) annotations; string columns become categoricals.
    write_elem(
        file,
        "obs",
        mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(
        file,
        "var",
        mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
        dataset_kwargs=kwargs,
    )
    write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
    write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
    write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
    write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
    write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
    write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
    write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
    attrs = file.attrs
    attrs["axis"] = mdata.axis
    # One subgroup per modality, each encoded as an AnnData object.
    mod = file.require_group("mod")
    for k, v in mdata.mod.items():
        group = mod.require_group(k)
        adata = mdata.mod[k]
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        if write_data:
            write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
        if adata.raw is not None:
            write_elem(group, "raw", adata.raw)
        write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
        write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
        write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
        write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
        write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
        write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
        write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
        write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
        attrs = group.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
    # Preserve the modality ordering for round-trips.
    mod_attrs = mod.attrs
    mod_attrs["mod-order"] = list(mdata.mod.keys())
    attrs = file.attrs
    attrs["encoding-type"] = "MuData"
    attrs["encoding-version"] = __mudataversion__
    attrs["encoder"] = "mudata"
    attrs["encoder-version"] = __version__
    # Restore top-level annotation after writing.
    if not mdata.is_view or not mdata.isbacked:
        mdata.update()
def write_zarr(
    store: Union[MutableMapping, str, Path],
    data: Union[MuData, AnnData],
    chunks=None,
    write_data=True,
    **kwargs,
):
    """Write a MuData or AnnData object to a Zarr store.

    AnnData objects are delegated to :func:`anndata._io.zarr.write_zarr`;
    MuData objects get global annotations plus one ``mod/<name>`` subgroup
    per modality.  ``chunks`` applies only to dense per-modality ``X``;
    ``write_data=False`` skips ``X`` entirely.
    """
    import zarr
    from anndata._io.specs.registry import write_elem
    from anndata._io.zarr import write_zarr as anndata_write_zarr
    from .. import __version__, __mudataversion__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
        anndata_write_zarr(store, adata, chunks=chunks, **kwargs)
    elif isinstance(data, MuData):
        if isinstance(store, Path):
            store = str(store)
        file = zarr.open(store, mode="w")
        mdata = data
        # Global (multimodal) annotations.
        write_elem(
            file,
            "obs",
            mdata.strings_to_categoricals(mdata._shrink_attr("obs", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(
            file,
            "var",
            mdata.strings_to_categoricals(mdata._shrink_attr("var", inplace=False)),
            dataset_kwargs=kwargs,
        )
        write_elem(file, "obsm", dict(mdata.obsm), dataset_kwargs=kwargs)
        write_elem(file, "varm", dict(mdata.varm), dataset_kwargs=kwargs)
        write_elem(file, "obsp", dict(mdata.obsp), dataset_kwargs=kwargs)
        write_elem(file, "varp", dict(mdata.varp), dataset_kwargs=kwargs)
        write_elem(file, "uns", dict(mdata.uns), dataset_kwargs=kwargs)
        write_elem(file, "obsmap", dict(mdata.obsmap), dataset_kwargs=kwargs)
        write_elem(file, "varmap", dict(mdata.varmap), dataset_kwargs=kwargs)
        attrs = file.attrs
        attrs["axis"] = mdata.axis
        mod = file.require_group("mod")
        for k, v in mdata.mod.items():
            group = mod.require_group(k)
            adata = mdata.mod[k]
            adata.strings_to_categoricals()
            if adata.raw is not None:
                adata.strings_to_categoricals(adata.raw.var)
            if write_data:
                # Chunking is only meaningful for dense arrays.
                if chunks is not None and not isinstance(adata.X, sparse.spmatrix):
                    write_elem(group, "X", adata.X, dataset_kwargs=dict(chunks=chunks, **kwargs))
                else:
                    write_elem(group, "X", adata.X, dataset_kwargs=kwargs)
            if adata.raw is not None:
                write_elem(group, "raw", adata.raw)
            write_elem(group, "obs", adata.obs, dataset_kwargs=kwargs)
            write_elem(group, "var", adata.var, dataset_kwargs=kwargs)
            write_elem(group, "obsm", dict(adata.obsm), dataset_kwargs=kwargs)
            write_elem(group, "varm", dict(adata.varm), dataset_kwargs=kwargs)
            write_elem(group, "obsp", dict(adata.obsp), dataset_kwargs=kwargs)
            write_elem(group, "varp", dict(adata.varp), dataset_kwargs=kwargs)
            write_elem(group, "layers", dict(adata.layers), dataset_kwargs=kwargs)
            write_elem(group, "uns", dict(adata.uns), dataset_kwargs=kwargs)
            attrs = group.attrs
            attrs["encoding-type"] = "anndata"
            attrs["encoding-version"] = __anndataversion__
            attrs["encoder"] = "mudata"
            attrs["encoder-version"] = __version__
        # Preserve the modality ordering for round-trips.
        mod_attrs = mod.attrs
        mod_attrs["mod-order"] = list(mdata.mod.keys())
        attrs = file.attrs
        attrs["encoding-type"] = "MuData"
        attrs["encoding-version"] = __mudataversion__
        attrs["encoder"] = "mudata"
        attrs["encoder-version"] = __version__
        # Restore top-level annotation after writing.
        if not mdata.is_view or not mdata.isbacked:
            mdata.update()
def write_h5mu(filename: PathLike, mdata: MuData, **kwargs):
    """Write a MuData object to an HDF5 (.h5mu) file.

    A 512-byte userblock is reserved and stamped with a MuData signature so
    readers can recognise the format without opening it via h5py.
    """
    from .. import __version__, __mudataversion__, __anndataversion__
    with h5py.File(filename, "w", userblock_size=512) as f:
        _write_h5mu(f, mdata, **kwargs)
    with open(filename, "br+") as f:
        nbytes = f.write(
            f"MuData (format-version={__mudataversion__};creator=muon;creator-version={__version__})".encode(
                "utf-8"
            )
        )
        # Zero-pad so the HDF5 superblock still starts at offset 512
        # (needed because the file was reopened in append mode).
        f.write(
            b"\0" * (512 - nbytes)
        )
def write_h5ad(filename: PathLike, mod: str, data: Union[MuData, AnnData]):
    """Replace the modality *mod* inside an existing .h5mu file with *data*.

    *data* is an AnnData object, or a MuData whose ``mod[mod]`` is taken.
    Raises TypeError for other inputs and ValueError if the file has no
    ``mod`` group.
    """
    from anndata._io.specs.registry import write_elem
    from anndata._io.h5ad import write_h5ad
    from .. import __version__, __anndataversion__
    if isinstance(data, AnnData):
        adata = data
    elif isinstance(data, MuData):
        adata = data.mod[mod]
    else:
        raise TypeError(f"Expected AnnData or MuData object with {mod} modality")
    with h5py.File(filename, "r+") as f:
        if not "mod" in f:
            raise ValueError("The .h5mu object has to contain .mod slot")
        fm = f["mod"]
        # Drop any existing group for this modality before rewriting it.
        if mod in fm:
            del fm[mod]
        fmd = fm.create_group(mod)
        adata.strings_to_categoricals()
        if adata.raw is not None:
            adata.strings_to_categoricals(adata.raw.var)
        filepath = Path(filename)
        # Skip rewriting X when the AnnData is backed by this very file —
        # the matrix is already on disk.
        if not (adata.isbacked and Path(adata.filename) == Path(filepath)):
            write_elem(fmd, f"X", adata.X)
        # raw must be written at the file level, not via the modality group.
        if adata.raw is not None:
            write_elem(f, f"mod/{mod}/raw", adata.raw)
        write_elem(fmd, "obs", adata.obs)
        write_elem(fmd, "var", adata.var)
        write_elem(fmd, "obsm", dict(adata.obsm))
        write_elem(fmd, "varm", dict(adata.varm))
        write_elem(fmd, "obsp", dict(adata.obsp))
        write_elem(fmd, "varp", dict(adata.varp))
        write_elem(fmd, "layers", dict(adata.layers))
        write_elem(fmd, "uns", dict(adata.uns))
        # Mark the group as an AnnData encoding written by this package.
        attrs = fmd.attrs
        attrs["encoding-type"] = "anndata"
        attrs["encoding-version"] = __anndataversion__
        attrs["encoder"] = "muon"
        attrs["encoder-version"] = __version__
write_anndata = write_h5ad  # backwards-compatible alias for writing a modality
def write(filename: PathLike, data: Union[MuData, AnnData]):
    """Write a MuData or AnnData object, dispatching on the filename.

    Recognised forms: FILE.h5mu (MuData); FILE.h5mu/MODALITY,
    FILE.h5mu/mod/MODALITY and FILE.h5ad (AnnData).
    """
    import re
    # Accept Path objects, as promised by the PathLike signature.
    filename = os.fspath(filename)
    if filename.endswith(".h5mu") or isinstance(data, MuData):
        assert filename.endswith(".h5mu") and isinstance(
            data, MuData
        ), "Can only save MuData object to .h5mu file"
        write_h5mu(filename, data)
    else:
        assert isinstance(data, AnnData), "Only MuData and AnnData objects are accepted"
        # Split FILE.h5mu[/mod]/MODALITY-style paths into (base, ext, p1, p2).
        m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
        if m is not None:
            m = m.groups()
        else:
            # Not a .h5mu path: fall back to a plain .h5ad file, mirroring
            # read().  (Previously .h5ad names raised ValueError here and the
            # h5ad branch below was unreachable.)
            if filename.endswith(".h5ad"):
                m = [filename[:-5], "h5ad", "", ""]
            else:
                raise ValueError("Expected non-empty .h5ad or .h5mu file name")
        filepath = ".".join([m[0], m[1]])
        if m[1] == "h5mu":
            if m[3] == "":
                # .h5mu/<modality>
                return write_h5ad(filepath, m[2], data)
            elif m[2] == "mod":
                # .h5mu/mod/<modality>
                return write_h5ad(filepath, m[3], data)
            else:
                raise ValueError(
                    "If a single modality to be written from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
                )
        elif m[1] == "h5ad":
            return data.write(filepath)
        else:
            raise ValueError()
def read_h5mu(filename: PathLike, backed: Union[str, bool, None] = None):
    """Read a MuData object from an .h5mu file.

    :param filename: Path to the .h5mu file.
    :param backed: None/False to load everything into memory; True or "r"
        to open the file backed read-only; "r+" to open backed with write
        access.
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe
    # backed=True maps to read-only mode; explicit "r"/"r+" pass through.
    if backed is True or not backed:
        mode = "r"
    else:
        mode = backed
    # For backed objects the manager keeps an HDF5 handle to the file open.
    manager = MuDataFileManager(filename, mode) if backed else MuDataFileManager()
    with open(filename, "rb") as f:
        # Files written by muon start with a "MuData" magic prefix.
        ish5mu = f.read(6) == b"MuData"
    if not ish5mu:
        if h5py.is_hdf5(filename):
            warn(
                "The HDF5 file was not created by muon, we can't guarantee that everything will work correctly"
            )
        else:
            raise ValueError("The file is not an HDF5 file")
    with h5py.File(filename, mode) as f:
        d = {}
        for k in f.keys():
            if k in ["obs", "var"]:
                d[k] = read_dataframe(f[k])
            # NOTE(review): this is `if`, not `elif` -- for "obs"/"var" the
            # `else` branch below re-reads the group with read_elem and
            # overwrites the read_dataframe result; looks like it was meant
            # to be `elif` -- confirm against upstream.
            if k == "mod":
                mods = {}
                gmods = f[k]
                for m in gmods.keys():
                    # Each modality becomes an AnnData; backed iff requested.
                    ad = _read_h5mu_mod(gmods[m], manager, backed not in (None, False))
                    mods[m] = ad
                mod_order = None
                if "mod-order" in gmods.attrs:
                    mod_order = gmods.attrs["mod-order"]
                # TODO: use in v0.8
                # mod_order = _read_hdf5_attribute(k, "mod-order")
                # Restore the recorded modality order when every listed
                # modality is actually present in the file.
                if mod_order is not None and all([m in gmods for m in mod_order]):
                    mods = {k: mods[k] for k in mod_order}
                d[k] = mods
            else:
                d[k] = read_elem(f[k])
        if "axis" in f.attrs:
            d["axis"] = f.attrs["axis"]
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def read_zarr(store: Union[str, Path, MutableMapping, zarr.Group]):
    """Read a MuData (or plain AnnData) object from a zarr store.

    Falls back to ``anndata``'s own zarr reader when the store contains no
    ``mod`` group, i.e. when it holds a plain AnnData object.

    :param store: Path, mapping, or zarr group to read from.
    """
    import zarr
    from anndata._io.specs.registry import read_elem

    # NOTE: `_read_legacy_raw` and `_clean_uns` were imported here but never
    # used; the unused imports have been dropped.
    from anndata._io.zarr import (
        read_zarr as anndata_read_zarr,
        read_dataframe,
    )

    if isinstance(store, Path):
        store = str(store)
    f = zarr.open(store, mode="r")
    d = {}
    if "mod" not in f.keys():
        # No modalities: this is a plain AnnData store.
        return anndata_read_zarr(store)
    manager = MuDataFileManager()
    for k in f.keys():
        if k in {"obs", "var"}:
            d[k] = read_dataframe(f[k])
        # NOTE(review): this is `if`, not `elif` -- for "obs"/"var" the
        # `else` branch below re-reads the group with read_elem and
        # overwrites the read_dataframe result; likely intended `elif`.
        if k == "mod":
            mods = {}
            gmods = f[k]
            for m in gmods.keys():
                ad = _read_zarr_mod(gmods[m], manager)
                mods[m] = ad
            d[k] = mods
        else:  # Base case
            d[k] = read_elem(f[k])
    mu = MuData._init_from_dict_(**d)
    mu.file = manager
    return mu
def _read_zarr_mod(g: zarr.Group, manager: MuDataFileManager = None, backed: bool = False) -> AnnData:
    """Read a single modality from a zarr group into an AnnData object.

    :param g: Zarr group of the modality (e.g. ``mod/rna``).
    :param manager: Parent MuData file manager; when given, the AnnData is
        attached to it via an AnnDataFileManager.
    :param backed: If True, do not load ``X`` (or raw ``X``) into memory;
        only its dtype is recorded.
    """
    import zarr
    from anndata._io.specs.registry import read_elem
    from anndata._io.zarr import read_dataframe, _read_legacy_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse X is stored as a group with a "data" array inside;
            # dense X is an array with its own dtype.
            if isinstance(X, zarr.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is attached separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    # In backed mode raw X also stays on disk (only var/varm are read).
    raw = _read_legacy_raw(
        g,
        d.get("raw"),
        read_dataframe,
        read_elem,
        attrs=("var", "varm") if backed else ("var", "varm", "X"),
    )
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def _read_h5mu_mod(
    g: "h5py.Group", manager: MuDataFileManager = None, backed: bool = False
) -> AnnData:
    """Read a single modality from an .h5mu HDF5 group into an AnnData object.

    :param g: HDF5 group of the modality (e.g. ``mod/rna``).
    :param manager: Parent MuData file manager; when given, the AnnData is
        attached to it via an AnnDataFileManager.
    :param backed: If True, do not load ``X`` (or raw ``X``) into memory;
        only its dtype is recorded.
    """
    from anndata._io.specs.registry import read_elem
    from anndata._io.h5ad import read_dataframe, _read_raw
    from anndata import Raw
    d = {}
    for k in g.keys():
        if k in ("obs", "var"):
            d[k] = read_dataframe(g[k])
        elif k == "X":
            X = g["X"]
            # Sparse X is stored as a group with a "data" dataset inside;
            # dense X is a dataset with its own dtype.
            if isinstance(X, h5py.Group):
                dtype = X["data"].dtype
            elif hasattr(X, "dtype"):
                dtype = X.dtype
            else:
                raise ValueError()
            d["dtype"] = dtype
            if not backed:
                d["X"] = read_elem(X)
        elif k != "raw":  # raw is attached separately below
            d[k] = read_elem(g[k])
    ad = AnnData(**d)
    if manager is not None:
        ad.file = AnnDataFileManager(ad, os.path.basename(g.name), manager)
    # In backed mode raw X also stays on disk (only var/varm are read).
    raw = _read_raw(g, attrs=("var", "varm") if backed else ("var", "varm", "X"))
    if raw:
        ad._raw = Raw(ad, **raw)
    return ad
def read_h5ad(
    filename: PathLike,
    mod: str,
    backed: Union[str, bool, None] = None,
) -> AnnData:
    """Read a single modality from an .h5mu file as an AnnData object.

    :param filename: Path to the .h5mu file.
    :param mod: Name of the modality to read (a key under ``mod``).
    :param backed: None/False to load the modality into memory; True or
        "r"/"r+" to open it backed (True implies "r+").
    """
    assert backed in [
        None,
        True,
        False,
        "r",
        "r+",
    ], "Argument `backed` should be boolean, or r/r+, or None"
    # NOTE: unused imports (read_elem, read_dataframe, _read_raw) and an
    # unused `d = {}` were removed; _read_h5mu_mod does its own imports.
    hdf5_mode = "r"
    if backed not in {None, False}:
        hdf5_mode = backed
        # backed=True implies read/write access to the file.
        if hdf5_mode is True:
            hdf5_mode = "r+"
        assert hdf5_mode in {"r", "r+"}
        backed = True
        manager = MuDataFileManager(filename, hdf5_mode)
    else:
        backed = False
        manager = None
    with h5py.File(filename, hdf5_mode) as f_root:
        f = f_root["mod"][mod]
        # All data is read (or, when backed, attached) before the file is
        # closed on exit from the `with` block.
        return _read_h5mu_mod(f, manager, backed)
read_anndata = read_h5ad
def read(filename: PathLike, **kwargs) -> Union[MuData, AnnData]:
    """Read a MuData or AnnData object from a file.

    Accepted file names:
      - ``file.h5mu``         -> MuData
      - ``file.h5mu/rna``     -> AnnData (single modality)
      - ``file.h5mu/mod/rna`` -> AnnData (single modality)
      - ``file.h5ad``         -> AnnData

    Extra keyword arguments are forwarded to the respective reader.

    :raises ValueError: If the file name is not a recognised .h5mu/.h5ad path.
    """
    import re

    # Raw string avoids the invalid escape sequence warning for `\.`.
    m = re.search(r"^(.+)\.(h5mu)[/]?([A-Za-z]*)[/]?([/A-Za-z]*)$", filename)
    if m is not None:
        m = m.groups()
    else:
        if filename.endswith(".h5ad"):
            m = [filename[:-5], "h5ad", "", ""]
        else:
            raise ValueError("Expected non-empty .h5ad or .h5mu file name")
    filepath = ".".join([m[0], m[1]])
    if m[1] == "h5mu":
        if all(i == 0 for i in map(len, m[2:])):
            # Ends with .h5mu
            return read_h5mu(filepath, **kwargs)
        elif m[3] == "":
            # .h5mu/<modality>
            return read_h5ad(filepath, m[2], **kwargs)
        elif m[2] == "mod":
            # .h5mu/mod/<modality>
            return read_h5ad(filepath, m[3], **kwargs)
        else:
            raise ValueError(
                "If a single modality to be read from a .h5mu file, \
                provide it after the filename separated by slash symbol:\
                .h5mu/rna or .h5mu/mod/rna"
            )
    elif m[1] == "h5ad":
        return ad.read_h5ad(filepath, **kwargs)
    else:
        # Unreachable: m[1] is always "h5mu" or "h5ad" here.
        raise ValueError("The file format is not recognised, expected to be an .h5mu or .h5ad file")
| true
| true
|
1c4974b9fca1aa6488c9bc567b5f3b3cb8f9a5fd
| 3,464
|
py
|
Python
|
salt/modules/sysmod.py
|
ageron/salt
|
72a0a89011e55ce7c875e948b5f0e97e70328153
|
[
"Apache-2.0"
] | 2
|
2019-03-30T02:12:56.000Z
|
2021-03-08T18:59:46.000Z
|
salt/modules/sysmod.py
|
ageron/salt
|
72a0a89011e55ce7c875e948b5f0e97e70328153
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/sysmod.py
|
ageron/salt
|
72a0a89011e55ce7c875e948b5f0e97e70328153
|
[
"Apache-2.0"
] | null | null | null |
'''
The sys module provides information about the available functions on the
minion.
'''
# Import python libs
import logging
# Import salt libs
# TODO: should probably use _getargs() from salt.utils?
from salt.state import _getargs
log = logging.getLogger(__name__)
def __virtual__():
'''
Return as sys
'''
return 'sys'
def doc(module=''):
    '''
    Collect the docstrings of the loaded execution-module functions.

    If ``module`` is given, only functions from that module (or the exact
    function it names) are included; otherwise every loaded function is
    documented. The strings are aggregated into a single document on the
    master for easy reading.

    CLI Example::

        salt '*' sys.doc
        salt '*' sys.doc sys
        salt '*' sys.doc sys.doc
    '''
    # "sys" and "sys." should both select the sys module without also
    # matching e.g. sysctl.
    prefix = (module if module.endswith('.') else module + '.') if module else ''
    return {
        fun: __salt__[fun].__doc__
        for fun in __salt__
        if fun == module or fun.startswith(prefix)
    }
def list_functions(module=''):
    '''
    List the functions for all modules. Optionally, specify a module to list
    from.

    CLI Example::

        salt '*' sys.list_functions
        salt '*' sys.list_functions sys
    '''
    # "sys" and "sys." should both select the sys module without also
    # matching e.g. sysctl.
    if module and not module.endswith('.'):
        module += '.'
    return sorted(fun for fun in __salt__ if fun.startswith(module))
def list_modules():
    '''
    List the modules loaded on the minion

    CLI Example::

        salt '*' sys.list_modules
    '''
    # A loadable function key looks like "<module>.<function>"; anything
    # without a dot is ignored.
    return sorted({fun.split('.')[0] for fun in __salt__ if '.' in fun})
def reload_modules():
    '''
    Tell the minion to reload the execution modules

    CLI Example::

        salt '*' sys.reload_modules
    '''
    # The actual reload is performed inside minion.py; the call is
    # intercepted before it ever reaches this function.
    return True
def argspec(module=''):
    '''
    Return the argument specification of functions in Salt execution
    modules.

    CLI Example::

        salt '*' sys.argspec pkg.install
        salt '*' sys.argspec sys
        salt '*' sys.argspec
    '''
    ret = {}
    # TODO: cp.get_file will also match cp.get_file_str. this is the
    # same logic as sys.doc, and it is not working as expected, see
    # issue #3614
    if module:
        # allow both "sys" and "sys." to match sys, without also matching
        # sysctl
        # Materialize into a list: on Python 3 `filter` returns an
        # iterator, and len() of it raises TypeError.
        comps = [comp for comp in module.split('.') if comp]
        if len(comps) < 2:
            module = module + '.' if not module.endswith('.') else module
    for fun in __salt__:
        if fun.startswith(module):
            try:
                aspec = _getargs(__salt__[fun])
            except TypeError:
                # this happens if not callable
                continue
            args, varargs, kwargs, defaults = aspec
            ret[fun] = {
                'args': args if args else None,
                'defaults': defaults if defaults else None,
                'varargs': True if varargs else None,
                'kwargs': True if kwargs else None,
            }
    return ret
| 24.920863
| 78
| 0.582852
|
import logging
from salt.state import _getargs
log = logging.getLogger(__name__)
def __virtual__():
return 'sys'
def doc(module=''):
docs = {}
if module:
target_mod = module + '.' if not module.endswith('.') else module
else:
target_mod = ''
for fun in __salt__:
if fun == module or fun.startswith(target_mod):
docs[fun] = __salt__[fun].__doc__
return docs
def list_functions(module=''):
names = set()
if module:
module = module + '.' if not module.endswith('.') else module
for func in __salt__:
if func.startswith(module):
names.add(func)
return sorted(names)
def list_modules():
modules = set()
for func in __salt__:
comps = func.split('.')
if len(comps) < 2:
continue
modules.add(comps[0])
return sorted(modules)
def reload_modules():
return True
def argspec(module=''):
ret = {}
if module:
comps = module.split('.')
comps = filter(None, comps)
if len(comps) < 2:
module = module + '.' if not module.endswith('.') else module
for fun in __salt__:
if fun.startswith(module):
try:
aspec = _getargs(__salt__[fun])
except TypeError:
continue
args, varargs, kwargs, defaults = aspec
ret[fun] = {}
ret[fun]['args'] = args if args else None
ret[fun]['defaults'] = defaults if defaults else None
ret[fun]['varargs'] = True if varargs else None
ret[fun]['kwargs'] = True if kwargs else None
return ret
| true
| true
|
1c49773883879141ec47340b240f609fe8894f09
| 518
|
py
|
Python
|
tests/test_functions.py
|
brisvag/mdocfile
|
abab15dac94460de7c62d339d7a2d497bbb722fd
|
[
"BSD-3-Clause"
] | 1
|
2022-02-23T02:42:35.000Z
|
2022-02-23T02:42:35.000Z
|
tests/test_functions.py
|
brisvag/mdocfile
|
abab15dac94460de7c62d339d7a2d497bbb722fd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-28T13:11:37.000Z
|
2022-03-30T14:19:31.000Z
|
tests/test_functions.py
|
brisvag/mdocfile
|
abab15dac94460de7c62d339d7a2d497bbb722fd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-18T13:23:08.000Z
|
2022-03-18T13:23:08.000Z
|
import pandas as pd
import pytest
from mdocfile.functions import read
@pytest.mark.parametrize(
    'camel_to_snake', [True, False]
)
def test_read(tilt_series_mdoc_file, camel_to_snake: bool):
    """Read a tilt-series mdoc file and check shape and column naming."""
    df = read(tilt_series_mdoc_file, camel_to_snake=camel_to_snake)
    # Leftover debug prints removed.
    assert isinstance(df, pd.DataFrame)
    assert df.shape == (41, 26)
    # Column names are snake_cased only when requested.
    expected_column = 'tilt_angle' if camel_to_snake else 'TiltAngle'
    assert expected_column in df.columns
| 25.9
| 67
| 0.720077
|
import pandas as pd
import pytest
from mdocfile.functions import read
@pytest.mark.parametrize(
'camel_to_snake', [True, False]
)
def test_read(tilt_series_mdoc_file, camel_to_snake: bool):
df = read(tilt_series_mdoc_file, camel_to_snake=camel_to_snake)
print(camel_to_snake, len(df.columns))
print(df.columns)
assert isinstance(df, pd.DataFrame)
assert df.shape == (41, 26)
if camel_to_snake:
assert 'tilt_angle' in df.columns
else:
assert 'TiltAngle' in df.columns
| true
| true
|
1c49774d2ad0cf760e33d25aee3e251a29965c7f
| 32,566
|
py
|
Python
|
pyhap/camera.py
|
sander-vd/HAP-python
|
991761ceadfd7796d454d61c87be7f5d4b75d432
|
[
"Apache-2.0"
] | 3
|
2019-12-07T22:42:38.000Z
|
2022-01-20T08:44:46.000Z
|
pyhap/camera.py
|
sander-vd/HAP-python
|
991761ceadfd7796d454d61c87be7f5d4b75d432
|
[
"Apache-2.0"
] | null | null | null |
pyhap/camera.py
|
sander-vd/HAP-python
|
991761ceadfd7796d454d61c87be7f5d4b75d432
|
[
"Apache-2.0"
] | 1
|
2021-05-15T22:34:52.000Z
|
2021-05-15T22:34:52.000Z
|
"""Contains the Camera accessory and related.
When a HAP client (e.g. iOS) wants to start a video stream it does the following:
[0. Read supported RTP configuration]
[0. Read supported video configuration]
[0. Read supported audio configuration]
[0. Read the current streaming status]
1. Sets the SetupEndpoints characteristic to notify the camera about its IP address,
selected security parameters, etc.
2. The camera responds to the above by setting the SetupEndpoints with its IP address,
etc.
3. The client sets the SelectedRTPStreamConfiguration characteristic to notify the
camera of its prefered audio and video configuration and to initiate the start of the
streaming.
4. The camera starts the streaming with the above configuration.
[5. At some point the client can reconfigure or stop the stream similarly to step 3.]
"""
import asyncio
import functools
import os
import ipaddress
import logging
import struct
from uuid import UUID
from pyhap import RESOURCE_DIR
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA
from pyhap.util import to_base64_str, byte_bool
from pyhap import tlv
SETUP_TYPES = {
'SESSION_ID': b'\x01',
'STATUS': b'\x02',
'ADDRESS': b'\x03',
'VIDEO_SRTP_PARAM': b'\x04',
'AUDIO_SRTP_PARAM': b'\x05',
'VIDEO_SSRC': b'\x06',
'AUDIO_SSRC': b'\x07'
}
SETUP_STATUS = {
'SUCCESS': b'\x00',
'BUSY': b'\x01',
'ERROR': b'\x02'
}
SETUP_IPV = {
'IPV4': b'\x00',
'IPV6': b'\x01'
}
SETUP_ADDR_INFO = {
'ADDRESS_VER': b'\x01',
'ADDRESS': b'\x02',
'VIDEO_RTP_PORT': b'\x03',
'AUDIO_RTP_PORT': b'\x04'
}
SETUP_SRTP_PARAM = {
'CRYPTO': b'\x01',
'MASTER_KEY': b'\x02',
'MASTER_SALT': b'\x03'
}
STREAMING_STATUS = {
'AVAILABLE': b'\x00',
'STREAMING': b'\x01',
'BUSY': b'\x02'
}
RTP_CONFIG_TYPES = {
'CRYPTO': b'\x02'
}
SRTP_CRYPTO_SUITES = {
'AES_CM_128_HMAC_SHA1_80': b'\x00',
'AES_CM_256_HMAC_SHA1_80': b'\x01',
'NONE': b'\x02'
}
VIDEO_TYPES = {
'CODEC': b'\x01',
'CODEC_PARAM': b'\x02',
'ATTRIBUTES': b'\x03',
'RTP_PARAM': b'\x04'
}
VIDEO_CODEC_TYPES = {
'H264': b'\x00'
}
VIDEO_CODEC_PARAM_TYPES = {
'PROFILE_ID': b'\x01',
'LEVEL': b'\x02',
'PACKETIZATION_MODE': b'\x03',
'CVO_ENABLED': b'\x04',
'CVO_ID': b'\x05'
}
VIDEO_CODEC_PARAM_CVO_TYPES = {
'UNSUPPORTED': b'\x01',
'SUPPORTED': b'\x02'
}
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES = {
'BASELINE': b'\x00',
'MAIN': b'\x01',
'HIGH': b'\x02'
}
VIDEO_CODEC_PARAM_LEVEL_TYPES = {
'TYPE3_1': b'\x00',
'TYPE3_2': b'\x01',
'TYPE4_0': b'\x02'
}
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES = {
'NON_INTERLEAVED': b'\x00'
}
VIDEO_ATTRIBUTES_TYPES = {
'IMAGE_WIDTH': b'\x01',
'IMAGE_HEIGHT': b'\x02',
'FRAME_RATE': b'\x03'
}
SUPPORTED_VIDEO_CONFIG_TAG = b'\x01'
SELECTED_STREAM_CONFIGURATION_TYPES = {
'SESSION': b'\x01',
'VIDEO': b'\x02',
'AUDIO': b'\x03'
}
RTP_PARAM_TYPES = {
'PAYLOAD_TYPE': b'\x01',
'SYNCHRONIZATION_SOURCE': b'\x02',
'MAX_BIT_RATE': b'\x03',
'RTCP_SEND_INTERVAL': b'\x04',
'MAX_MTU': b'\x05',
'COMFORT_NOISE_PAYLOAD_TYPE': b'\x06'
}
AUDIO_TYPES = {
'CODEC': b'\x01',
'CODEC_PARAM': b'\x02',
'RTP_PARAM': b'\x03',
'COMFORT_NOISE': b'\x04'
}
AUDIO_CODEC_TYPES = {
'PCMU': b'\x00',
'PCMA': b'\x01',
'AACELD': b'\x02',
'OPUS': b'\x03'
}
AUDIO_CODEC_PARAM_TYPES = {
'CHANNEL': b'\x01',
'BIT_RATE': b'\x02',
'SAMPLE_RATE': b'\x03',
'PACKET_TIME': b'\x04'
}
AUDIO_CODEC_PARAM_BIT_RATE_TYPES = {
'VARIABLE': b'\x00',
'CONSTANT': b'\x01'
}
AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES = {
'KHZ_8': b'\x00',
'KHZ_16': b'\x01',
'KHZ_24': b'\x02'
}
SUPPORTED_AUDIO_CODECS_TAG = b'\x01'
SUPPORTED_COMFORT_NOISE_TAG = b'\x02'
SUPPORTED_AUDIO_CONFIG_TAG = b'\x02'
SET_CONFIG_REQUEST_TAG = b'\x02'
SESSION_ID = b'\x01'
# TLV per SETUP_SRTP_PARAM: CRYPTO (0x01, len 1) = NONE (0x02),
# MASTER_KEY (0x02) and MASTER_SALT (0x03) both zero-length.
NO_SRTP = b'\x01\x01\x02\x02\x00\x03\x00'
'''Configuration value for no SRTP.'''
FFMPEG_CMD = (
    # pylint: disable=bad-continuation
    # Default capture input is avfoundation device 0:0 (macOS-specific --
    # presumably meant to be overridden via the "start_stream_cmd" option
    # on other platforms; confirm for your deployment). The placeholders
    # ({fps}, {width}, ...) are filled from the negotiated stream_config.
    'ffmpeg -re -f avfoundation -i 0:0 -threads 0 '
    '-vcodec libx264 -an -pix_fmt yuv420p -r {fps} -f rawvideo -tune zerolatency '
    '-vf scale={width}:{height} -b:v {v_max_bitrate}k -bufsize {v_max_bitrate}k '
    '-payload_type 99 -ssrc {v_ssrc} -f rtp '
    '-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} '
    'srtp://{address}:{v_port}?rtcpport={v_port}&'
    'localrtcpport={v_port}&pkt_size=1378'
)
'''Template for the ffmpeg command.'''
class Camera(Accessory):
"""An Accessory that can negotiated camera stream settings with iOS and start a
stream.
"""
category = CATEGORY_CAMERA
@staticmethod
def get_supported_rtp_config(support_srtp):
"""Return a tlv representation of the RTP configuration we support.
SRTP support allows only the AES_CM_128_HMAC_SHA1_80 cipher for now.
:param support_srtp: True if SRTP is supported, False otherwise.
:type support_srtp: bool
"""
if support_srtp:
crypto = SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80']
else:
crypto = SRTP_CRYPTO_SUITES['NONE']
return tlv.encode(RTP_CONFIG_TYPES['CRYPTO'], crypto, to_base64=True)
@staticmethod
def get_supported_video_stream_config(video_params):
"""Return a tlv representation of the supported video stream configuration.
Expected video parameters:
- codec
- resolutions
:param video_params: Supported video configurations
:type video_params: dict
"""
codec_params_tlv = tlv.encode(
VIDEO_CODEC_PARAM_TYPES['PACKETIZATION_MODE'],
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES['NON_INTERLEAVED'])
codec_params = video_params['codec']
for profile in codec_params['profiles']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['PROFILE_ID'], profile)
for level in codec_params['levels']:
codec_params_tlv += \
tlv.encode(VIDEO_CODEC_PARAM_TYPES['LEVEL'], level)
attr_tlv = b''
for resolution in video_params['resolutions']:
res_tlv = tlv.encode(
VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH'], struct.pack('<H', resolution[0]),
VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT'], struct.pack('<H', resolution[1]),
VIDEO_ATTRIBUTES_TYPES['FRAME_RATE'], struct.pack('<H', resolution[2]))
attr_tlv += tlv.encode(VIDEO_TYPES['ATTRIBUTES'], res_tlv)
config_tlv = tlv.encode(VIDEO_TYPES['CODEC'], VIDEO_CODEC_TYPES['H264'],
VIDEO_TYPES['CODEC_PARAM'], codec_params_tlv)
return tlv.encode(SUPPORTED_VIDEO_CONFIG_TAG, config_tlv + attr_tlv,
to_base64=True)
    @staticmethod
    def get_supported_audio_stream_config(audio_params):
        """Return a tlv representation of the supported audio stream configuration.

        iOS supports only AACELD and OPUS

        Expected audio parameters:
        - codecs
        - comfort_noise

        :param audio_params: Supported audio configurations
        :type audio_params: dict
        """
        has_supported_codec = False
        configs = b''
        for codec_param in audio_params['codecs']:
            # Map the configured codec name to its TLV value; anything other
            # than OPUS or AAC-eld is skipped with a warning.
            param_type = codec_param['type']
            if param_type == 'OPUS':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['OPUS']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            elif param_type == 'AAC-eld':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['AACELD']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            else:
                logging.warning('Unsupported codec %s', param_type)
                continue
            # Only 8/16/24 kHz sample rates are representable in the TLV.
            param_samplerate = codec_param['samplerate']
            if param_samplerate == 8:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_8']
            elif param_samplerate == 16:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_16']
            elif param_samplerate == 24:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            else:
                logging.warning('Unsupported sample rate %s', param_samplerate)
                continue
            param_tlv = tlv.encode(AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                                   AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                                   AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs += tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        if not has_supported_codec:
            # Fallback: advertise OPUS at 24 kHz so that at least one
            # supported codec is always present. Note `configs =` below
            # (not `+=`) replaces anything accumulated above.
            logging.warning('Client does not support any audio codec that iOS supports.')
            codec = AUDIO_CODEC_TYPES['OPUS']
            bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            param_tlv = tlv.encode(
                AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs = tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        comfort_noise = byte_bool(
            audio_params.get('comfort_noise', False))
        audio_config = to_base64_str(
            configs + tlv.encode(SUPPORTED_COMFORT_NOISE_TAG, comfort_noise))
        return audio_config
    def __init__(self, options, *args, **kwargs):
        """Initialize a camera accessory with the given options.

        :param options: Describes the supported video and audio configuration
            of this camera. Expected values are video, audio, srtp and address.
            Example configuration:

            .. code-block:: python

            {
                "video": {
                    "codec": {
                        "profiles": [
                            camera.VIDEO_CODEC_PARAM_PROFILE_ID_TYPES["BASELINE"],
                        ],
                        "levels": [
                            camera.VIDEO_CODEC_PARAM_LEVEL_TYPES['TYPE3_1'],
                        ],
                    },
                    "resolutions": [
                        [320, 240, 15], # Width, Height, framerate
                        [1024, 768, 30],
                        [640, 480, 30],
                        [640, 360, 30],
                        [480, 360, 30],
                        [480, 270, 30],
                        [320, 240, 30],
                        [320, 180, 30],
                    ],
                },
                "audio": {
                    "codecs": [
                        {
                            'type': 'OPUS',
                            'samplerate': 24,
                        },
                        {
                            'type': 'AAC-eld',
                            'samplerate': 16
                        }
                    ],
                },
                "address": "192.168.1.226", # Address from which the camera will stream
            }

        Additional optional values are:
        - srtp - boolean, defaults to False. Whether the camera supports SRTP.
        - start_stream_cmd - string specifying the command to be executed to start
            the stream. The string can contain the keywords, corresponding to the
            video and audio configuration that was negotiated between the camera
            and the client. See the ``start`` method for a full list of parameters.

        :type options: ``dict``
        """
        self.streaming_status = STREAMING_STATUS['AVAILABLE']
        self.has_srtp = options.get('srtp', False)
        # Shell template used to spawn the streaming process (see FFMPEG_CMD).
        self.start_stream_cmd = options.get('start_stream_cmd', FFMPEG_CMD)
        self.stream_address = options['address']
        # Record whether the camera's own address is IPv4 (b'\x00') or
        # IPv6 (b'\x01'); the flag is echoed in the SetupEndpoints response.
        try:
            ipaddress.IPv4Address(self.stream_address)
            self.stream_address_isv6 = b'\x00'
        except ValueError:
            self.stream_address_isv6 = b'\x01'
        # Maps session UUID -> per-session stream info (see set_endpoints).
        self.sessions = {}
        super().__init__(*args, **kwargs)
        self.add_preload_service('Microphone')
        management = self.add_preload_service('CameraRTPStreamManagement')
        # NOTE(review): getter name carries a typo ("streaimg") that is kept
        # to match the method definition below.
        management.configure_char('StreamingStatus',
                                  getter_callback=self._get_streaimg_status)
        management.configure_char('SupportedRTPConfiguration',
                                  value=self.get_supported_rtp_config(
                                      options.get('srtp', False)))
        management.configure_char('SupportedVideoStreamConfiguration',
                                  value=self.get_supported_video_stream_config(
                                      options['video']))
        management.configure_char('SupportedAudioStreamConfiguration',
                                  value=self.get_supported_audio_stream_config(
                                      options['audio']))
        management.configure_char('SelectedRTPStreamConfiguration',
                                  setter_callback=self.set_selected_stream_configuration)
        management.configure_char('SetupEndpoints',
                                  setter_callback=self.set_endpoints)
async def _start_stream(self, objs, reconfigure): # pylint: disable=unused-argument
"""Start or reconfigure video streaming for the given session.
Schedules ``self.start_stream`` or ``self.reconfigure``.
No support for reconfigure currently.
:param objs: TLV-decoded SelectedRTPStreamConfiguration
:type objs: ``dict``
:param reconfigure: Whether the stream should be reconfigured instead of
started.
:type reconfigure: bool
"""
video_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['VIDEO'])
audio_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['AUDIO'])
opts = {}
if video_tlv:
video_objs = tlv.decode(video_tlv)
video_codec_params = video_objs.get(VIDEO_TYPES['CODEC_PARAM'])
if video_codec_params:
video_codec_param_objs = tlv.decode(video_codec_params)
opts['v_profile_id'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['PROFILE_ID']]
opts['v_level'] = \
video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['LEVEL']]
video_attrs = video_objs.get(VIDEO_TYPES['ATTRIBUTES'])
if video_attrs:
video_attr_objs = tlv.decode(video_attrs)
opts['width'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH']])[0]
opts['height'] = struct.unpack('<H',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT']])[0]
opts['fps'] = struct.unpack('<B',
video_attr_objs[VIDEO_ATTRIBUTES_TYPES['FRAME_RATE']])[0]
video_rtp_param = video_objs.get(VIDEO_TYPES['RTP_PARAM'])
if video_rtp_param:
video_rtp_param_objs = tlv.decode(video_rtp_param)
# TODO: Optionals, handle the case where they are missing
opts['v_ssrc'] = 1 or struct.unpack('<I',
video_rtp_param_objs.get(
RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']))[0]
opts['v_payload_type'] = \
video_rtp_param_objs.get(RTP_PARAM_TYPES['PAYLOAD_TYPE'])
opts['v_max_bitrate'] = struct.unpack('<H',
video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_BIT_RATE']))[0]
opts['v_rtcp_interval'] = struct.unpack('<f',
video_rtp_param_objs.get(RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']))[0]
opts['v_max_mtu'] = video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_MTU'])
if audio_tlv:
audio_objs = tlv.decode(audio_tlv)
opts['a_codec'] = audio_objs[AUDIO_TYPES['CODEC']]
audio_codec_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['CODEC_PARAM']])
audio_rtp_param_objs = tlv.decode(
audio_objs[AUDIO_TYPES['RTP_PARAM']])
opts['a_comfort_noise'] = audio_objs[AUDIO_TYPES['COMFORT_NOISE']]
opts['a_channel'] = \
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['CHANNEL']][0]
opts['a_bitrate'] = struct.unpack('?',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['BIT_RATE']])[0]
opts['a_sample_rate'] = 8 * (
1 + audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE']][0])
opts['a_packet_time'] = struct.unpack('<B',
audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['PACKET_TIME']])[0]
opts['a_ssrc'] = struct.unpack('<I',
audio_rtp_param_objs[RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']])[0]
opts['a_payload_type'] = audio_rtp_param_objs[RTP_PARAM_TYPES['PAYLOAD_TYPE']]
opts['a_max_bitrate'] = struct.unpack('<H',
audio_rtp_param_objs[RTP_PARAM_TYPES['MAX_BIT_RATE']])[0]
opts['a_rtcp_interval'] = struct.unpack('<f',
audio_rtp_param_objs[RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']])[0]
opts['a_comfort_payload_type'] = \
audio_rtp_param_objs[RTP_PARAM_TYPES['COMFORT_NOISE_PAYLOAD_TYPE']]
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions[session_id]
opts.update(session_info)
success = await self.reconfigure_stream(session_info, opts) if reconfigure \
else await self.start_stream(session_info, opts)
if success:
self.streaming_status = STREAMING_STATUS['STREAMING']
else:
logging.error('[%s] Faled to start/reconfigure stream, deleting session.',
session_id)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
    def _get_streaimg_status(self):
        """Get the streaming status in TLV format.

        Called when iOS reads the StreamingStatus ``Characteristic``.
        """
        # NOTE(review): method name carries a typo ("streaimg"); kept as-is
        # because __init__ registers the getter under this exact name.
        return tlv.encode(b'\x01', self.streaming_status, to_base64=True)
async def _stop_stream(self, objs):
"""Stop the stream for the specified session.
Schedules ``self.stop_stream``.
:param objs: TLV-decoded SelectedRTPStreamConfiguration value.
:param objs: ``dict``
"""
session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
session_info = self.sessions.get(session_id)
if not session_info:
logging.error('Requested to stop stream for session %s, but no '
'such session was found', session_id)
return
await self.stop_stream(session_info)
del self.sessions[session_id]
self.streaming_status = STREAMING_STATUS['AVAILABLE']
def set_selected_stream_configuration(self, value):
"""Set the selected stream configuration.
Called from iOS to set the SelectedRTPStreamConfiguration ``Characteristic``.
This method schedules a stream for the session in ``value`` to be start, stopped
or reconfigured, depending on the request.
:param value: base64-encoded selected configuration in TLV format
:type value: ``str``
"""
logging.debug('set_selected_stream_config - value - %s', value)
objs = tlv.decode(value, from_base64=True)
if SELECTED_STREAM_CONFIGURATION_TYPES['SESSION'] not in objs:
logging.error('Bad request to set selected stream configuration.')
return
session = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
request_type = session[b'\x02'][0]
logging.debug('Set stream config request: %d', request_type)
if request_type == 1:
job = functools.partial(self._start_stream, reconfigure=False)
elif request_type == 0:
job = self._stop_stream
elif request_type == 4:
job = functools.partial(self._start_stream, reconfigure=True)
else:
logging.error('Unknown request type %d', request_type)
return
self.driver.add_job(job, objs)
    def set_endpoints(self, value):
        """Configure streaming endpoints.

        Called when iOS sets the SetupEndpoints ``Characteristic``. The endpoint
        information for the camera should be set as the current value of SetupEndpoints.

        :param value: The base64-encoded stream session details in TLV format.
        :param value: ``str``
        """
        objs = tlv.decode(value, from_base64=True)
        session_id = UUID(bytes=objs[SETUP_TYPES['SESSION_ID']])

        # Extract address info
        address_tlv = objs[SETUP_TYPES['ADDRESS']]
        address_info_objs = tlv.decode(address_tlv)
        is_ipv6 = struct.unpack('?',
                                address_info_objs[SETUP_ADDR_INFO['ADDRESS_VER']])[0]
        address = address_info_objs[SETUP_ADDR_INFO['ADDRESS']].decode('utf8')
        target_video_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['VIDEO_RTP_PORT']])[0]
        target_audio_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['AUDIO_RTP_PORT']])[0]

        # Video SRTP Params
        video_srtp_tlv = objs[SETUP_TYPES['VIDEO_SRTP_PARAM']]
        video_info_objs = tlv.decode(video_srtp_tlv)
        video_crypto_suite = video_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        video_master_key = video_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        video_master_salt = video_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]

        # Audio SRTP Params
        audio_srtp_tlv = objs[SETUP_TYPES['AUDIO_SRTP_PARAM']]
        audio_info_objs = tlv.decode(audio_srtp_tlv)
        audio_crypto_suite = audio_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        audio_master_key = audio_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        audio_master_salt = audio_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]

        logging.debug('Received endpoint configuration:'
                      '\nsession_id: %s\naddress: %s\nis_ipv6: %s'
                      '\ntarget_video_port: %s\ntarget_audio_port: %s'
                      '\nvideo_crypto_suite: %s\nvideo_srtp: %s'
                      '\naudio_crypto_suite: %s\naudio_srtp: %s',
                      session_id, address, is_ipv6, target_video_port, target_audio_port,
                      video_crypto_suite,
                      to_base64_str(video_master_key + video_master_salt),
                      audio_crypto_suite,
                      to_base64_str(audio_master_key + audio_master_salt))

        # Configure the SetupEndpoints response
        # When SRTP is supported, the client's key material is echoed back;
        # otherwise the NO_SRTP TLV is sent.
        if self.has_srtp:
            video_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], video_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], video_master_salt)

            audio_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], audio_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], audio_master_salt)
        else:
            video_srtp_tlv = NO_SRTP
            audio_srtp_tlv = NO_SRTP

        # TODO: Use os.urandom(4) but within the allowed value bounds
        video_ssrc = b'\x01'
        audio_ssrc = b'\x01'

        res_address_tlv = tlv.encode(
            SETUP_ADDR_INFO['ADDRESS_VER'], self.stream_address_isv6,
            SETUP_ADDR_INFO['ADDRESS'], self.stream_address.encode('utf-8'),
            SETUP_ADDR_INFO['VIDEO_RTP_PORT'], struct.pack('<H', target_video_port),
            SETUP_ADDR_INFO['AUDIO_RTP_PORT'], struct.pack('<H', target_audio_port))

        response_tlv = tlv.encode(
            SETUP_TYPES['SESSION_ID'], session_id.bytes,
            SETUP_TYPES['STATUS'], SETUP_STATUS['SUCCESS'],
            SETUP_TYPES['ADDRESS'], res_address_tlv,
            SETUP_TYPES['VIDEO_SRTP_PARAM'], video_srtp_tlv,
            SETUP_TYPES['AUDIO_SRTP_PARAM'], audio_srtp_tlv,
            SETUP_TYPES['VIDEO_SSRC'], video_ssrc,
            SETUP_TYPES['AUDIO_SSRC'], audio_ssrc,
            to_base64=True)

        # NOTE(review): the video key is stored as 'v_srtp_key' but the audio
        # key as 'audio_srtp_key' (not 'a_srtp_key') -- confirm which name
        # consumers of this session dict expect.
        self.sessions[session_id] = {
            'id': session_id,
            'address': address,
            'v_port': target_video_port,
            'v_srtp_key': to_base64_str(video_master_key + video_master_salt),
            # TODO: 'v_ssrc': video_ssrc,
            'a_port': target_audio_port,
            'audio_srtp_key': to_base64_str(audio_master_key + audio_master_salt),
            'a_ssrc': audio_ssrc
        }

        self.get_service('CameraRTPStreamManagement')\
            .get_characteristic('SetupEndpoints')\
            .set_value(response_tlv)
async def stop(self):
    """Terminate every active streaming session before the accessory shuts down."""
    shutdowns = [self.stop_stream(info) for info in self.sessions.values()]
    await asyncio.gather(*shutdowns)
# ### For client extensions ###
async def start_stream(self, session_info, stream_config):
    """Spawn the streaming subprocess for one negotiated session.

    The command template ``self.start_stream_cmd`` is formatted with the
    entries of ``stream_config`` and executed; the resulting process handle
    is stored under ``session_info['process']`` so that ``stop_stream`` can
    later terminate it.

    :param session_info: Per-session storage dict; key "id" holds the
        session ID. This method adds the "process" key on success.
    :type session_info: ``dict``
    :param stream_config: Negotiated stream parameters (address, ports,
        SRTP keys, codec settings, ...) used to format the command template.
    :type stream_config: ``dict``
    :return: True if and only if the streaming process was spawned.
    :rtype: ``bool``
    """
    logging.debug('[%s] Starting stream with the following parameters: %s',
                  session_info['id'], stream_config)
    cmd = self.start_stream_cmd.format(**stream_config).split()
    logging.debug('Executing start stream command: "%s"', ' '.join(cmd))
    try:
        proc = await asyncio.create_subprocess_exec(
            *cmd,
            stdout=asyncio.subprocess.DEVNULL,
            stderr=asyncio.subprocess.PIPE,
            limit=1024)
    except Exception as error:  # pylint: disable=broad-except
        logging.error('Failed to start streaming process because of error: %s',
                      error)
        return False
    session_info['process'] = proc
    logging.info('[%s] Started stream process - PID %d',
                 session_info['id'], proc.pid)
    return True
async def stop_stream(self, session_info):  # pylint: disable=no-self-use
    """Terminate the streaming subprocess recorded for this session.

    Sends SIGTERM and waits up to two seconds for the process to exit;
    if it does not, the process is killed.

    :param session_info: The session info object. Available keys:
        - id - The session ID.
        - process - (optional) the subprocess handle set by ``start_stream``.
    :type session_info: ``dict``
    """
    session_id = session_info['id']
    proc = session_info.get('process')
    if not proc:
        # Nothing was started for this session (or it was already cleaned up).
        logging.warning('No process for session ID %s', session_id)
        return
    logging.info('[%s] Stopping stream.', session_id)
    try:
        proc.terminate()
        _, stderr = await asyncio.wait_for(
            proc.communicate(), timeout=2.0)
        logging.debug('Stream command stderr: %s', stderr)
    except asyncio.TimeoutError:
        logging.error('Timeout while waiting for the stream process '
                      'to terminate. Trying with kill.')
        proc.kill()
        await proc.wait()
    logging.debug('Stream process stopped.')
async def reconfigure_stream(self, session_info, stream_config):
    """Reconfigure the stream so that it uses the given ``stream_config``.

    The default implementation simply starts the stream again with the new
    configuration.

    :param session_info: The session object for the session that needs to
        be reconfigured. Available keys:
        - id - The session id.
    :type session_info: ``dict``
    :return: True if and only if the reconfiguration is successful.
    :rtype: ``bool``
    """
    # Bug fix: the result of start_stream was previously discarded, so this
    # coroutine implicitly returned None. Callers (_start_stream) use the
    # return value as a success flag, so every reconfiguration was treated
    # as a failure and its session was deleted.
    return await self.start_stream(session_info, stream_config)
def get_snapshot(self, image_size):  # pylint: disable=unused-argument, no-self-use
    """Return JPEG bytes of a snapshot from the camera.

    The default implementation serves the bundled placeholder image;
    override to capture from a real camera.

    :param image_size: ``dict`` describing the requested image size, with
        keys "image-width" and "image-height" (ignored by the default
        implementation).
    """
    snapshot_path = os.path.join(RESOURCE_DIR, 'snapshot.jpg')
    with open(snapshot_path, 'rb') as snapshot_file:
        return snapshot_file.read()
| 38.67696
| 90
| 0.605171
|
import asyncio
import functools
import os
import ipaddress
import logging
import struct
from uuid import UUID
from pyhap import RESOURCE_DIR
from pyhap.accessory import Accessory
from pyhap.const import CATEGORY_CAMERA
from pyhap.util import to_base64_str, byte_bool
from pyhap import tlv
# --- TLV tag/value tables used by the HAP camera protocol ---

# Tags of the SetupEndpoints request/response TLV.
SETUP_TYPES = {
    'SESSION_ID': b'\x01',
    'STATUS': b'\x02',
    'ADDRESS': b'\x03',
    'VIDEO_SRTP_PARAM': b'\x04',
    'AUDIO_SRTP_PARAM': b'\x05',
    'VIDEO_SSRC': b'\x06',
    'AUDIO_SSRC': b'\x07'
}
# Status codes returned in a SetupEndpoints response.
SETUP_STATUS = {
    'SUCCESS': b'\x00',
    'BUSY': b'\x01',
    'ERROR': b'\x02'
}
# IP version flag carried inside the address TLV.
SETUP_IPV = {
    'IPV4': b'\x00',
    'IPV6': b'\x01'
}
# Tags of the nested address-info TLV.
SETUP_ADDR_INFO = {
    'ADDRESS_VER': b'\x01',
    'ADDRESS': b'\x02',
    'VIDEO_RTP_PORT': b'\x03',
    'AUDIO_RTP_PORT': b'\x04'
}
# Tags of the nested SRTP parameter TLV (key material).
SETUP_SRTP_PARAM = {
    'CRYPTO': b'\x01',
    'MASTER_KEY': b'\x02',
    'MASTER_SALT': b'\x03'
}
# Values of the StreamingStatus characteristic.
STREAMING_STATUS = {
    'AVAILABLE': b'\x00',
    'STREAMING': b'\x01',
    'BUSY': b'\x02'
}
RTP_CONFIG_TYPES = {
    'CRYPTO': b'\x02'
}
# SRTP crypto suites advertised via SupportedRTPConfiguration.
SRTP_CRYPTO_SUITES = {
    'AES_CM_128_HMAC_SHA1_80': b'\x00',
    'AES_CM_256_HMAC_SHA1_80': b'\x01',
    'NONE': b'\x02'
}
# --- Video stream configuration tags ---
VIDEO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'ATTRIBUTES': b'\x03',
    'RTP_PARAM': b'\x04'
}
VIDEO_CODEC_TYPES = {
    'H264': b'\x00'
}
VIDEO_CODEC_PARAM_TYPES = {
    'PROFILE_ID': b'\x01',
    'LEVEL': b'\x02',
    'PACKETIZATION_MODE': b'\x03',
    'CVO_ENABLED': b'\x04',
    'CVO_ID': b'\x05'
}
VIDEO_CODEC_PARAM_CVO_TYPES = {
    'UNSUPPORTED': b'\x01',
    'SUPPORTED': b'\x02'
}
VIDEO_CODEC_PARAM_PROFILE_ID_TYPES = {
    'BASELINE': b'\x00',
    'MAIN': b'\x01',
    'HIGH': b'\x02'
}
VIDEO_CODEC_PARAM_LEVEL_TYPES = {
    'TYPE3_1': b'\x00',
    'TYPE3_2': b'\x01',
    'TYPE4_0': b'\x02'
}
VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES = {
    'NON_INTERLEAVED': b'\x00'
}
VIDEO_ATTRIBUTES_TYPES = {
    'IMAGE_WIDTH': b'\x01',
    'IMAGE_HEIGHT': b'\x02',
    'FRAME_RATE': b'\x03'
}
SUPPORTED_VIDEO_CONFIG_TAG = b'\x01'
# Tags of the SelectedRTPStreamConfiguration TLV.
SELECTED_STREAM_CONFIGURATION_TYPES = {
    'SESSION': b'\x01',
    'VIDEO': b'\x02',
    'AUDIO': b'\x03'
}
RTP_PARAM_TYPES = {
    'PAYLOAD_TYPE': b'\x01',
    'SYNCHRONIZATION_SOURCE': b'\x02',
    'MAX_BIT_RATE': b'\x03',
    'RTCP_SEND_INTERVAL': b'\x04',
    'MAX_MTU': b'\x05',
    'COMFORT_NOISE_PAYLOAD_TYPE': b'\x06'
}
# --- Audio stream configuration tags ---
AUDIO_TYPES = {
    'CODEC': b'\x01',
    'CODEC_PARAM': b'\x02',
    'RTP_PARAM': b'\x03',
    'COMFORT_NOISE': b'\x04'
}
AUDIO_CODEC_TYPES = {
    'PCMU': b'\x00',
    'PCMA': b'\x01',
    'AACELD': b'\x02',
    'OPUS': b'\x03'
}
AUDIO_CODEC_PARAM_TYPES = {
    'CHANNEL': b'\x01',
    'BIT_RATE': b'\x02',
    'SAMPLE_RATE': b'\x03',
    'PACKET_TIME': b'\x04'
}
AUDIO_CODEC_PARAM_BIT_RATE_TYPES = {
    'VARIABLE': b'\x00',
    'CONSTANT': b'\x01'
}
AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES = {
    'KHZ_8': b'\x00',
    'KHZ_16': b'\x01',
    'KHZ_24': b'\x02'
}
SUPPORTED_AUDIO_CODECS_TAG = b'\x01'
SUPPORTED_COMFORT_NOISE_TAG = b'\x02'
SUPPORTED_AUDIO_CONFIG_TAG = b'\x02'
SET_CONFIG_REQUEST_TAG = b'\x02'
SESSION_ID = b'\x01'
# Pre-encoded SRTP TLV meaning "no SRTP" (crypto suite NONE, empty key/salt).
NO_SRTP = b'\x01\x01\x02\x02\x00\x03\x00'
# Default streaming command template; the placeholders are filled from the
# negotiated stream configuration in ``Camera.start_stream``.
FFMPEG_CMD = (
    'ffmpeg -re -f avfoundation -i 0:0 -threads 0 '
    '-vcodec libx264 -an -pix_fmt yuv420p -r {fps} -f rawvideo -tune zerolatency '
    '-vf scale={width}:{height} -b:v {v_max_bitrate}k -bufsize {v_max_bitrate}k '
    '-payload_type 99 -ssrc {v_ssrc} -f rtp '
    '-srtp_out_suite AES_CM_128_HMAC_SHA1_80 -srtp_out_params {v_srtp_key} '
    'srtp://{address}:{v_port}?rtcpport={v_port}&'
    'localrtcpport={v_port}&pkt_size=1378'
)
class Camera(Accessory):
    """An Accessory exposing the HAP camera services (RTP stream management).

    Handles the SetupEndpoints / SelectedRTPStreamConfiguration handshake and
    delegates the actual media work to ``start_stream`` / ``stop_stream`` /
    ``reconfigure_stream`` / ``get_snapshot``, which subclasses may override.
    """

    category = CATEGORY_CAMERA

    @staticmethod
    def get_supported_rtp_config(support_srtp):
        """Return a base64 TLV describing the supported SRTP configuration."""
        if support_srtp:
            crypto = SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80']
        else:
            crypto = SRTP_CRYPTO_SUITES['NONE']
        return tlv.encode(RTP_CONFIG_TYPES['CRYPTO'], crypto, to_base64=True)

    @staticmethod
    def get_supported_video_stream_config(video_params):
        """Return a base64 TLV describing the supported video configuration.

        :param video_params: dict with a "codec" dict ("profiles", "levels")
            and a "resolutions" list of (width, height, fps) triples.
        """
        codec_params_tlv = tlv.encode(
            VIDEO_CODEC_PARAM_TYPES['PACKETIZATION_MODE'],
            VIDEO_CODEC_PARAM_PACKETIZATION_MODE_TYPES['NON_INTERLEAVED'])
        codec_params = video_params['codec']
        for profile in codec_params['profiles']:
            codec_params_tlv += \
                tlv.encode(VIDEO_CODEC_PARAM_TYPES['PROFILE_ID'], profile)
        for level in codec_params['levels']:
            codec_params_tlv += \
                tlv.encode(VIDEO_CODEC_PARAM_TYPES['LEVEL'], level)
        attr_tlv = b''
        for resolution in video_params['resolutions']:
            res_tlv = tlv.encode(
                VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH'], struct.pack('<H', resolution[0]),
                VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT'], struct.pack('<H', resolution[1]),
                VIDEO_ATTRIBUTES_TYPES['FRAME_RATE'], struct.pack('<H', resolution[2]))
            attr_tlv += tlv.encode(VIDEO_TYPES['ATTRIBUTES'], res_tlv)
        config_tlv = tlv.encode(VIDEO_TYPES['CODEC'], VIDEO_CODEC_TYPES['H264'],
                                VIDEO_TYPES['CODEC_PARAM'], codec_params_tlv)
        return tlv.encode(SUPPORTED_VIDEO_CONFIG_TAG, config_tlv + attr_tlv,
                          to_base64=True)

    @staticmethod
    def get_supported_audio_stream_config(audio_params):
        """Return a base64 TLV describing the supported audio configuration.

        Only OPUS and AAC-eld are accepted; if no supported codec is present
        an OPUS/24kHz default is advertised instead.
        """
        has_supported_codec = False
        configs = b''
        for codec_param in audio_params['codecs']:
            param_type = codec_param['type']
            if param_type == 'OPUS':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['OPUS']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            elif param_type == 'AAC-eld':
                has_supported_codec = True
                codec = AUDIO_CODEC_TYPES['AACELD']
                bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            else:
                logging.warning('Unsupported codec %s', param_type)
                continue
            param_samplerate = codec_param['samplerate']
            if param_samplerate == 8:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_8']
            elif param_samplerate == 16:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_16']
            elif param_samplerate == 24:
                samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            else:
                logging.warning('Unsupported sample rate %s', param_samplerate)
                continue
            param_tlv = tlv.encode(AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                                   AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                                   AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs += tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        if not has_supported_codec:
            # Fall back to a default configuration that iOS supports.
            logging.warning('Client does not support any audio codec that iOS supports.')
            codec = AUDIO_CODEC_TYPES['OPUS']
            bitrate = AUDIO_CODEC_PARAM_BIT_RATE_TYPES['VARIABLE']
            samplerate = AUDIO_CODEC_PARAM_SAMPLE_RATE_TYPES['KHZ_24']
            param_tlv = tlv.encode(
                AUDIO_CODEC_PARAM_TYPES['CHANNEL'], b'\x01',
                AUDIO_CODEC_PARAM_TYPES['BIT_RATE'], bitrate,
                AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE'], samplerate)
            config_tlv = tlv.encode(AUDIO_TYPES['CODEC'], codec,
                                    AUDIO_TYPES['CODEC_PARAM'], param_tlv)
            configs = tlv.encode(SUPPORTED_AUDIO_CODECS_TAG, config_tlv)
        comfort_noise = byte_bool(
            audio_params.get('comfort_noise', False))
        audio_config = to_base64_str(
            configs + tlv.encode(SUPPORTED_COMFORT_NOISE_TAG, comfort_noise))
        return audio_config

    def __init__(self, options, *args, **kwargs):
        """Initialize the camera accessory.

        :param options: dict with at least "address" (the IP from which the
            camera streams), "video" and "audio" parameter dicts; optional
            "srtp" (bool) and "start_stream_cmd" (command template).
        """
        self.streaming_status = STREAMING_STATUS['AVAILABLE']
        self.has_srtp = options.get('srtp', False)
        self.start_stream_cmd = options.get('start_stream_cmd', FFMPEG_CMD)
        self.stream_address = options['address']
        try:
            ipaddress.IPv4Address(self.stream_address)
            self.stream_address_isv6 = b'\x00'
        except ValueError:
            # Not a valid IPv4 literal -> treat as IPv6.
            self.stream_address_isv6 = b'\x01'
        self.sessions = {}
        super().__init__(*args, **kwargs)
        self.add_preload_service('Microphone')
        management = self.add_preload_service('CameraRTPStreamManagement')
        management.configure_char('StreamingStatus',
                                  getter_callback=self._get_streaimg_status)
        management.configure_char('SupportedRTPConfiguration',
                                  value=self.get_supported_rtp_config(
                                      options.get('srtp', False)))
        management.configure_char('SupportedVideoStreamConfiguration',
                                  value=self.get_supported_video_stream_config(
                                      options['video']))
        management.configure_char('SupportedAudioStreamConfiguration',
                                  value=self.get_supported_audio_stream_config(
                                      options['audio']))
        management.configure_char('SelectedRTPStreamConfiguration',
                                  setter_callback=self.set_selected_stream_configuration)
        management.configure_char('SetupEndpoints',
                                  setter_callback=self.set_endpoints)

    async def _start_stream(self, objs, reconfigure):
        """Decode the selected-configuration TLVs and (re)start the stream.

        Note: the original dump had this ``def`` header fused with its first
        statement (a syntax error from comment stripping); the multi-line
        form is restored here without changing behavior.
        """
        video_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['VIDEO'])
        audio_tlv = objs.get(SELECTED_STREAM_CONFIGURATION_TYPES['AUDIO'])
        opts = {}
        if video_tlv:
            video_objs = tlv.decode(video_tlv)
            video_codec_params = video_objs.get(VIDEO_TYPES['CODEC_PARAM'])
            if video_codec_params:
                video_codec_param_objs = tlv.decode(video_codec_params)
                opts['v_profile_id'] = \
                    video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['PROFILE_ID']]
                opts['v_level'] = \
                    video_codec_param_objs[VIDEO_CODEC_PARAM_TYPES['LEVEL']]
            video_attrs = video_objs.get(VIDEO_TYPES['ATTRIBUTES'])
            if video_attrs:
                video_attr_objs = tlv.decode(video_attrs)
                opts['width'] = struct.unpack(
                    '<H', video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_WIDTH']])[0]
                opts['height'] = struct.unpack(
                    '<H', video_attr_objs[VIDEO_ATTRIBUTES_TYPES['IMAGE_HEIGHT']])[0]
                opts['fps'] = struct.unpack(
                    '<B', video_attr_objs[VIDEO_ATTRIBUTES_TYPES['FRAME_RATE']])[0]
            video_rtp_param = video_objs.get(VIDEO_TYPES['RTP_PARAM'])
            if video_rtp_param:
                video_rtp_param_objs = tlv.decode(video_rtp_param)
                # NOTE(review): `1 or ...` always short-circuits to 1 -- the
                # unpack is never evaluated. Kept as-is; the SSRC is also
                # pinned to 1 in set_endpoints.
                opts['v_ssrc'] = 1 or struct.unpack(
                    '<I', video_rtp_param_objs.get(
                        RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']))[0]
                opts['v_payload_type'] = \
                    video_rtp_param_objs.get(RTP_PARAM_TYPES['PAYLOAD_TYPE'])
                opts['v_max_bitrate'] = struct.unpack(
                    '<H', video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_BIT_RATE']))[0]
                opts['v_rtcp_interval'] = struct.unpack(
                    '<f', video_rtp_param_objs.get(RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']))[0]
                opts['v_max_mtu'] = video_rtp_param_objs.get(RTP_PARAM_TYPES['MAX_MTU'])
        if audio_tlv:
            audio_objs = tlv.decode(audio_tlv)
            opts['a_codec'] = audio_objs[AUDIO_TYPES['CODEC']]
            audio_codec_param_objs = tlv.decode(
                audio_objs[AUDIO_TYPES['CODEC_PARAM']])
            audio_rtp_param_objs = tlv.decode(
                audio_objs[AUDIO_TYPES['RTP_PARAM']])
            opts['a_comfort_noise'] = audio_objs[AUDIO_TYPES['COMFORT_NOISE']]
            opts['a_channel'] = \
                audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['CHANNEL']][0]
            opts['a_bitrate'] = struct.unpack(
                '?', audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['BIT_RATE']])[0]
            # Sample-rate enum 0/1/2 maps to 8/16/24 kHz.
            opts['a_sample_rate'] = 8 * (
                1 + audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['SAMPLE_RATE']][0])
            opts['a_packet_time'] = struct.unpack(
                '<B', audio_codec_param_objs[AUDIO_CODEC_PARAM_TYPES['PACKET_TIME']])[0]
            opts['a_ssrc'] = struct.unpack(
                '<I', audio_rtp_param_objs[RTP_PARAM_TYPES['SYNCHRONIZATION_SOURCE']])[0]
            opts['a_payload_type'] = audio_rtp_param_objs[RTP_PARAM_TYPES['PAYLOAD_TYPE']]
            opts['a_max_bitrate'] = struct.unpack(
                '<H', audio_rtp_param_objs[RTP_PARAM_TYPES['MAX_BIT_RATE']])[0]
            opts['a_rtcp_interval'] = struct.unpack(
                '<f', audio_rtp_param_objs[RTP_PARAM_TYPES['RTCP_SEND_INTERVAL']])[0]
            opts['a_comfort_payload_type'] = \
                audio_rtp_param_objs[RTP_PARAM_TYPES['COMFORT_NOISE_PAYLOAD_TYPE']]
        session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
        session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
        session_info = self.sessions[session_id]
        opts.update(session_info)
        success = await self.reconfigure_stream(session_info, opts) if reconfigure \
            else await self.start_stream(session_info, opts)
        if success:
            self.streaming_status = STREAMING_STATUS['STREAMING']
        else:
            logging.error('[%s] Faled to start/reconfigure stream, deleting session.',
                          session_id)
            del self.sessions[session_id]
            self.streaming_status = STREAMING_STATUS['AVAILABLE']

    def _get_streaimg_status(self):
        """Return the StreamingStatus characteristic value as a base64 TLV."""
        return tlv.encode(b'\x01', self.streaming_status, to_base64=True)

    async def _stop_stream(self, objs):
        """Stop the stream identified by the session TLV in ``objs``."""
        session_objs = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
        session_id = UUID(bytes=session_objs[SETUP_TYPES['SESSION_ID']])
        session_info = self.sessions.get(session_id)
        if not session_info:
            logging.error('Requested to stop stream for session %s, but no '
                          'such session was found', session_id)
            return
        await self.stop_stream(session_info)
        del self.sessions[session_id]
        self.streaming_status = STREAMING_STATUS['AVAILABLE']

    def set_selected_stream_configuration(self, value):
        """Dispatch a SelectedRTPStreamConfiguration write to start/stop/reconfigure."""
        logging.debug('set_selected_stream_config - value - %s', value)
        objs = tlv.decode(value, from_base64=True)
        if SELECTED_STREAM_CONFIGURATION_TYPES['SESSION'] not in objs:
            logging.error('Bad request to set selected stream configuration.')
            return
        session = tlv.decode(objs[SELECTED_STREAM_CONFIGURATION_TYPES['SESSION']])
        # Request types: 0 = end, 1 = start, 4 = reconfigure.
        request_type = session[b'\x02'][0]
        logging.debug('Set stream config request: %d', request_type)
        if request_type == 1:
            job = functools.partial(self._start_stream, reconfigure=False)
        elif request_type == 0:
            job = self._stop_stream
        elif request_type == 4:
            job = functools.partial(self._start_stream, reconfigure=True)
        else:
            logging.error('Unknown request type %d', request_type)
            return
        self.driver.add_job(job, objs)

    def set_endpoints(self, value):
        """Handle a SetupEndpoints write: record the controller's endpoint
        and respond with this camera's address, ports and SRTP parameters."""
        objs = tlv.decode(value, from_base64=True)
        session_id = UUID(bytes=objs[SETUP_TYPES['SESSION_ID']])
        # Controller's target address and ports.
        address_tlv = objs[SETUP_TYPES['ADDRESS']]
        address_info_objs = tlv.decode(address_tlv)
        is_ipv6 = struct.unpack('?',
                                address_info_objs[SETUP_ADDR_INFO['ADDRESS_VER']])[0]
        address = address_info_objs[SETUP_ADDR_INFO['ADDRESS']].decode('utf8')
        target_video_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['VIDEO_RTP_PORT']])[0]
        target_audio_port = struct.unpack(
            '<H', address_info_objs[SETUP_ADDR_INFO['AUDIO_RTP_PORT']])[0]
        # SRTP key material sent by the controller.
        video_srtp_tlv = objs[SETUP_TYPES['VIDEO_SRTP_PARAM']]
        video_info_objs = tlv.decode(video_srtp_tlv)
        video_crypto_suite = video_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        video_master_key = video_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        video_master_salt = video_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        audio_srtp_tlv = objs[SETUP_TYPES['AUDIO_SRTP_PARAM']]
        audio_info_objs = tlv.decode(audio_srtp_tlv)
        audio_crypto_suite = audio_info_objs[SETUP_SRTP_PARAM['CRYPTO']][0]
        audio_master_key = audio_info_objs[SETUP_SRTP_PARAM['MASTER_KEY']]
        audio_master_salt = audio_info_objs[SETUP_SRTP_PARAM['MASTER_SALT']]
        logging.debug('Received endpoint configuration:'
                      '\nsession_id: %s\naddress: %s\nis_ipv6: %s'
                      '\ntarget_video_port: %s\ntarget_audio_port: %s'
                      '\nvideo_crypto_suite: %s\nvideo_srtp: %s'
                      '\naudio_crypto_suite: %s\naudio_srtp: %s',
                      session_id, address, is_ipv6, target_video_port, target_audio_port,
                      video_crypto_suite,
                      to_base64_str(video_master_key + video_master_salt),
                      audio_crypto_suite,
                      to_base64_str(audio_master_key + audio_master_salt))
        # Build the response SRTP parameters (echo the key material back).
        if self.has_srtp:
            video_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], video_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], video_master_salt)
            audio_srtp_tlv = tlv.encode(
                SETUP_SRTP_PARAM['CRYPTO'], SRTP_CRYPTO_SUITES['AES_CM_128_HMAC_SHA1_80'],
                SETUP_SRTP_PARAM['MASTER_KEY'], audio_master_key,
                SETUP_SRTP_PARAM['MASTER_SALT'], audio_master_salt)
        else:
            video_srtp_tlv = NO_SRTP
            audio_srtp_tlv = NO_SRTP
        # SSRCs are pinned to 1 (see note in _start_stream).
        video_ssrc = b'\x01'
        audio_ssrc = b'\x01'
        res_address_tlv = tlv.encode(
            SETUP_ADDR_INFO['ADDRESS_VER'], self.stream_address_isv6,
            SETUP_ADDR_INFO['ADDRESS'], self.stream_address.encode('utf-8'),
            SETUP_ADDR_INFO['VIDEO_RTP_PORT'], struct.pack('<H', target_video_port),
            SETUP_ADDR_INFO['AUDIO_RTP_PORT'], struct.pack('<H', target_audio_port))
        response_tlv = tlv.encode(
            SETUP_TYPES['SESSION_ID'], session_id.bytes,
            SETUP_TYPES['STATUS'], SETUP_STATUS['SUCCESS'],
            SETUP_TYPES['ADDRESS'], res_address_tlv,
            SETUP_TYPES['VIDEO_SRTP_PARAM'], video_srtp_tlv,
            SETUP_TYPES['AUDIO_SRTP_PARAM'], audio_srtp_tlv,
            SETUP_TYPES['VIDEO_SSRC'], video_ssrc,
            SETUP_TYPES['AUDIO_SSRC'], audio_ssrc,
            to_base64=True)
        self.sessions[session_id] = {
            'id': session_id,
            'address': address,
            'v_port': target_video_port,
            'v_srtp_key': to_base64_str(video_master_key + video_master_salt),
            'a_port': target_audio_port,
            'audio_srtp_key': to_base64_str(audio_master_key + audio_master_salt),
            'a_ssrc': audio_ssrc
        }
        self.get_service('CameraRTPStreamManagement')\
            .get_characteristic('SetupEndpoints')\
            .set_value(response_tlv)

    async def stop(self):
        """Stop all streaming sessions."""
        await asyncio.gather(*(
            self.stop_stream(session_info) for session_info in self.sessions.values()))

    async def start_stream(self, session_info, stream_config):
        """Start a new stream by spawning ``self.start_stream_cmd``.

        The process handle is stored in ``session_info['process']``.

        :return: True if and only if the process was spawned successfully.
        :rtype: ``bool``
        """
        logging.debug('[%s] Starting stream with the following parameters: %s',
                      session_info['id'], stream_config)
        cmd = self.start_stream_cmd.format(**stream_config).split()
        logging.debug('Executing start stream command: "%s"', ' '.join(cmd))
        try:
            process = await asyncio.create_subprocess_exec(
                *cmd,
                stdout=asyncio.subprocess.DEVNULL,
                stderr=asyncio.subprocess.PIPE,
                limit=1024)
        except Exception as e:  # pylint: disable=broad-except
            # Fix: this except header was fused with its body in the dump.
            logging.error('Failed to start streaming process because of error: %s', e)
            return False
        session_info['process'] = process
        logging.info('[%s] Started stream process - PID %d',
                     session_info['id'], process.pid)
        return True

    async def stop_stream(self, session_info):
        """Terminate the streaming process for the given session, killing it
        if it does not exit within two seconds."""
        session_id = session_info['id']
        ffmpeg_process = session_info.get('process')
        if ffmpeg_process:
            logging.info('[%s] Stopping stream.', session_id)
            try:
                ffmpeg_process.terminate()
                _, stderr = await asyncio.wait_for(
                    ffmpeg_process.communicate(), timeout=2.0)
                logging.debug('Stream command stderr: %s', stderr)
            except asyncio.TimeoutError:
                logging.error('Timeout while waiting for the stream process '
                              'to terminate. Trying with kill.')
                ffmpeg_process.kill()
                await ffmpeg_process.wait()
            logging.debug('Stream process stopped.')
        else:
            logging.warning('No process for session ID %s', session_id)

    async def reconfigure_stream(self, session_info, stream_config):
        """Restart the stream with the new configuration.

        :return: True if and only if the reconfiguration is successful.
        :rtype: ``bool``
        """
        # Bug fix: the result was previously discarded, implicitly returning
        # None; callers use the return value as a success flag.
        return await self.start_stream(session_info, stream_config)

    def get_snapshot(self, image_size):
        """Return JPEG bytes of a snapshot (bundled placeholder by default).

        :param image_size: dict with "image-width" / "image-height" keys;
            ignored by the default implementation.
        """
        with open(os.path.join(RESOURCE_DIR, 'snapshot.jpg'), 'rb') as fp:
            return fp.read()
| true
| true
|
1c49776c2f73f90a5fcf5d29799236503717cedd
| 5,508
|
py
|
Python
|
python/tests/unittest/test_context.py
|
LI-Mingyu/GraphScope-MY
|
942060983d3f7f8d3a3377467386e27aba285b33
|
[
"Apache-2.0"
] | 1
|
2021-12-17T03:58:08.000Z
|
2021-12-17T03:58:08.000Z
|
python/tests/unittest/test_context.py
|
LI-Mingyu/GraphScope-MY
|
942060983d3f7f8d3a3377467386e27aba285b33
|
[
"Apache-2.0"
] | null | null | null |
python/tests/unittest/test_context.py
|
LI-Mingyu/GraphScope-MY
|
942060983d3f7f8d3a3377467386e27aba285b33
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import pandas as pd
import pytest
import vineyard.io
from graphscope import lpa
from graphscope import sssp
from graphscope.framework.app import AppAssets
from graphscope.framework.errors import InvalidArgumentError
def test_simple_context_to_numpy(simple_context):
    """Vertex and result selectors export as 40521-element numpy arrays."""
    # Selectors over `e` (edges: e.src / e.dst / e.data) are not supported yet.
    for selector in ("v.id", "v.data", "r"):
        arr = simple_context.to_numpy(selector)
        assert arr.shape == (40521,)
def test_simple_context_to_dataframe(simple_context):
    """A three-column selector yields a (40521, 3) dataframe."""
    selectors = {"id": "v.id", "data": "v.data", "result": "r"}
    frame = simple_context.to_dataframe(selectors)
    assert frame.shape == (40521, 3)
def test_simple_context_to_vineyard_tensor(simple_context, p2p_project_directed_graph):
    """Context selectors and app tensor contexts can be exported to vineyard."""
    for selector in ("v.id", "r"):
        assert simple_context.to_vineyard_tensor(selector) is not None
    has_path = AppAssets(algo="sssp_has_path", context="tensor")
    ctx = has_path(
        p2p_project_directed_graph._project_to_simple(), source=6, target=3728
    )
    assert ctx.to_vineyard_tensor(axis=0) is not None
def test_simple_context_to_vineyard_dataframe(
    simple_context, p2p_project_directed_graph
):
    """A selector dict can be exported as a vineyard dataframe."""
    selectors = {"id": "v.id", "data": "v.data", "result": "r"}
    assert simple_context.to_vineyard_dataframe(selectors) is not None
def test_property_context_to_numpy(property_context):
    """Label-qualified selectors export per-label numpy arrays."""
    for selector, expected_shape in (
        ("v:v0.dist", (40521,)),
        ("r:v1.dist_1", (40786,)),
    ):
        assert property_context.to_numpy(selector).shape == expected_shape
def test_property_context_to_dataframe(property_context):
    """Per-label dataframes have one row per vertex of that label."""
    frame_v0 = property_context.to_dataframe({"id": "v:v0.id", "result": "r:v0.dist_0"})
    assert frame_v0.shape == (40521, 2)
    frame_v1 = property_context.to_dataframe({"id": "v:v1.id", "result": "r:v1.dist_1"})
    assert frame_v1.shape == (40786, 2)
def test_property_context_output(property_context):
    """output_to_client writes a CSV that pandas can read back."""
    target = "/tmp/r0"
    property_context.output_to_client(
        fd=target, selector={"id": "v:v0.id", "result": "r:v0.dist_0"}
    )
    assert pd.read_csv(target).shape == (40521, 2)
def test_property_context_to_vineyard_tensor(property_context):
    """A label-qualified selector can be exported as a vineyard tensor."""
    assert property_context.to_vineyard_tensor("v:v0.id") is not None
def test_property_context_to_vineyard_dataframe(graphscope_session, property_context):
    """A label-qualified selector dict can be exported as a vineyard dataframe."""
    selectors = {"id": "v:v0.id", "data": "v:v0.dist", "result": "r:v0.dist_0"}
    assert property_context.to_vineyard_dataframe(selectors) is not None
def test_add_column(arrow_property_graph, property_context):
    """add_column attaches context results as new vertex properties."""
    g2 = arrow_property_graph.add_column(
        property_context, {"result_0": "r:v0.dist_0", "result_1": "r:v1.dist_1"}
    )
    for label, prop in (("v0", "result_0"), ("v1", "result_1")):
        names = [p.name for p in g2.schema.get_vertex_properties(label)]
        assert prop in names
def test_context_output(simple_context):
    """Smoke test: a context can be dumped to a local csv file."""
    simple_context.output(
        fd="file:///tmp/rlt.csv",
        selector={"id": "v.id", "data": "v.data", "result": "r"},
    )
def test_add_column_after_computation(arrow_property_graph):
    """Results of an app run on a projected graph can be added back as columns."""
    sg = arrow_property_graph.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
    ret = sssp(sg, 20)
    g2 = arrow_property_graph.add_column(
        ret, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"}
    )
    names = [p.name for p in g2.schema.get_vertex_properties("v0")]
    for prop in ("id_col", "data_col", "result_col"):
        assert prop in names
def test_lpa(arrow_property_graph_lpa):
    """Smoke test for lpa: run -> to_dataframe -> sort must not raise."""
    # NOTE(review): this test asserts nothing about the computed labels; it
    # only verifies the pipeline completes and the selectors are valid.
    (
        lpa(arrow_property_graph_lpa, max_round=20)
        .to_dataframe(
            {"node": "v:v0.id", "label0": "r:v0.label_0", "label1": "r:v0.label_1"}
        )
        .sort_values(by=["node"])
    )
@pytest.mark.skipif("NIGHTLY" not in os.environ, reason="Run in nightly CI")
def test_error_on_selector(property_context):
    """Invalid selectors raise the documented exception types."""
    with pytest.raises(KeyError, match="non_exist_label"):
        property_context.to_numpy("v:non_exist_label.id")
    with pytest.raises(KeyError, match="non_exist_prop"):
        property_context.to_numpy("v:v0.non_exist_prop")
    with pytest.raises(
        InvalidArgumentError,
        match="Selector in labeled vertex data context cannot be None",
    ):
        property_context.to_numpy(selector=None)
    with pytest.raises(ValueError, match="not enough values to unpack"):
        property_context.to_numpy("xxx")
    with pytest.raises(SyntaxError, match="Invalid selector"):
        property_context.to_numpy("xxx:a.b")
| 35.307692
| 88
| 0.698076
|
import os
import pandas as pd
import pytest
import vineyard.io
from graphscope import lpa
from graphscope import sssp
from graphscope.framework.app import AppAssets
from graphscope.framework.errors import InvalidArgumentError
def test_simple_context_to_numpy(simple_context):
    """Vertex and result selectors export as 40521-element numpy arrays."""
    out = simple_context.to_numpy("v.id")
    assert out.shape == (40521,)
    out = simple_context.to_numpy("v.data")
    assert out.shape == (40521,)
    out = simple_context.to_numpy("r")
    assert out.shape == (40521,)
def test_simple_context_to_dataframe(simple_context):
    """A three-column selector yields a (40521, 3) dataframe."""
    out = simple_context.to_dataframe({"id": "v.id", "data": "v.data", "result": "r"})
    assert out.shape == (40521, 3)
def test_simple_context_to_vineyard_tensor(simple_context, p2p_project_directed_graph):
    """Context selectors and app tensor contexts can be exported to vineyard."""
    out = simple_context.to_vineyard_tensor("v.id")
    assert out is not None
    out = simple_context.to_vineyard_tensor("r")
    assert out is not None
    has_path = AppAssets(algo="sssp_has_path", context="tensor")
    ctx = has_path(
        p2p_project_directed_graph._project_to_simple(), source=6, target=3728
    )
    assert ctx.to_vineyard_tensor(axis=0) is not None
def test_simple_context_to_vineyard_dataframe(
    simple_context, p2p_project_directed_graph
):
    """A selector dict can be exported as a vineyard dataframe."""
    out = simple_context.to_vineyard_dataframe(
        {"id": "v.id", "data": "v.data", "result": "r"}
    )
    assert out is not None
def test_property_context_to_numpy(property_context):
    """Label-qualified selectors export per-label numpy arrays."""
    out = property_context.to_numpy("v:v0.dist")
    assert out.shape == (40521,)
    out = property_context.to_numpy("r:v1.dist_1")
    assert out.shape == (40786,)
def test_property_context_to_dataframe(property_context):
    """Per-label dataframes have one row per vertex of that label."""
    out = property_context.to_dataframe({"id": "v:v0.id", "result": "r:v0.dist_0"})
    assert out.shape == (40521, 2)
    out = property_context.to_dataframe({"id": "v:v1.id", "result": "r:v1.dist_1"})
    assert out.shape == (40786, 2)
def test_property_context_output(property_context):
    """output_to_client writes a CSV that pandas can read back."""
    property_context.output_to_client(
        fd="/tmp/r0", selector={"id": "v:v0.id", "result": "r:v0.dist_0"}
    )
    out = pd.read_csv("/tmp/r0")
    assert out.shape == (40521, 2)
def test_property_context_to_vineyard_tensor(property_context):
    """A label-qualified selector can be exported as a vineyard tensor."""
    out = property_context.to_vineyard_tensor("v:v0.id")
    assert out is not None
def test_property_context_to_vineyard_dataframe(graphscope_session, property_context):
    """A label-qualified selector dict can be exported as a vineyard dataframe."""
    out = property_context.to_vineyard_dataframe(
        {"id": "v:v0.id", "data": "v:v0.dist", "result": "r:v0.dist_0"}
    )
    assert out is not None
def test_add_column(arrow_property_graph, property_context):
    """add_column attaches context results as new vertex properties."""
    g2 = arrow_property_graph.add_column(
        property_context, {"result_0": "r:v0.dist_0", "result_1": "r:v1.dist_1"}
    )
    assert "result_0" in [p.name for p in g2.schema.get_vertex_properties("v0")]
    assert "result_1" in [p.name for p in g2.schema.get_vertex_properties("v1")]
def test_context_output(simple_context):
    """Smoke test: a context can be dumped to a local csv file."""
    simple_context.output(
        fd="file:///tmp/rlt.csv",
        selector={"id": "v.id", "data": "v.data", "result": "r"},
    )
def test_add_column_after_computation(arrow_property_graph):
    """Results of an app run on a projected graph can be added back as columns."""
    sg = arrow_property_graph.project(vertices={"v0": ["id"]}, edges={"e0": ["weight"]})
    ret = sssp(sg, 20)
    g2 = arrow_property_graph.add_column(
        ret, {"id_col": "v.id", "data_col": "v.data", "result_col": "r"}
    )
    assert "id_col" in [p.name for p in g2.schema.get_vertex_properties("v0")]
    assert "data_col" in [p.name for p in g2.schema.get_vertex_properties("v0")]
    assert "result_col" in [p.name for p in g2.schema.get_vertex_properties("v0")]
def test_lpa(arrow_property_graph_lpa):
    """Smoke test for lpa: run -> to_dataframe -> sort must not raise."""
    # NOTE(review): no assertion on the result values; this only verifies the
    # pipeline completes and the selectors are valid.
    ret = (
        lpa(arrow_property_graph_lpa, max_round=20)
        .to_dataframe(
            {"node": "v:v0.id", "label0": "r:v0.label_0", "label1": "r:v0.label_1"}
        )
        .sort_values(by=["node"])
    )
@pytest.mark.skipif("NIGHTLY" not in os.environ, reason="Run in nightly CI")
def test_error_on_selector(property_context):
    """Invalid selectors raise the documented exception types."""
    with pytest.raises(KeyError, match="non_exist_label"):
        out = property_context.to_numpy("v:non_exist_label.id")
    with pytest.raises(KeyError, match="non_exist_prop"):
        out = property_context.to_numpy("v:v0.non_exist_prop")
    with pytest.raises(
        InvalidArgumentError,
        match="Selector in labeled vertex data context cannot be None",
    ):
        out = property_context.to_numpy(selector=None)
    with pytest.raises(ValueError, match="not enough values to unpack"):
        out = property_context.to_numpy("xxx")
    with pytest.raises(SyntaxError, match="Invalid selector"):
        out = property_context.to_numpy("xxx:a.b")
| true
| true
|
1c49784aa8306157fe272237ed9d63b7286a170d
| 6,799
|
py
|
Python
|
torchgan/metrics/proximal_duality_gap.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 13
|
2021-05-12T05:37:20.000Z
|
2022-03-30T17:05:47.000Z
|
torchgan/metrics/proximal_duality_gap.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 3
|
2021-10-20T04:51:36.000Z
|
2022-02-25T13:37:32.000Z
|
torchgan/metrics/proximal_duality_gap.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 1
|
2021-12-28T17:03:08.000Z
|
2021-12-28T17:03:08.000Z
|
import torch
import torch.nn.functional as F
import torchvision
import copy
import time
import os
from ..utils import reduce
from .metric import EvaluationMetric
from torchgan.trainer import *
import torch.multiprocessing as mp
import numpy as np
from ray import tune
from torch.optim import Adam
__all__ = ["ProximalDualityGap"]
class ProximalDualityGap(EvaluationMetric):
    r"""
    Computes the (Proximal) Duality Gap of a GAN model as ``|M1 - M2|``, where
    M1 is estimated by letting only the discriminator deviate from a saved
    checkpoint and M2 by letting only the generator deviate.
    Args:
        perturbation : Use perturbed DG (Boolean). NOTE(review): stored but never
            read here; perturbation strength is actually driven by the
            ``perturb_std`` argument of ``calculate_score`` — confirm intent.
        network_params : Network configuration forwarded to ``Trainer``.
        generator_loss : Loss used while letting the generator deviate (M2).
        discriminator_loss : Loss used while letting the discriminator deviate (M1).
        evaluation_loss : Loss whose negated value is averaged to score a model.
        proximal_evaluation_loss : Evaluation loss for M2; defaults to
            ``evaluation_loss`` when not provided.
        train_dataloader : Dataloader used for the deviation training epochs.
        eval_dataloader : Dataloader used for scoring; defaults to ``train_dataloader``.
        n_iter : Number of epochs for each of the M1 and M2 estimations (int).
        log_dir : Root directory for the M1/M2 trainer logs and checkpoints.
        sample_size : Image sampling option forwarded to ``Trainer``.
        n_row : Grid-row option forwarded to ``Trainer``.
        verbose : Print progress information (Boolean).
    """
    def __init__(self,perturbation=False,network_params=None,generator_loss=None,discriminator_loss=None,evaluation_loss=None,proximal_evaluation_loss=None,train_dataloader=None,eval_dataloader=None,n_iter=10,log_dir="./",sample_size=28,n_row=7,verbose=False):
        super(ProximalDualityGap, self).__init__()
        self.perturbation = perturbation
        self.n_iter = n_iter
        self.network_params = network_params
        self.generator_loss = generator_loss
        self.discriminator_loss = discriminator_loss
        self.evaluation_loss = evaluation_loss
        # Fall back to the plain evaluation loss when no proximal variant is given.
        self.proximal_evaluation_loss = proximal_evaluation_loss if proximal_evaluation_loss is not None else evaluation_loss
        self.train_dataloader = train_dataloader
        self.eval_dataloader = eval_dataloader if eval_dataloader is not None else train_dataloader
        self.log_dir = log_dir
        self.sample_size = sample_size
        self.n_row = n_row
        # Map trainer attributes onto metric_ops() keyword arguments.
        self.set_arg_map({"ckpt_dir":"checkpoints" , "ckpt_no":"last_retained_checkpoint"})
        self.verbose = verbose
        # Scoring must not update the networks.
        self.evaluation_loss.eval_only = True
        # Running record of |DG| values across checkpoints, reported to Ray Tune.
        self.history = []
    def preprocess(self, x):
        r"""
        Preprocessor for the trainer object
        Args:
            x (torch.Tensor) : Instance of class BaseTrainer
        Returns:
            Trainer class after preprocessing
        """
        return x
    def attempt_deviation(self,trainer):
        """
        Train ``trainer`` on ``self.train_dataloader`` (one player deviates from
        the checkpoint), then score the deviated model by averaging the negated
        ``self.evaluate`` loss over the evaluation dataloader.
        """
        trainer(self.train_dataloader)
        # Register the evaluation loss with the trainer so its argument map exists.
        trainer.losses[type(self.evaluation_loss).__name__] = self.evaluate
        trainer._store_loss_maps()
        batch_score = []
        for data in self.eval_dataloader:
            # Dataloaders may yield (inputs, labels) pairs, bare tensors, or
            # arbitrary objects; move tensors onto the trainer's device.
            if type(data) is tuple or type(data) is list:
                trainer.real_inputs = data[0].to(trainer.device)
                trainer.labels = data[1].to(trainer.device)
            elif type(data) is torch.Tensor:
                trainer.real_inputs = data.to(trainer.device)
            else:
                trainer.real_inputs = data
            # Negate: train_ops returns a loss, but the gap uses the objective value.
            batch_score.append(-1*self.evaluate.train_ops(**trainer._get_arguments(trainer.loss_arg_maps[type(self.evaluation_loss).__name__])) )
        return np.mean(batch_score)
    def calculate_score(self,load_path=None,m1_dir=None,m2_dir=None,perturb_std=1e-3):
        r"""
        Computes the duality gap for a given trainer instance.
        Args:
            load_path (str) : Path to load the Instance of class BaseTrainer
            m1_dir (str) : Path to save the logs for estimating M1
            m2_dir (str) : Path to save the logs for estimating M2
            perturb_std (float) : Std of the Gaussian noise added to the network
                weights before deviation; values <= 0 disable the perturbation.
        Returns:
            The Duality Gap.
        """
        # M1 setup: only the discriminator trains (generator_iters pinned to 1).
        disc_trainer = Trainer(self.network_params,[self.discriminator_loss],log_dir=os.path.join(m1_dir,"logs"),recon=os.path.join(m1_dir,"images"),checkpoints=os.path.join(m1_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        disc_trainer.load_model(load_path,model_only=True)
        disc_trainer.epochs = self.n_iter
        disc_trainer.loss_information["generator_iters"] = 1
        disc_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                # Perturb the discriminator weights with Gaussian noise (perturbed DG).
                for x in disc_trainer.discriminator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=disc_trainer.device))
        # M2 setup: symmetric case where only the generator trains.
        gen_trainer = Trainer(self.network_params,[self.generator_loss],log_dir=os.path.join(m2_dir,"logs"),recon=os.path.join(m2_dir,"images"),checkpoints=os.path.join(m2_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
        gen_trainer.load_model(load_path,model_only=True)
        gen_trainer.epochs = self.n_iter
        gen_trainer.loss_information["discriminator_iters"] = 1
        gen_trainer.tune_report = "DG"
        if(perturb_std>0):
            with torch.no_grad():
                # Perturb the generator weights with Gaussian noise (perturbed DG).
                for x in gen_trainer.generator.parameters():
                    x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=gen_trainer.device))
        if(self.verbose):
            print("__"*10,"\n{:30s}\n".format("Estimating M1"),"__"*10)
        self.evaluate = self.evaluation_loss
        M1 = self.attempt_deviation(disc_trainer)
        if(self.verbose):
            print("M1 : ",M1)
            print("__"*10,"\n{:30s}\n".format("Estimating M2"),"__"*10)
        # M2 uses the proximal evaluation loss (may equal the plain one).
        self.evaluate = self.proximal_evaluation_loss
        M2 = self.attempt_deviation(gen_trainer)
        if(self.verbose):
            print("M2 : ",M2)
        # Release trainer resources (loggers, checkpoints).
        disc_trainer.complete()
        gen_trainer.complete()
        return abs(M1 - M2)
    def metric_ops(self,ckpt_dir=None,ckpt_no=None):
        r"""Defines the set of operations necessary to compute the Proximal Duality Gap.
        Args:
            ckpt_dir (str): Directory/prefix of the trainer checkpoints.
            ckpt_no (int): Index of the next checkpoint; checkpoint ``ckpt_no - 1`` is loaded.
        Returns:
            The Proximal Duality Gap (scalar quantity)
        """
        if(self.verbose):
            print("=="*60,"\n{:^120s}\n".format("Estimating Proximal Duality Gap"),"=="*60)
        # The most recently written checkpoint is ckpt_no - 1.
        load_path = ckpt_dir + str(ckpt_no-1)+ ".model"
        m1_dir = os.path.join(self.log_dir,"proximal_duality_gap","M1","iter_{}".format(ckpt_no))
        m2_dir = os.path.join(self.log_dir,"proximal_duality_gap","M2","iter_{}".format(ckpt_no))
        start_time = time.time()
        score = self.calculate_score(load_path=load_path,m1_dir=m1_dir,m2_dir=m2_dir)
        time_taken = time.time()-start_time  # NOTE(review): computed but never used or logged
        if(self.verbose):
            print("__"*60,"\n{:^50s} : {}\n".format("Proximal Duality Gap",score),"__"*60)
        self.history.append(abs(score))
        # Report the running mean of |DG| to Ray Tune.
        tune.report(score=np.mean(self.history))
        return score
| 42.761006
| 292
| 0.626121
|
import torch
import torch.nn.functional as F
import torchvision
import copy
import time
import os
from ..utils import reduce
from .metric import EvaluationMetric
from torchgan.trainer import *
import torch.multiprocessing as mp
import numpy as np
from ray import tune
from torch.optim import Adam
__all__ = ["ProximalDualityGap"]
class ProximalDualityGap(EvaluationMetric):
def __init__(self,perturbation=False,network_params=None,generator_loss=None,discriminator_loss=None,evaluation_loss=None,proximal_evaluation_loss=None,train_dataloader=None,eval_dataloader=None,n_iter=10,log_dir="./",sample_size=28,n_row=7,verbose=False):
super(ProximalDualityGap, self).__init__()
self.perturbation = perturbation
self.n_iter = n_iter
self.network_params = network_params
self.generator_loss = generator_loss
self.discriminator_loss = discriminator_loss
self.evaluation_loss = evaluation_loss
self.proximal_evaluation_loss = proximal_evaluation_loss if proximal_evaluation_loss is not None else evaluation_loss
self.train_dataloader = train_dataloader
self.eval_dataloader = eval_dataloader if eval_dataloader is not None else train_dataloader
self.log_dir = log_dir
self.sample_size = sample_size
self.n_row = n_row
self.set_arg_map({"ckpt_dir":"checkpoints" , "ckpt_no":"last_retained_checkpoint"})
self.verbose = verbose
self.evaluation_loss.eval_only = True
self.history = []
def preprocess(self, x):
return x
def attempt_deviation(self,trainer):
trainer(self.train_dataloader)
trainer.losses[type(self.evaluation_loss).__name__] = self.evaluate
trainer._store_loss_maps()
batch_score = []
for data in self.eval_dataloader:
if type(data) is tuple or type(data) is list:
trainer.real_inputs = data[0].to(trainer.device)
trainer.labels = data[1].to(trainer.device)
elif type(data) is torch.Tensor:
trainer.real_inputs = data.to(trainer.device)
else:
trainer.real_inputs = data
batch_score.append(-1*self.evaluate.train_ops(**trainer._get_arguments(trainer.loss_arg_maps[type(self.evaluation_loss).__name__])) )
return np.mean(batch_score)
def calculate_score(self,load_path=None,m1_dir=None,m2_dir=None,perturb_std=1e-3):
disc_trainer = Trainer(self.network_params,[self.discriminator_loss],log_dir=os.path.join(m1_dir,"logs"),recon=os.path.join(m1_dir,"images"),checkpoints=os.path.join(m1_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
disc_trainer.load_model(load_path,model_only=True)
disc_trainer.epochs = self.n_iter
disc_trainer.loss_information["generator_iters"] = 1
disc_trainer.tune_report = "DG"
if(perturb_std>0):
with torch.no_grad():
for x in disc_trainer.discriminator.parameters():
x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=disc_trainer.device))
gen_trainer = Trainer(self.network_params,[self.generator_loss],log_dir=os.path.join(m2_dir,"logs"),recon=os.path.join(m2_dir,"images"),checkpoints=os.path.join(m2_dir,"ckpts","model_"),n_critic=1,sample_size=self.sample_size,nrow=self.n_row,verbose=self.verbose)
gen_trainer.load_model(load_path,model_only=True)
gen_trainer.epochs = self.n_iter
gen_trainer.loss_information["discriminator_iters"] = 1
gen_trainer.tune_report = "DG"
if(perturb_std>0):
with torch.no_grad():
for x in gen_trainer.generator.parameters():
x.add_(torch.normal(mean=0,std=perturb_std,size=x.size(),device=gen_trainer.device))
if(self.verbose):
print("__"*10,"\n{:30s}\n".format("Estimating M1"),"__"*10)
self.evaluate = self.evaluation_loss
M1 = self.attempt_deviation(disc_trainer)
if(self.verbose):
print("M1 : ",M1)
print("__"*10,"\n{:30s}\n".format("Estimating M2"),"__"*10)
self.evaluate = self.proximal_evaluation_loss
M2 = self.attempt_deviation(gen_trainer)
if(self.verbose):
print("M2 : ",M2)
disc_trainer.complete()
gen_trainer.complete()
return abs(M1 - M2)
def metric_ops(self,ckpt_dir=None,ckpt_no=None):
if(self.verbose):
print("=="*60,"\n{:^120s}\n".format("Estimating Proximal Duality Gap"),"=="*60)
load_path = ckpt_dir + str(ckpt_no-1)+ ".model"
m1_dir = os.path.join(self.log_dir,"proximal_duality_gap","M1","iter_{}".format(ckpt_no))
m2_dir = os.path.join(self.log_dir,"proximal_duality_gap","M2","iter_{}".format(ckpt_no))
start_time = time.time()
score = self.calculate_score(load_path=load_path,m1_dir=m1_dir,m2_dir=m2_dir)
time_taken = time.time()-start_time
if(self.verbose):
print("__"*60,"\n{:^50s} : {}\n".format("Proximal Duality Gap",score),"__"*60)
self.history.append(abs(score))
tune.report(score=np.mean(self.history))
return score
| true
| true
|
1c49787b94ab42aa228264ebb5813f6406a67b28
| 203
|
py
|
Python
|
Old/src/com/basic/call_func.py
|
exchris/Pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | null | null | null |
Old/src/com/basic/call_func.py
|
exchris/Pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | 1
|
2018-11-27T09:58:54.000Z
|
2018-11-27T09:58:54.000Z
|
Old/src/com/basic/call_func.py
|
exchris/pythonlearn
|
174f38a86cf1c85d6fc099005aab3568e7549cd0
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Demonstrate calling a few Python built-in functions.
x = abs(100)
y = abs(-20)
print(x, y)
# Aggregate built-ins over small literal collections.
for label, value in (
    ('max(1, 2, 3)', max(1, 2, 3)),
    ('min(1, 2, 3)', min(1, 2, 3)),
    ('sum([1, 2, 3])', sum([1, 2, 3])),
):
    print(label, '=', value)
| 22.555556
| 41
| 0.477833
|
x = abs(100)
y = abs(-20)
print(x, y)
print('max(1, 2, 3) =', max(1, 2, 3))
print('min(1, 2, 3) =', min(1, 2, 3))
print('sum([1, 2, 3]) =', sum([1, 2, 3]))
| true
| true
|
1c4979a46c5b421ace7a15f391b274820af9e4a1
| 25,942
|
py
|
Python
|
stable_baselines3/sac/policies.py
|
danielhettegger-rl/stable-baselines3
|
23de12e95d96b7bb6136c6a338e407ae7db7c545
|
[
"MIT"
] | null | null | null |
stable_baselines3/sac/policies.py
|
danielhettegger-rl/stable-baselines3
|
23de12e95d96b7bb6136c6a338e407ae7db7c545
|
[
"MIT"
] | null | null | null |
stable_baselines3/sac/policies.py
|
danielhettegger-rl/stable-baselines3
|
23de12e95d96b7bb6136c6a338e407ae7db7c545
|
[
"MIT"
] | null | null | null |
import warnings
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution
from stable_baselines3.common.policies import BaseModel, BasePolicy, ContinuousCritic, register_policy
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
get_actor_critic_arch,
)
from stable_baselines3.common.type_aliases import Schedule
# CAP the standard deviation of the actor
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class Actor(BasePolicy):
    """
    Actor network (policy) for SAC.
    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: Network architecture
    :param features_extractor: Network to extract features
        (a CNN when using images, a nn.Flatten() layer otherwise)
    :param features_dim: Number of features
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param full_std: Whether to use (n_features x n_actions) parameters
        for the std instead of only (n_features,) when using gSDE.
    :param sde_net_arch: Network architecture for extracting features
        when using gSDE. If None, the latent features from the policy will be used.
        Pass an empty list to use the states as features.
    :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure
        a positive standard deviation (cf paper). It allows to keep variance
        above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        net_arch: List[int],
        features_extractor: nn.Module,
        features_dim: int,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        full_std: bool = True,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        normalize_images: bool = True,
    ):
        super(Actor, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
            squash_output=True,
        )
        # Save arguments to re-create object at loading
        self.use_sde = use_sde
        self.sde_features_extractor = None
        self.net_arch = net_arch
        self.features_dim = features_dim
        self.activation_fn = activation_fn
        self.log_std_init = log_std_init
        self.sde_net_arch = sde_net_arch
        self.use_expln = use_expln
        self.full_std = full_std
        self.clip_mean = clip_mean
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        action_dim = get_action_dim(self.action_space)
        # Shared latent network feeding both the mean and log-std heads.
        latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)
        self.latent_pi = nn.Sequential(*latent_pi_net)
        last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim
        if self.use_sde:
            # gSDE: state-dependent exploration noise with learned features.
            self.action_dist = StateDependentNoiseDistribution(
                action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True
            )
            self.mu, self.log_std = self.action_dist.proba_distribution_net(
                latent_dim=last_layer_dim, latent_sde_dim=last_layer_dim, log_std_init=log_std_init
            )
            # Avoid numerical issues by limiting the mean of the Gaussian
            # to be in [-clip_mean, clip_mean]
            if clip_mean > 0.0:
                self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))
        else:
            # Classic SAC: squashed diagonal Gaussian with per-state log-std head.
            self.action_dist = SquashedDiagGaussianDistribution(action_dim)
            self.mu = nn.Linear(last_layer_dim, action_dim)
            self.log_std = nn.Linear(last_layer_dim, action_dim)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        # Collect the kwargs needed to rebuild this actor when loading a saved policy.
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                use_sde=self.use_sde,
                log_std_init=self.log_std_init,
                full_std=self.full_std,
                use_expln=self.use_expln,
                features_extractor=self.features_extractor,
                clip_mean=self.clip_mean,
            )
        )
        return data
    def get_std(self) -> th.Tensor:
        """
        Retrieve the standard deviation of the action distribution.
        Only useful when using gSDE.
        It corresponds to ``th.exp(log_std)`` in the normal case,
        but is slightly different when using ``expln`` function
        (cf StateDependentNoiseDistribution doc).
        :return: The standard deviation tensor.
        """
        msg = "get_std() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        return self.action_dist.get_std(self.log_std)
    def reset_noise(self, batch_size: int = 1) -> None:
        """
        Sample new weights for the exploration matrix, when using gSDE.
        :param batch_size: Number of noise samples to draw.
        """
        msg = "reset_noise() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        self.action_dist.sample_weights(self.log_std, batch_size=batch_size)
    def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:
        """
        Get the parameters for the action distribution.
        :param obs: Observation tensor.
        :return:
            Mean, standard deviation and optional keyword arguments.
        """
        features = self.extract_features(obs)
        latent_pi = self.latent_pi(features)
        mean_actions = self.mu(latent_pi)
        if self.use_sde:
            # gSDE keeps a state-independent log_std parameter and passes the
            # latent features to the distribution for noise generation.
            return mean_actions, self.log_std, dict(latent_sde=latent_pi)
        # Unstructured exploration (Original implementation)
        log_std = self.log_std(latent_pi)
        # Original Implementation to cap the standard deviation
        log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        return mean_actions, log_std, {}
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """
        Sample an action (or take the distribution mode when ``deterministic``).
        :param obs: Observation tensor.
        :param deterministic: Whether to skip sampling and use the mode.
        :return: The squashed action.
        """
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # Note: the action is squashed
        return self.action_dist.actions_from_params(mean_actions, log_std, deterministic=deterministic, **kwargs)
    def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """
        Sample an action and return it together with its log probability.
        :param obs: Observation tensor.
        :return: Tuple ``(action, log_prob)``.
        """
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # return action and associated log prob
        return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # BasePolicy hook used by predict(); simply delegates to forward().
        return self.forward(observation, deterministic)
class SACPolicy(BasePolicy):
    """
    Policy class (with both actor and critic) for SAC.
    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param sde_net_arch: Network architecture for extracting features
        when using gSDE. If None, the latent features from the policy will be used.
        Pass an empty list to use the states as features.
    :param use_expln: Use ``expln()`` function instead of ``exp()`` when using gSDE to ensure
        a positive standard deviation (cf paper). It allows to keep variance
        above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments
        to pass to the features extractor.
    :param normalize_images: Whether to normalize images or not,
         dividing by 255.0 (True by default)
    :param optimizer_class: The optimizer to use,
        ``th.optim.Adam`` by default
    :param optimizer_kwargs: Additional keyword arguments,
        excluding the learning rate, to pass to the optimizer
    :param n_critics: Number of critic networks to create.
    :param share_features_extractor: Whether to share or not the features extractor
        between the actor and the critic (this saves computation time)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super(SACPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            squash_output=True,
        )
        if net_arch is None:
            # Default architecture: CNN extractors already produce a flat
            # feature vector, otherwise use two 256-unit hidden layers.
            if features_extractor_class == NatureCNN:
                net_arch = []
            else:
                net_arch = [256, 256]
        actor_arch, critic_arch = get_actor_critic_arch(net_arch)
        self.net_arch = net_arch
        self.activation_fn = activation_fn
        # Common constructor kwargs shared by the actor and critic networks.
        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": actor_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }
        self.actor_kwargs = self.net_args.copy()
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        # gSDE-related options only apply to the actor.
        sde_kwargs = {
            "use_sde": use_sde,
            "log_std_init": log_std_init,
            "use_expln": use_expln,
            "clip_mean": clip_mean,
        }
        self.actor_kwargs.update(sde_kwargs)
        self.critic_kwargs = self.net_args.copy()
        self.critic_kwargs.update(
            {
                "n_critics": n_critics,
                "net_arch": critic_arch,
                "share_features_extractor": share_features_extractor,
            }
        )
        self.actor, self.actor_target = None, None
        self.critic, self.critic_target = None, None
        self.share_features_extractor = share_features_extractor
        self._build(lr_schedule)
    def _build(self, lr_schedule: Schedule) -> None:
        """
        Create the actor, critic and critic target networks and their optimizers.
        :param lr_schedule: Learning rate schedule; evaluated at progress 1 (start).
        """
        self.actor = self.make_actor()
        self.actor.optimizer = self.optimizer_class(self.actor.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
        if self.share_features_extractor:
            self.critic = self.make_critic(features_extractor=self.actor.features_extractor)
            # Do not optimize the shared features extractor with the critic loss
            # otherwise, there are gradient computation issues
            critic_parameters = [param for name, param in self.critic.named_parameters() if "features_extractor" not in name]
        else:
            # Create a separate features extractor for the critic
            # this requires more memory and computation
            self.critic = self.make_critic(features_extractor=None)
            critic_parameters = self.critic.parameters()
        # Critic target should not share the features extractor with critic
        self.critic_target = self.make_critic(features_extractor=None)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs)
        # Target networks should always be in eval mode
        self.critic_target.set_training_mode(False)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        # Collect the kwargs needed to rebuild this policy when loading from disk.
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                activation_fn=self.net_args["activation_fn"],
                use_sde=self.actor_kwargs["use_sde"],
                log_std_init=self.actor_kwargs["log_std_init"],
                use_expln=self.actor_kwargs["use_expln"],
                clip_mean=self.actor_kwargs["clip_mean"],
                n_critics=self.critic_kwargs["n_critics"],
                lr_schedule=self._dummy_schedule,  # dummy lr schedule, not needed for loading policy alone
                optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data
    def reset_noise(self, batch_size: int = 1) -> None:
        """
        Sample new weights for the exploration matrix, when using gSDE.
        :param batch_size: Number of noise samples to draw.
        """
        self.actor.reset_noise(batch_size=batch_size)
    def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:
        """Build an :class:`Actor` on this policy's device (new extractor if None given)."""
        actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor)
        return Actor(**actor_kwargs).to(self.device)
    def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> ContinuousCritic:
        """Build a :class:`ContinuousCritic` on this policy's device (new extractor if None given)."""
        critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor)
        return ContinuousCritic(**critic_kwargs).to(self.device)
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Return an action for ``obs`` by delegating to the actor network."""
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # BasePolicy hook used by predict(); the actor handles squashing/sampling.
        return self.actor(observation, deterministic)
    def set_training_mode(self, mode: bool) -> None:
        """
        Put the policy in either training or evaluation mode.
        This affects certain modules, such as batch normalisation and dropout.
        :param mode: if true, set to training mode, else set to evaluation mode
        """
        self.actor.set_training_mode(mode)
        self.critic.set_training_mode(mode)
        self.training = mode
# Alias: the default MLP-features SAC policy is SACPolicy itself.
MlpPolicy = SACPolicy
class IPTSACPolicy(SACPolicy):
    """
    Policy Class for Interactive Policy Transfer (IPT) version of SAC.
    Most Parameters are passed through to the SAC policy class.
    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param teacher_policy: The policy, which is used to interactively guide the training process.
    :param ipt_weight_schedule: The schedule for the weight of the teacher policy
        (evaluated on remaining training progress; ``None`` disables blending).
    :param kwargs: Any remaining :class:`SACPolicy` constructor arguments.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        teacher_policy: BaseModel = None,
        ipt_weight_schedule: Schedule = None,
        **kwargs
    ):
        super().__init__(observation_space, action_space, lr_schedule, **kwargs)
        self.teacher_policy = teacher_policy
        self.ipt_weight_schedule = ipt_weight_schedule
        if ipt_weight_schedule is not None:
            # Initial weight: schedule evaluated at full remaining progress (1).
            self.ipt_weight = ipt_weight_schedule(1)
        else:
            # No schedule: teacher blending is disabled.
            self.ipt_weight = 0.0
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Return an action for ``obs`` via the teacher-blended prediction."""
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """
        Blend the teacher action with the student actor's mean action, then add
        the actor's exploration noise. With weight 0 or in deterministic mode
        this reduces to the plain SAC actor.
        """
        if self.ipt_weight == 0 or deterministic:
            return self.actor(observation, deterministic)
        mean_actions, log_std, kwargs = self.actor.get_action_dist_params(observation)
        # Note: the action is squashed
        # NOTE(review): the noise term below is squashed by the distribution while
        # mean_actions is not, so the blended sum may leave the squashed action
        # range — confirm this is intended.
        actor_noise = self.actor.action_dist.actions_from_params(
            th.zeros_like(mean_actions),
            log_std,
            deterministic=deterministic,
            **kwargs
        )
        teacher_action = self.teacher_policy.forward(observation)
        # Convex combination of teacher and student means, plus exploration noise.
        action = (self.ipt_weight * teacher_action + (1.0 - self.ipt_weight) * mean_actions) + actor_noise
        return action
    def update_schedules(self, current_progress_remaining):
        """Refresh the teacher-blend weight from its schedule (if any)."""
        if self.ipt_weight_schedule is not None:
            self.ipt_weight = self.ipt_weight_schedule(current_progress_remaining)
class CnnPolicy(SACPolicy):
    """
    SAC actor-critic policy that defaults to the ``NatureCNN`` features
    extractor, making it suitable for image observations.

    All constructor arguments have exactly the same meaning as in
    :class:`SACPolicy`; the only difference is the default value of
    ``features_extractor_class``.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Pure pass-through: delegate everything to SACPolicy. Keyword
        # arguments make the mapping explicit and robust to reordering.
        super(CnnPolicy, self).__init__(
            observation_space=observation_space,
            action_space=action_space,
            lr_schedule=lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
class MultiInputPolicy(SACPolicy):
    """
    SAC actor-critic policy for ``Dict`` observation spaces.

    Identical to :class:`SACPolicy` except that the default features extractor
    is :class:`CombinedExtractor`, which processes and concatenates the
    sub-spaces of a dict observation.

    :param observation_space: Observation space
    :param action_space: Action space
    :param lr_schedule: Learning rate schedule (could be constant)
    :param net_arch: The specification of the policy and value networks.
    :param activation_fn: Activation function
    :param use_sde: Whether to use State Dependent Exploration or not
    :param log_std_init: Initial value for the log standard deviation
    :param sde_net_arch: Deprecated gSDE feature-extraction architecture.
    :param use_expln: Use ``expln()`` instead of ``exp()`` with gSDE to keep the
        standard deviation positive and prevent it from growing too fast.
    :param clip_mean: Clip the mean output when using gSDE to avoid numerical instability.
    :param features_extractor_class: Features extractor to use.
    :param features_extractor_kwargs: Keyword arguments for the features extractor.
    :param normalize_images: Whether to normalize images (divide by 255.0).
    :param optimizer_class: The optimizer to use, ``th.optim.Adam`` by default.
    :param optimizer_kwargs: Extra optimizer keyword arguments (learning rate excluded).
    :param n_critics: Number of critic networks to create.
    :param share_features_extractor: Whether the actor and critic share one
        features extractor (saves computation time).
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Pure delegation: forward every argument by keyword so the mapping to
        # the parent constructor is explicit and order-independent.
        super().__init__(
            observation_space,
            action_space,
            lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
# Make the policies discoverable by name (e.g. policy="MlpPolicy") through
# stable-baselines3's global policy registry.
register_policy("MlpPolicy", MlpPolicy)
register_policy("IptMlpPolicy", IPTSACPolicy)
register_policy("CnnPolicy", CnnPolicy)
register_policy("MultiInputPolicy", MultiInputPolicy)
| 43.021559
| 125
| 0.67304
|
import warnings
from typing import Any, Dict, List, Optional, Tuple, Type, Union
import gym
import torch as th
from torch import nn
from stable_baselines3.common.distributions import SquashedDiagGaussianDistribution, StateDependentNoiseDistribution
from stable_baselines3.common.policies import BaseModel, BasePolicy, ContinuousCritic, register_policy
from stable_baselines3.common.preprocessing import get_action_dim
from stable_baselines3.common.torch_layers import (
BaseFeaturesExtractor,
CombinedExtractor,
FlattenExtractor,
NatureCNN,
create_mlp,
get_actor_critic_arch,
)
from stable_baselines3.common.type_aliases import Schedule
# Clamp bounds for the actor's predicted log standard deviation
# (applied in Actor.get_action_dist_params for numerical stability).
LOG_STD_MAX = 2
LOG_STD_MIN = -20
class Actor(BasePolicy):
    """
    Actor network (policy) for SAC.

    Maps extracted observation features through an MLP (``latent_pi``) to the
    parameters of a squashed Gaussian action distribution, or to a
    state-dependent-noise (gSDE) distribution when ``use_sde`` is True.

    :param observation_space: Observation space
    :param action_space: Action space
    :param net_arch: Hidden-layer sizes of the policy MLP
    :param features_extractor: Module producing ``features_dim`` features
    :param features_dim: Dimension of the extracted features
    :param activation_fn: Activation function for the MLP
    :param use_sde: Whether to use generalized State Dependent Exploration
    :param log_std_init: Initial value for the log standard deviation
    :param full_std: gSDE only — use a full std matrix instead of one std per action
    :param sde_net_arch: Deprecated, ignored (warning only)
    :param use_expln: gSDE only — use ``expln()`` instead of ``exp()``
    :param clip_mean: gSDE only — clip the mean output with Hardtanh if > 0
    :param normalize_images: Whether to normalize images (divide by 255.0)
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        net_arch: List[int],
        features_extractor: nn.Module,
        features_dim: int,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        full_std: bool = True,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        normalize_images: bool = True,
    ):
        super(Actor, self).__init__(
            observation_space,
            action_space,
            features_extractor=features_extractor,
            normalize_images=normalize_images,
            # Actions are tanh-squashed into [-1, 1].
            squash_output=True,
        )
        self.use_sde = use_sde
        self.sde_features_extractor = None
        self.net_arch = net_arch
        self.features_dim = features_dim
        self.activation_fn = activation_fn
        self.log_std_init = log_std_init
        self.sde_net_arch = sde_net_arch
        self.use_expln = use_expln
        self.full_std = full_std
        self.clip_mean = clip_mean
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        action_dim = get_action_dim(self.action_space)
        # -1 output dim: create_mlp returns only hidden layers (no output head).
        latent_pi_net = create_mlp(features_dim, -1, net_arch, activation_fn)
        self.latent_pi = nn.Sequential(*latent_pi_net)
        last_layer_dim = net_arch[-1] if len(net_arch) > 0 else features_dim
        if self.use_sde:
            self.action_dist = StateDependentNoiseDistribution(
                action_dim, full_std=full_std, use_expln=use_expln, learn_features=True, squash_output=True
            )
            self.mu, self.log_std = self.action_dist.proba_distribution_net(
                latent_dim=last_layer_dim, latent_sde_dim=last_layer_dim, log_std_init=log_std_init
            )
            if clip_mean > 0.0:
                # Avoid numerical instability by bounding the mean output.
                self.mu = nn.Sequential(self.mu, nn.Hardtanh(min_val=-clip_mean, max_val=clip_mean))
        else:
            # Standard SAC: separate linear heads for mean and log-std.
            self.action_dist = SquashedDiagGaussianDistribution(action_dim)
            self.mu = nn.Linear(last_layer_dim, action_dim)
            self.log_std = nn.Linear(last_layer_dim, action_dim)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return kwargs needed to re-create this actor (used for saving/loading)."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                features_dim=self.features_dim,
                activation_fn=self.activation_fn,
                use_sde=self.use_sde,
                log_std_init=self.log_std_init,
                full_std=self.full_std,
                use_expln=self.use_expln,
                features_extractor=self.features_extractor,
                clip_mean=self.clip_mean,
            )
        )
        return data
    def get_std(self) -> th.Tensor:
        """Return the current standard deviation (gSDE only)."""
        msg = "get_std() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        return self.action_dist.get_std(self.log_std)
    def reset_noise(self, batch_size: int = 1) -> None:
        """Sample a new exploration (noise) matrix (gSDE only)."""
        msg = "reset_noise() is only available when using gSDE"
        assert isinstance(self.action_dist, StateDependentNoiseDistribution), msg
        self.action_dist.sample_weights(self.log_std, batch_size=batch_size)
    def get_action_dist_params(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor, Dict[str, th.Tensor]]:
        """
        Compute the action-distribution parameters from an observation.

        :return: (mean actions, log std, extra distribution kwargs).
            For gSDE the log std is the learned parameter tensor and the latent
            features are passed through; otherwise the log std is predicted and
            clamped into [LOG_STD_MIN, LOG_STD_MAX].
        """
        features = self.extract_features(obs)
        latent_pi = self.latent_pi(features)
        mean_actions = self.mu(latent_pi)
        if self.use_sde:
            return mean_actions, self.log_std, dict(latent_sde=latent_pi)
        log_std = self.log_std(latent_pi)
        # Clamp for numerical stability.
        log_std = th.clamp(log_std, LOG_STD_MIN, LOG_STD_MAX)
        return mean_actions, log_std, {}
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        """Sample (or take the mode of) an action for the given observation."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # Note: the action is squashed by the distribution.
        return self.action_dist.actions_from_params(mean_actions, log_std, deterministic=deterministic, **kwargs)
    def action_log_prob(self, obs: th.Tensor) -> Tuple[th.Tensor, th.Tensor]:
        """Sample an action and return it together with its log-probability."""
        mean_actions, log_std, kwargs = self.get_action_dist_params(obs)
        # return action and associated log prob
        return self.action_dist.log_prob_from_params(mean_actions, log_std, **kwargs)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self.forward(observation, deterministic)
class SACPolicy(BasePolicy):
    """
    Policy class (actor + twin critics + target critics) for SAC.

    Builds an :class:`Actor` and two :class:`ContinuousCritic` networks (live
    and target) from a shared architecture specification, and creates one
    optimizer per network.
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = FlattenExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        super(SACPolicy, self).__init__(
            observation_space,
            action_space,
            features_extractor_class,
            features_extractor_kwargs,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            squash_output=True,
        )
        # Default architecture: no extra layers after a CNN extractor,
        # two 256-unit layers otherwise.
        if net_arch is None:
            if features_extractor_class == NatureCNN:
                net_arch = []
            else:
                net_arch = [256, 256]
        actor_arch, critic_arch = get_actor_critic_arch(net_arch)
        self.net_arch = net_arch
        self.activation_fn = activation_fn
        # Arguments common to actor and critic construction.
        self.net_args = {
            "observation_space": self.observation_space,
            "action_space": self.action_space,
            "net_arch": actor_arch,
            "activation_fn": self.activation_fn,
            "normalize_images": normalize_images,
        }
        self.actor_kwargs = self.net_args.copy()
        if sde_net_arch is not None:
            warnings.warn("sde_net_arch is deprecated and will be removed in SB3 v2.4.0.", DeprecationWarning)
        sde_kwargs = {
            "use_sde": use_sde,
            "log_std_init": log_std_init,
            "use_expln": use_expln,
            "clip_mean": clip_mean,
        }
        self.actor_kwargs.update(sde_kwargs)
        self.critic_kwargs = self.net_args.copy()
        self.critic_kwargs.update(
            {
                "n_critics": n_critics,
                "net_arch": critic_arch,
                "share_features_extractor": share_features_extractor,
            }
        )
        self.actor, self.actor_target = None, None
        self.critic, self.critic_target = None, None
        self.share_features_extractor = share_features_extractor
        self._build(lr_schedule)
    def _build(self, lr_schedule: Schedule) -> None:
        """Create actor, critic and target critic plus their optimizers."""
        self.actor = self.make_actor()
        # lr_schedule(1) == learning rate at the very start of training.
        self.actor.optimizer = self.optimizer_class(self.actor.parameters(), lr=lr_schedule(1), **self.optimizer_kwargs)
        if self.share_features_extractor:
            self.critic = self.make_critic(features_extractor=self.actor.features_extractor)
            # Do not optimize the shared features extractor through the critic
            # loss — only the actor's optimizer updates it.
            critic_parameters = [param for name, param in self.critic.named_parameters() if "features_extractor" not in name]
        else:
            # Create a new features extractor for each network.
            self.critic = self.make_critic(features_extractor=None)
            critic_parameters = self.critic.parameters()
        # The target critic always owns its own (frozen) features extractor.
        self.critic_target = self.make_critic(features_extractor=None)
        self.critic_target.load_state_dict(self.critic.state_dict())
        self.critic.optimizer = self.optimizer_class(critic_parameters, lr=lr_schedule(1), **self.optimizer_kwargs)
        # Target networks are never in training mode (affects batch-norm/dropout only).
        self.critic_target.set_training_mode(False)
    def _get_constructor_parameters(self) -> Dict[str, Any]:
        """Return kwargs needed to re-create this policy (used for saving/loading)."""
        data = super()._get_constructor_parameters()
        data.update(
            dict(
                net_arch=self.net_arch,
                activation_fn=self.net_args["activation_fn"],
                use_sde=self.actor_kwargs["use_sde"],
                log_std_init=self.actor_kwargs["log_std_init"],
                use_expln=self.actor_kwargs["use_expln"],
                clip_mean=self.actor_kwargs["clip_mean"],
                n_critics=self.critic_kwargs["n_critics"],
                lr_schedule=self._dummy_schedule, optimizer_class=self.optimizer_class,
                optimizer_kwargs=self.optimizer_kwargs,
                features_extractor_class=self.features_extractor_class,
                features_extractor_kwargs=self.features_extractor_kwargs,
            )
        )
        return data
    def reset_noise(self, batch_size: int = 1) -> None:
        """Sample a new exploration-noise matrix (gSDE only; delegates to the actor)."""
        self.actor.reset_noise(batch_size=batch_size)
    def make_actor(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> Actor:
        """Build an Actor, reusing *features_extractor* if given, else creating one."""
        actor_kwargs = self._update_features_extractor(self.actor_kwargs, features_extractor)
        return Actor(**actor_kwargs).to(self.device)
    def make_critic(self, features_extractor: Optional[BaseFeaturesExtractor] = None) -> ContinuousCritic:
        """Build a ContinuousCritic, reusing *features_extractor* if given."""
        critic_kwargs = self._update_features_extractor(self.critic_kwargs, features_extractor)
        return ContinuousCritic(**critic_kwargs).to(self.device)
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self.actor(observation, deterministic)
    def set_training_mode(self, mode: bool) -> None:
        """
        Put actor and critic in train/eval mode (affects batch-norm/dropout).
        The target critic intentionally stays in eval mode.
        """
        self.actor.set_training_mode(mode)
        self.critic.set_training_mode(mode)
        self.training = mode
# Alias: the default SAC policy (FlattenExtractor) serves as "MlpPolicy".
MlpPolicy = SACPolicy
class IPTSACPolicy(SACPolicy):
    """
    SAC policy that blends a pre-trained teacher policy into exploration.

    While ``ipt_weight`` is non-zero and a stochastic action is requested, the
    emitted action is a convex combination of the teacher's action and the
    student actor's mean action, plus the actor's own sampled noise.  The
    teacher weight can be annealed over training via ``ipt_weight_schedule``
    (see :meth:`update_schedules`).
    """
    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        teacher_policy: BaseModel = None,
        ipt_weight_schedule: Schedule = None,
        **kwargs
    ):
        super().__init__(observation_space, action_space, lr_schedule, **kwargs)
        self.teacher_policy = teacher_policy
        self.ipt_weight_schedule = ipt_weight_schedule
        if ipt_weight_schedule is not None:
            # Schedules take "progress remaining": 1 == very start of training.
            self.ipt_weight = ipt_weight_schedule(1)
        else:
            # No schedule -> teacher never used; behaves like plain SACPolicy.
            self.ipt_weight = 0.0
    def forward(self, obs: th.Tensor, deterministic: bool = False) -> th.Tensor:
        return self._predict(obs, deterministic=deterministic)
    def _predict(self, observation: th.Tensor, deterministic: bool = False) -> th.Tensor:
        # Plain SAC behaviour when the teacher has no weight or a
        # deterministic action is requested.
        if self.ipt_weight == 0 or deterministic:
            return self.actor(observation, deterministic)
        mean_actions, log_std, kwargs = self.actor.get_action_dist_params(observation)
        # Zero-mean sample from the actor's action distribution, used as noise.
        actor_noise = self.actor.action_dist.actions_from_params(
            th.zeros_like(mean_actions),
            log_std,
            deterministic=deterministic,
            **kwargs
        )
        teacher_action = self.teacher_policy.forward(observation)
        # Convex combination of teacher action and student mean, plus noise.
        # NOTE(review): the sum is not re-squashed, so it can leave the
        # nominal [-1, 1] action range — confirm downstream clipping.
        action = (self.ipt_weight * teacher_action + (1.0 - self.ipt_weight) * mean_actions) + actor_noise
        return action
    def update_schedules(self, current_progress_remaining):
        # Called externally during training to anneal the teacher weight.
        if self.ipt_weight_schedule is not None:
            self.ipt_weight = self.ipt_weight_schedule(current_progress_remaining)
class CnnPolicy(SACPolicy):
    """
    SAC actor-critic policy for image observations.

    Behaves exactly like :class:`SACPolicy` but defaults the features
    extractor to :class:`NatureCNN`.
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = NatureCNN,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Pure delegation: forward every argument by keyword so the mapping to
        # the parent constructor is explicit and order-independent.
        super().__init__(
            observation_space,
            action_space,
            lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
class MultiInputPolicy(SACPolicy):
    """
    SAC actor-critic policy for ``Dict`` observation spaces.

    Behaves exactly like :class:`SACPolicy` but defaults the features
    extractor to :class:`CombinedExtractor`.
    """

    def __init__(
        self,
        observation_space: gym.spaces.Space,
        action_space: gym.spaces.Space,
        lr_schedule: Schedule,
        net_arch: Optional[Union[List[int], Dict[str, List[int]]]] = None,
        activation_fn: Type[nn.Module] = nn.ReLU,
        use_sde: bool = False,
        log_std_init: float = -3,
        sde_net_arch: Optional[List[int]] = None,
        use_expln: bool = False,
        clip_mean: float = 2.0,
        features_extractor_class: Type[BaseFeaturesExtractor] = CombinedExtractor,
        features_extractor_kwargs: Optional[Dict[str, Any]] = None,
        normalize_images: bool = True,
        optimizer_class: Type[th.optim.Optimizer] = th.optim.Adam,
        optimizer_kwargs: Optional[Dict[str, Any]] = None,
        n_critics: int = 2,
        share_features_extractor: bool = True,
    ):
        # Pure delegation: forward every argument by keyword so the mapping to
        # the parent constructor is explicit and order-independent.
        super().__init__(
            observation_space,
            action_space,
            lr_schedule,
            net_arch=net_arch,
            activation_fn=activation_fn,
            use_sde=use_sde,
            log_std_init=log_std_init,
            sde_net_arch=sde_net_arch,
            use_expln=use_expln,
            clip_mean=clip_mean,
            features_extractor_class=features_extractor_class,
            features_extractor_kwargs=features_extractor_kwargs,
            normalize_images=normalize_images,
            optimizer_class=optimizer_class,
            optimizer_kwargs=optimizer_kwargs,
            n_critics=n_critics,
            share_features_extractor=share_features_extractor,
        )
# Make the policies discoverable by name (e.g. policy="MlpPolicy") through
# stable-baselines3's global policy registry.
register_policy("MlpPolicy", MlpPolicy)
register_policy("IptMlpPolicy", IPTSACPolicy)
register_policy("CnnPolicy", CnnPolicy)
register_policy("MultiInputPolicy", MultiInputPolicy)
| true
| true
|
1c497aa96e625e83e18815ae709066fd24247385
| 6,399
|
py
|
Python
|
controller/gui.py
|
HighwayFlocking/HighwayFlocking
|
e870579d11574f5789162481219e771610f8b721
|
[
"ECL-2.0",
"Apache-2.0"
] | 44
|
2015-06-11T14:39:26.000Z
|
2021-05-21T11:06:47.000Z
|
controller/gui.py
|
HighwayFlocking/HighwayFlocking
|
e870579d11574f5789162481219e771610f8b721
|
[
"ECL-2.0",
"Apache-2.0"
] | 2
|
2015-06-12T07:32:58.000Z
|
2018-05-27T07:04:52.000Z
|
controller/gui.py
|
HighwayFlocking/HighwayFlocking
|
e870579d11574f5789162481219e771610f8b721
|
[
"ECL-2.0",
"Apache-2.0"
] | 8
|
2015-06-11T15:19:08.000Z
|
2019-10-08T13:18:52.000Z
|
#coding: utf-8
# Copyright 2015 Sindre Ilebekk Johansen and Andreas Sløgedal Løvland
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import os
import threading
from datetime import time
import subprocess
from PySide import QtCore, QtGui
from PySide.QtCore import QTimer
from lib.gui.controller_ui import Ui_MainWindow
import configs
import config as cfg
from lib.simulation import Simulator, SimulatorIsClosedException
logger = logging.getLogger(__name__)
# (label, spawner-config) pairs shown in the GUI's configuration combo box;
# the index selected in the UI maps directly into this tuple
# (see ControllerMainWindow.update_simulator).
configurations = (
    ('just_cars', configs.JUST_CARS),
    ('oncoming', configs.ONCOMING),
    ('oncoming + merging', configs.ONCOMING_ONRAMP),
    ('oncoming + merging + buses', configs.ONCOMING_ONRAMP_BUS),
    ('oncoming + merging + emergency vehicles', configs.ONCOMING_ONRAMP_EMERGENCY),
    ('oncoming + merging + buses + emergency vehicles', configs.ONCOMING_ONRAMP_BUS_EMERGENCY),
    ('symetric', configs.SYMETRIC)
)
class ControllerMainWindow(QtGui.QMainWindow):
    """
    Main control window for the highway-flocking simulator.

    Lets the user pick a spawner configuration and throughput, start/stop the
    external simulator process, and shows live statistics polled once per
    second via a QTimer.
    """
    def __init__(self, parent=None):
        super(ControllerMainWindow, self).__init__(parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        self.setup_configurations()
        self.assign_widgets()
        # No simulator process is running until the user presses Start.
        self.simulator = None
        self.ui.stop.setEnabled(False)
        # Poll simulator stats every second (drives self.update()).
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update)
        self.timer.start(1000)
        self.show()
        # Place this window to the left of a centered 1280px-wide simulator window.
        desktop = QtGui.QDesktopWidget().availableGeometry()
        y = self.y()
        # NOTE(review): under Python 3 this division yields a float, but
        # QWidget.move expects ints — written for Python 2; confirm.
        x = (desktop.width() - 1280) / 2 - self.width() - 50
        self.move(x, y)
    def setup_configurations(self):
        """Fill the configuration combo box with the labels from `configurations`."""
        for i, config in enumerate(configurations):
            self.ui.configuration.insertItem(i, config[0])
    def assign_widgets(self):
        """Wire the Start/Stop buttons to their handlers."""
        self.ui.start.clicked.connect(self.start_clicked)
        self.ui.stop.clicked.connect(self.stop_clicked)
    def on_about_to_quit(self):
        # Shut the simulator down when the Qt application exits.
        if self.simulator:
            self.simulator.close()
    def stop_clicked(self):
        """Stop the running simulator and reset the UI to its idle state."""
        self.ui.stop.setEnabled(False)
        if self.simulator:
            self.simulator.close()
        self.ui.fixed.setEnabled(True)
        self.simulator = None
        self.ui.start.setText("Start Simulation")
    def start_clicked(self):
        """Start the simulator, or reconfigure it if it is already running."""
        self.ui.start.setEnabled(False)
        # The fixed-time-step option can only be chosen before launch.
        self.ui.fixed.setEnabled(False)
        if self.simulator:
            self.update_simulator()
        else:
            self.simulator = Simulator(fixed_time_step=self.ui.fixed.isChecked())
            # Launch/connect off the GUI thread so the window stays responsive.
            threading.Thread(target=self.start_simulator, name="Start simulator Thread").start()
            self.ui.status.setText(
                "<html><head/><body><p><span style=\"color:#222;\">Simulator is Starting</span></p></body></html>")
        self.ui.start.setText("Restart Simulation")
    def start_simulator(self):
        # Runs on the worker thread started in start_clicked.
        self.simulator.start_and_connect()
        self.update_simulator()
    def update_simulator(self):
        """Push the currently selected configuration/throughput to the simulator."""
        base_config = configurations[self.ui.configuration.currentIndex()][1]
        throughput = self.ui.throughput.value()
        config = configs.througput(base_config, throughput=throughput)
        logger.info("Max Waits: %s", [sp['max_wait'] for sp in config['spawners']])
        logger.info("Min Waits: %s", [sp['min_wait'] for sp in config['spawners']])
        # Pause -> clear -> reconfigure -> resume, so the new settings start clean.
        logger.info('Pausing the simulation')
        self.simulator.set_paused(True)
        logger.info('Removing all vehicles')
        self.simulator.remove_all_vehicles()
        logger.info('Resetting the spawners')
        self.simulator.reset_all_spawners()
        logger.info('Configuring the spawners')
        for spawner_conf in config['spawners']:
            self.simulator.configure_spawner(spawner_conf)
        logger.info('Starting the simulation')
        self.simulator.set_paused(False)
        logger.info('Resetting the stats')
        self.simulator.reset_stats()
        self.simulator.clear_queue()
        self.ui.start.setEnabled(True)
        self.ui.stop.setEnabled(True)
    def update(self):
        """
        Timer tick: poll the newest stats and refresh the status labels.

        NOTE(review): this overrides QWidget.update(); the name is kept to
        preserve the existing timer wiring — confirm repaints are unaffected.
        """
        if self.simulator:
            try:
                stats = self.simulator.get_newest_stats()
                if stats:
                    # Simulator reports elapsed seconds; show as MM:SS.
                    minutes, seconds = divmod(int(stats['time']), 60)
                    stats['time'] = time(minute=minutes, second=seconds)
                    self.ui.current_values.setText(
                        """Time: {time:%M:%S}
Current Throughput: From City: {throughputs[0]}, To City: {throughputs[1]}
Incidents: {incidents}
Vehicles Spawned: {spawned}
Vehicles on Road: {onroad}""".format(**stats))
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#00b548;\">Simulator is Running</span></p></body></html>")
            except SimulatorIsClosedException:
                # Simulator process went away: reset the UI to idle.
                self.ui.status.setText(
                    "<html><head/><body><p><span style=\"color:#b50003;\">Simulator is not Running</span></p></body></html>")
                self.simulator.close()
                self.simulator = None
                self.ui.start.setText("Start Simulation")
                self.ui.stop.setEnabled(False)
                self.ui.fixed.setEnabled(True)
                return
def main():
    """GUI entry point: configure file logging, verify the simulator is
    available, then run the Qt event loop with the controller window.

    Exits the process with the Qt event loop's return code via sys.exit.
    """
    fh = logging.FileHandler('gui.log')
    fh.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
    fh.setFormatter(formatter)
    # Attach the handler to the root logger so every module logs to gui.log.
    # Bound to a distinct name so it does not shadow this module's `logger`.
    root_logger = logging.getLogger('')
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(fh)
    app = QtGui.QApplication(sys.argv)
    if not cfg.SIMULATOR_LOCATION:
        # Abort early with a modal error dialog; the dialog's return value
        # (previously stored in an unused variable) is not needed.
        QtGui.QMessageBox.critical(None, 'No Simulator!', "Could not find the simulator!", QtGui.QMessageBox.Abort)
        return
    controllerWindow = ControllerMainWindow()
    # Ensure a running simulator process is shut down when the app quits.
    app.aboutToQuit.connect(controllerWindow.on_about_to_quit)
    sys.exit(app.exec_())
# Allow running the controller GUI directly as a script.
if __name__ == '__main__':
    main()
| 34.967213
| 125
| 0.64932
|
import sys
import logging
import os
import threading
from datetime import time
import subprocess
from PySide import QtCore, QtGui
from PySide.QtCore import QTimer
from lib.gui.controller_ui import Ui_MainWindow
import configs
import config as cfg
from lib.simulation import Simulator, SimulatorIsClosedException
logger = logging.getLogger(__name__)
configurations = (
('just_cars', configs.JUST_CARS),
('oncoming', configs.ONCOMING),
('oncoming + merging', configs.ONCOMING_ONRAMP),
('oncoming + merging + buses', configs.ONCOMING_ONRAMP_BUS),
('oncoming + merging + emergency vehicles', configs.ONCOMING_ONRAMP_EMERGENCY),
('oncoming + merging + buses + emergency vehicles', configs.ONCOMING_ONRAMP_BUS_EMERGENCY),
('symetric', configs.SYMETRIC)
)
class ControllerMainWindow(QtGui.QMainWindow):
def __init__(self, parent=None):
super(ControllerMainWindow, self).__init__(parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
self.setup_configurations()
self.assign_widgets()
self.simulator = None
self.ui.stop.setEnabled(False)
self.timer = QTimer(self)
self.timer.timeout.connect(self.update)
self.timer.start(1000)
self.show()
desktop = QtGui.QDesktopWidget().availableGeometry()
y = self.y()
x = (desktop.width() - 1280) / 2 - self.width() - 50
self.move(x, y)
def setup_configurations(self):
for i, config in enumerate(configurations):
self.ui.configuration.insertItem(i, config[0])
def assign_widgets(self):
self.ui.start.clicked.connect(self.start_clicked)
self.ui.stop.clicked.connect(self.stop_clicked)
def on_about_to_quit(self):
if self.simulator:
self.simulator.close()
def stop_clicked(self):
self.ui.stop.setEnabled(False)
if self.simulator:
self.simulator.close()
self.ui.fixed.setEnabled(True)
self.simulator = None
self.ui.start.setText("Start Simulation")
def start_clicked(self):
self.ui.start.setEnabled(False)
self.ui.fixed.setEnabled(False)
if self.simulator:
self.update_simulator()
else:
self.simulator = Simulator(fixed_time_step=self.ui.fixed.isChecked())
threading.Thread(target=self.start_simulator, name="Start simulator Thread").start()
self.ui.status.setText(
"<html><head/><body><p><span style=\"color:
self.ui.start.setText("Restart Simulation")
def start_simulator(self):
self.simulator.start_and_connect()
self.update_simulator()
def update_simulator(self):
base_config = configurations[self.ui.configuration.currentIndex()][1]
throughput = self.ui.throughput.value()
config = configs.througput(base_config, throughput=throughput)
logger.info("Max Waits: %s", [sp['max_wait'] for sp in config['spawners']])
logger.info("Min Waits: %s", [sp['min_wait'] for sp in config['spawners']])
logger.info('Pausing the simulation')
self.simulator.set_paused(True)
logger.info('Removing all vehicles')
self.simulator.remove_all_vehicles()
logger.info('Resetting the spawners')
self.simulator.reset_all_spawners()
logger.info('Configuring the spawners')
for spawner_conf in config['spawners']:
self.simulator.configure_spawner(spawner_conf)
logger.info('Starting the simulation')
self.simulator.set_paused(False)
logger.info('Resetting the stats')
self.simulator.reset_stats()
self.simulator.clear_queue()
self.ui.start.setEnabled(True)
self.ui.stop.setEnabled(True)
def update(self):
if self.simulator:
try:
stats = self.simulator.get_newest_stats()
if stats:
minutes, seconds = divmod(int(stats['time']), 60)
stats['time'] = time(minute=minutes, second=seconds)
self.ui.current_values.setText(
"""Time: {time:%M:%S}
Current Throughput: From City: {throughputs[0]}, To City: {throughputs[1]}
Incidents: {incidents}
Vehicles Spawned: {spawned}
Vehicles on Road: {onroad}""".format(**stats))
self.ui.status.setText(
"<html><head/><body><p><span style=\"color: except SimulatorIsClosedException:
self.ui.status.setText(
"<html><head/><body><p><span style=\"color: self.simulator.close()
self.simulator = None
self.ui.start.setText("Start Simulation")
self.ui.stop.setEnabled(False)
self.ui.fixed.setEnabled(True)
return
def main():
fh = logging.FileHandler('gui.log')
fh.setLevel(logging.INFO)
formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(funcName)s - %(message)s')
fh.setFormatter(formatter)
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
logger.addHandler(fh)
app = QtGui.QApplication(sys.argv)
if not cfg.SIMULATOR_LOCATION:
a=QtGui.QMessageBox.critical(None,'No Simulator!',"Could not find the simulator!", QtGui.QMessageBox.Abort)
return
controllerWindow = ControllerMainWindow()
app.aboutToQuit.connect(controllerWindow.on_about_to_quit)
sys.exit(app.exec_())
if __name__ == '__main__':
main()
| true
| true
|
1c497acc563c98424984a3eed65eb8d2e59387b3
| 563
|
py
|
Python
|
pyqt/pyqt5-master/src/windows/Background2.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/windows/Background2.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | null | null | null |
pyqt/pyqt5-master/src/windows/Background2.py
|
Ding-zhenke/Dcount-s-notebook
|
16c29ac7d076c466e053f1b8db4a7f4e43f67a24
|
[
"MIT"
] | 2
|
2019-06-18T05:53:26.000Z
|
2019-06-19T03:26:02.000Z
|
'''
使用多种方式设置窗口背景色和背景图片
1. QSS
2. QPalette
3. 直接绘制
'''
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Background2(QWidget):
    """Widget that paints a background image over its whole area on every repaint."""

    def __init__(self):
        super().__init__()
        # Window title kept verbatim ("draw background picture").
        self.setWindowTitle("绘制背景图片")

    def paintEvent(self, event):
        """Stretch the background image to fill the widget's current rectangle."""
        background = QPixmap('./images/screen1.jpg')
        QPainter(self).drawPixmap(self.rect(), background)
# Standard Qt bootstrap: create the application, show the widget,
# and hand control to the event loop until the window closes.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    form = Background2()
    form.show()
    sys.exit(app.exec_())
| 18.766667
| 48
| 0.646536
|
import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
class Background2(QWidget):
def __init__(self):
super().__init__()
self.setWindowTitle("绘制背景图片")
def paintEvent(self, event):
painter = QPainter(self)
pixmap = QPixmap('./images/screen1.jpg')
painter.drawPixmap(self.rect(),pixmap)
if __name__ == "__main__":
app = QApplication(sys.argv)
form = Background2()
form.show()
sys.exit(app.exec_())
| true
| true
|
1c497bb22e51089dbe0ccd8f10dc86c537c0c7d6
| 9,748
|
py
|
Python
|
train.py
|
ssahn3087/pedestrian_detection
|
d9a6cb9d10246941cff8575c803ab60b3a9d7d04
|
[
"MIT"
] | 1
|
2019-10-25T12:31:38.000Z
|
2019-10-25T12:31:38.000Z
|
train.py
|
ssahn3087/pedestrian_detection
|
d9a6cb9d10246941cff8575c803ab60b3a9d7d04
|
[
"MIT"
] | null | null | null |
train.py
|
ssahn3087/pedestrian_detection
|
d9a6cb9d10246941cff8575c803ab60b3a9d7d04
|
[
"MIT"
] | null | null | null |
import os
import torch
import numpy as np
import math
from torch.autograd import Variable
from datetime import datetime
from faster_rcnn import network
from faster_rcnn.network import init_data, data_to_variable
from faster_rcnn.network import train_net_params, print_weight_grad
from faster_rcnn.faster_rcnn_vgg import FasterRCNN as FasterRCNN_VGG
from faster_rcnn.faster_rcnn_res import FasterRCNN as FasterRCNN_RES
from faster_rcnn.utils.timer import Timer
from val import test, id_match_test
from faster_rcnn.roi_data_layer.sampler import sampler
from faster_rcnn.roi_data_layer.roidb import extract_roidb
from faster_rcnn.roi_data_layer.roibatchLoader import roibatchLoader
from faster_rcnn.fast_rcnn.config import cfg, cfg_from_file
try:
from termcolor import cprint
except ImportError:
cprint = None
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
def log_print(text, color='blue', on_color=None, attrs=None):
    """Print *text*, colored via termcolor's ``cprint`` when it is available."""
    if cprint is None:
        # termcolor is not installed: fall back to a plain print.
        print(text)
    else:
        cprint(text, color=color, on_color=on_color, attrs=attrs)
# hyper-parameters
# ------------
imdb_name = 'voc_2007_trainval'
test_name = 'voc_2007_test'
# imdb_name = 'coco_2017_train'
# test_name = 'coco_2017_val'
# imdb_name = 'CaltechPedestrians_train'
# test_name = 'CaltechPedestrians_test'
cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'
model_dir = 'data/pretrained_model/'
output_dir = 'models/saved_model3'
pre_model_name = 'voc_2007_trainval_14_vgg16_0.7_b1.h5'
pretrained_model = model_dir + pre_model_name
start_epoch = 1
end_epoch = 10
lr_decay_step = 5
lr_decay = 0.1
rand_seed = 1024
_DEBUG = True
use_tensorboard = True
remove_all_log = True # remove all historical experiments in TensorBoard
exp_name = None # the previous experiment name in TensorBoard
# ------------
if rand_seed is not None:
np.random.seed(rand_seed)
# load config
cfg_from_file(cfg_file)
fg_thresh = cfg.TRAIN.RPN_POSITIVE_OVERLAP
is_resnet = cfg.RESNET.IS_TRUE
batch_size = cfg.TRAIN.IMS_PER_BATCH
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
disp_interval = cfg.TRAIN.DISPLAY
log_interval = cfg.TRAIN.LOG_IMAGE_ITERS
save_interval = cfg.TRAIN.SNAPSHOT_ITERS
# load data
imdb, roidb, ratio_list, ratio_index = extract_roidb(imdb_name)
test_imdb, test_roidb, _, _ = extract_roidb(test_name)
train_size = len(roidb)
sampler_batch = sampler(train_size, batch_size, cfg.TRIPLET.IS_TRUE)
dataset = roibatchLoader(imdb, roidb, ratio_list, ratio_index, batch_size,
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
sampler=sampler_batch, num_workers=0)
# load net
if is_resnet:
model_name = cfg.RESNET.MODEL
cfg.TRAIN.DOUBLE_BIAS = False
cfg.TRAIN.WEIGHT_DECAY = 0.0001
net = FasterRCNN_RES(classes=imdb.classes, debug=_DEBUG)
net.init_module()
else:
model_name = 'vgg16'
net = FasterRCNN_VGG(classes=imdb.classes, debug=_DEBUG)
net.init_module()
if cfg.TRIPLET.IS_TRUE:
model_name += '_' + cfg.TRIPLET.LOSS
# network.load_net(pretrained_model, net)
# person_key = 15 (pascal_voc) user_defined_coco_set = 1
#network.load_net_pedestrians(pretrained_model, net, person_key=15)
blob = init_data(is_cuda=True)
# set net to be prepared to train
net.cuda()
params = train_net_params(net, cfg, lr)
optimizer = torch.optim.SGD(params, momentum=momentum)
def make_dir(output_dir):
    """Create *output_dir* (including parents) if it does not already exist.

    Using ``exist_ok=True`` avoids the check-then-create race of the previous
    ``os.path.exists`` guard when several processes start concurrently.
    """
    os.makedirs(output_dir, exist_ok=True)
make_dir(output_dir)
# tensorboard (via pycrayon); silently disabled if the client is unavailable
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
    print('TENSORBOARD IS ON')
    cc = CrayonClient(hostname='127.0.0.1')
    if remove_all_log:
        cc.remove_all_experiments()
    if exp_name is None:
        # auto-name the experiment from dataset, model, and timestamp
        name = '{}_{}'.format(imdb_name, model_name)
        exp_name = datetime.now().strftime(name+'_%m-%d_%H-%M')
        exp = cc.create_experiment(exp_name)
    else:
        exp = cc.open_experiment(exp_name)
iters_per_epoch = int(train_size / batch_size)
# training bookkeeping: running loss, best-precision tracking, step counters
train_loss = 0
previous_precision = 0.
descend = 0
step_cnt = 0   # steps since the last console log (reset each display)
cnt = 0        # total steps across the whole run (never reset)
re_cnt = False
t = Timer()
t.tic()
from math import isnan
# Main training loop: one pass over the roidb per epoch, with periodic
# console/tensorboard logging, a per-epoch checkpoint, and an optional
# test-set evaluation once training precision is high enough.
# NOTE(review): written against an old PyTorch API (`Variable`,
# `tensor.data[0]`, `.numpy()[0]` on 0-dim results) — will not run unmodified
# on PyTorch >= 0.4.
for epoch in range(start_epoch, end_epoch+1):
    pf, tot = 0., 0   # running sum of TP% at display points, and their count
    tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0., 0., 0., 0.
    rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
    net.train()
    # step the learning rate every lr_decay_step epochs (rebuilds optimizer)
    if epoch > 1 and (epoch-1) % lr_decay_step == 0:
        lr *= lr_decay
        params = train_net_params(net, cfg, lr)
        optimizer = torch.optim.SGD(params, momentum=momentum)
    data_iter = iter(dataloader)
    for step in range(iters_per_epoch):
        # get one batch
        data = next(data_iter)
        (im_data, im_info, gt_boxes, num_boxes) = data_to_variable(blob, data)
        # forward
        net.zero_grad()
        net(im_data, im_info, gt_boxes, num_boxes)
        if _DEBUG:
            # accumulate per-step diagnostics exposed by the network
            tp += float(net.tp)
            tn += float(net.tn)
            fp += float(net.fp)
            fg += net.fg_cnt
            bg += net.bg_cnt
            tp_box += float(net.rpn.tp)
            fg_box += net.rpn.fg_box
            # NOTE(review): these two accumulators look swapped — rpn_box
            # receives cross_entropy and rpn_cls receives loss_box, so the
            # logged rpn_cls/rpn_box values are exchanged.  Confirm before
            # trusting those log fields.
            rpn_box += net.rpn.cross_entropy.data.cpu().numpy()[0]
            rpn_cls += net.rpn.loss_box.data.cpu().numpy()[0]
            rcnn_box += net.loss_box.data.cpu().numpy()[0]
            rcnn_cls += net.cross_entropy.data.cpu().numpy()[0]
            sim_loss += net.triplet_loss.data.cpu().numpy()[0] if cfg.TRIPLET.IS_TRUE else 0.
        # total loss = RPN loss + RCNN head loss
        loss = net.rpn.loss + net.loss
        if isnan(loss):
            # dump the offending batch for debugging; training continues
            print(gt_boxes)
            print(net.rpn.loss, net.loss)
        train_loss += loss.data[0]
        step_cnt += 1
        cnt += 1
        # backward
        optimizer.zero_grad() # clear grad
        loss.backward()
        network.clip_gradient(net, 10.)  # clip to stabilize training
        # print_weight_grad(net)
        optimizer.step()
        # ---- periodic console logging ----
        if step % disp_interval == 0 and step > 0:
            duration = t.toc(average=False)
            fps = step_cnt / duration
            log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch) --[epoch %2d] --[iter %4d/%4d]' % (
                step, train_loss / step_cnt, fps, 1./fps, epoch, step, iters_per_epoch)
            log_print(log_text, color='green', attrs=['bold'])
            if _DEBUG:
                if fg == 0 or bg == 0:
                    pass  # no fg/bg samples yet; percentages would divide by zero
                else:
                    tot += 1
                    pf += tp/fg*100
                    match_rate = net.match/net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                    log_print('\tEP: %.2f%% PR: %.2f%% TP: %.2f%%, TF: %.2f%%, fg/bg=(%d/%d), TD: %.2f%%' %
                              (tp_box/fg_box*100, tp/(tp+fp)*100, tp/fg*100., tn/bg*100., fg/step_cnt, bg/step_cnt, match_rate))
                    log_print('\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box: %.4f, sim_loss: %.4f' % (
                        rpn_cls/step_cnt, rpn_box/step_cnt, rcnn_cls/step_cnt, rcnn_box/step_cnt, sim_loss/step_cnt )
                    )
            re_cnt = True  # request a counter reset below
        # ---- periodic tensorboard logging ----
        if use_tensorboard and cnt % log_interval == 0 and cnt > 0:
            exp.add_scalar_value('train_loss', train_loss / step_cnt, step=cnt)
            exp.add_scalar_value('learning_rate', lr, step=cnt)
            if _DEBUG:
                match_rate = net.match / net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
                triplet_loss = net.triplet_loss.data.cpu().numpy() if cfg.TRIPLET.IS_TRUE else 0.
                exp.add_scalar_value('true_positive', tp/fg*100., step=cnt)
                exp.add_scalar_value('true_negative', tn/bg*100., step=cnt)
                exp.add_scalar_value('precision', tp / (tp+fp) * 100., step=cnt)
                exp.add_scalar_value('true_distance', match_rate, step=cnt)
                losses = {'rpn_cls': float(rpn_cls/step_cnt),
                          'rpn_box': float(rpn_box/step_cnt),
                          'rcnn_cls': float(rcnn_cls/step_cnt),
                          'rcnn_box': float(rcnn_box/step_cnt),
                          'sim_loss': float(sim_loss/step_cnt)}
                exp.add_scalar_dict(losses, step=cnt)
        # reset windowed statistics after each console log
        if re_cnt:
            train_loss = 0
            tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0, 0, 0., 0
            rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
            net.reset_match_count()
            step_cnt = 0
            t.tic()
            re_cnt = False
    # ---- end of epoch: always checkpoint ----
    # if epoch % save_interval == 0 and cnt > 0:
    save_dir = os.path.join(output_dir, model_name)
    make_dir(save_dir)
    save_name = os.path.join(save_dir, '{}_{}_{}_{}_b{}.h5'
                             .format(imdb_name, epoch, model_name, fg_thresh, batch_size))
    network.save_net(save_name, net)
    print('save model: {}'.format(save_name))
    # evaluate on the test set only once mean training TP% exceeds 80
    if pf/tot > 80:
        print('Entering Test Phase ...')
        f = open('PrecisionAndRecall.txt', 'a')
        prec, rec = test(save_name, net, test_imdb, test_roidb)
        match = id_match_test(save_name, net, test_imdb, test_roidb, cfg.TRIPLET.LOSS) if cfg.TRIPLET.IS_TRUE else 0.
        f.write(save_name + ' ----[prec: {:.2f}%, rec: {:.2f}%] / {:.2f}%\n'.format(prec, rec, match))
        f.close()
        # track best precision; warn (but keep training) when it drops
        if previous_precision == 0.:
            previous_precision = prec
        else:
            if previous_precision > prec:
                print('Precision decreased {:.2f}% -> {:.2f}% ...' \
                      .format(previous_precision, prec))
                import warnings
                warnings.warn('test set Precision decreased. Keep Watching')
            else:
                previous_precision = prec
| 36.237918
| 122
| 0.627924
|
import os
import torch
import numpy as np
import math
from torch.autograd import Variable
from datetime import datetime
from faster_rcnn import network
from faster_rcnn.network import init_data, data_to_variable
from faster_rcnn.network import train_net_params, print_weight_grad
from faster_rcnn.faster_rcnn_vgg import FasterRCNN as FasterRCNN_VGG
from faster_rcnn.faster_rcnn_res import FasterRCNN as FasterRCNN_RES
from faster_rcnn.utils.timer import Timer
from val import test, id_match_test
from faster_rcnn.roi_data_layer.sampler import sampler
from faster_rcnn.roi_data_layer.roidb import extract_roidb
from faster_rcnn.roi_data_layer.roibatchLoader import roibatchLoader
from faster_rcnn.fast_rcnn.config import cfg, cfg_from_file
try:
from termcolor import cprint
except ImportError:
cprint = None
try:
from pycrayon import CrayonClient
except ImportError:
CrayonClient = None
def log_print(text, color='blue', on_color=None, attrs=None):
if cprint is not None:
cprint(text, color=color, on_color=on_color, attrs=attrs)
else:
print(text)
imdb_name = 'voc_2007_trainval'
test_name = 'voc_2007_test'
cfg_file = 'experiments/cfgs/faster_rcnn_end2end.yml'
model_dir = 'data/pretrained_model/'
output_dir = 'models/saved_model3'
pre_model_name = 'voc_2007_trainval_14_vgg16_0.7_b1.h5'
pretrained_model = model_dir + pre_model_name
start_epoch = 1
end_epoch = 10
lr_decay_step = 5
lr_decay = 0.1
rand_seed = 1024
_DEBUG = True
use_tensorboard = True
remove_all_log = True exp_name = None
if rand_seed is not None:
np.random.seed(rand_seed)
cfg_from_file(cfg_file)
fg_thresh = cfg.TRAIN.RPN_POSITIVE_OVERLAP
is_resnet = cfg.RESNET.IS_TRUE
batch_size = cfg.TRAIN.IMS_PER_BATCH
lr = cfg.TRAIN.LEARNING_RATE
momentum = cfg.TRAIN.MOMENTUM
disp_interval = cfg.TRAIN.DISPLAY
log_interval = cfg.TRAIN.LOG_IMAGE_ITERS
save_interval = cfg.TRAIN.SNAPSHOT_ITERS
imdb, roidb, ratio_list, ratio_index = extract_roidb(imdb_name)
test_imdb, test_roidb, _, _ = extract_roidb(test_name)
train_size = len(roidb)
sampler_batch = sampler(train_size, batch_size, cfg.TRIPLET.IS_TRUE)
dataset = roibatchLoader(imdb, roidb, ratio_list, ratio_index, batch_size,
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size,
sampler=sampler_batch, num_workers=0)
if is_resnet:
model_name = cfg.RESNET.MODEL
cfg.TRAIN.DOUBLE_BIAS = False
cfg.TRAIN.WEIGHT_DECAY = 0.0001
net = FasterRCNN_RES(classes=imdb.classes, debug=_DEBUG)
net.init_module()
else:
model_name = 'vgg16'
net = FasterRCNN_VGG(classes=imdb.classes, debug=_DEBUG)
net.init_module()
if cfg.TRIPLET.IS_TRUE:
model_name += '_' + cfg.TRIPLET.LOSS
blob = init_data(is_cuda=True)
net.cuda()
params = train_net_params(net, cfg, lr)
optimizer = torch.optim.SGD(params, momentum=momentum)
def make_dir(output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
make_dir(output_dir)
use_tensorboard = use_tensorboard and CrayonClient is not None
if use_tensorboard:
print('TENSORBOARD IS ON')
cc = CrayonClient(hostname='127.0.0.1')
if remove_all_log:
cc.remove_all_experiments()
if exp_name is None:
name = '{}_{}'.format(imdb_name, model_name)
exp_name = datetime.now().strftime(name+'_%m-%d_%H-%M')
exp = cc.create_experiment(exp_name)
else:
exp = cc.open_experiment(exp_name)
iters_per_epoch = int(train_size / batch_size)
train_loss = 0
previous_precision = 0.
descend = 0
step_cnt = 0
cnt = 0
re_cnt = False
t = Timer()
t.tic()
from math import isnan
for epoch in range(start_epoch, end_epoch+1):
pf, tot = 0., 0
tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0., 0., 0., 0.
rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
net.train()
if epoch > 1 and (epoch-1) % lr_decay_step == 0:
lr *= lr_decay
params = train_net_params(net, cfg, lr)
optimizer = torch.optim.SGD(params, momentum=momentum)
data_iter = iter(dataloader)
for step in range(iters_per_epoch):
data = next(data_iter)
(im_data, im_info, gt_boxes, num_boxes) = data_to_variable(blob, data)
net.zero_grad()
net(im_data, im_info, gt_boxes, num_boxes)
if _DEBUG:
tp += float(net.tp)
tn += float(net.tn)
fp += float(net.fp)
fg += net.fg_cnt
bg += net.bg_cnt
tp_box += float(net.rpn.tp)
fg_box += net.rpn.fg_box
rpn_box += net.rpn.cross_entropy.data.cpu().numpy()[0]
rpn_cls += net.rpn.loss_box.data.cpu().numpy()[0]
rcnn_box += net.loss_box.data.cpu().numpy()[0]
rcnn_cls += net.cross_entropy.data.cpu().numpy()[0]
sim_loss += net.triplet_loss.data.cpu().numpy()[0] if cfg.TRIPLET.IS_TRUE else 0.
loss = net.rpn.loss + net.loss
if isnan(loss):
print(gt_boxes)
print(net.rpn.loss, net.loss)
train_loss += loss.data[0]
step_cnt += 1
cnt += 1
optimizer.zero_grad() loss.backward()
network.clip_gradient(net, 10.)
optimizer.step()
if step % disp_interval == 0 and step > 0:
duration = t.toc(average=False)
fps = step_cnt / duration
log_text = 'step %d, loss: %.4f, fps: %.2f (%.2fs per batch) --[epoch %2d] --[iter %4d/%4d]' % (
step, train_loss / step_cnt, fps, 1./fps, epoch, step, iters_per_epoch)
log_print(log_text, color='green', attrs=['bold'])
if _DEBUG:
if fg == 0 or bg == 0:
pass
else:
tot += 1
pf += tp/fg*100
match_rate = net.match/net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
log_print('\tEP: %.2f%% PR: %.2f%% TP: %.2f%%, TF: %.2f%%, fg/bg=(%d/%d), TD: %.2f%%' %
(tp_box/fg_box*100, tp/(tp+fp)*100, tp/fg*100., tn/bg*100., fg/step_cnt, bg/step_cnt, match_rate))
log_print('\trpn_cls: %.4f, rpn_box: %.4f, rcnn_cls: %.4f, rcnn_box: %.4f, sim_loss: %.4f' % (
rpn_cls/step_cnt, rpn_box/step_cnt, rcnn_cls/step_cnt, rcnn_box/step_cnt, sim_loss/step_cnt )
)
re_cnt = True
if use_tensorboard and cnt % log_interval == 0 and cnt > 0:
exp.add_scalar_value('train_loss', train_loss / step_cnt, step=cnt)
exp.add_scalar_value('learning_rate', lr, step=cnt)
if _DEBUG:
match_rate = net.match / net.set * 100. if cfg.TRIPLET.IS_TRUE else 0.
triplet_loss = net.triplet_loss.data.cpu().numpy() if cfg.TRIPLET.IS_TRUE else 0.
exp.add_scalar_value('true_positive', tp/fg*100., step=cnt)
exp.add_scalar_value('true_negative', tn/bg*100., step=cnt)
exp.add_scalar_value('precision', tp / (tp+fp) * 100., step=cnt)
exp.add_scalar_value('true_distance', match_rate, step=cnt)
losses = {'rpn_cls': float(rpn_cls/step_cnt),
'rpn_box': float(rpn_box/step_cnt),
'rcnn_cls': float(rcnn_cls/step_cnt),
'rcnn_box': float(rcnn_box/step_cnt),
'sim_loss': float(sim_loss/step_cnt)}
exp.add_scalar_dict(losses, step=cnt)
if re_cnt:
train_loss = 0
tp, fp, tn, fg, bg, tp_box, fg_box = 0., 0., 0., 0, 0, 0., 0
rpn_cls, rpn_box, rcnn_cls, rcnn_box, sim_loss = 0., 0., 0., 0., 0.
net.reset_match_count()
step_cnt = 0
t.tic()
re_cnt = False
save_dir = os.path.join(output_dir, model_name)
make_dir(save_dir)
save_name = os.path.join(save_dir, '{}_{}_{}_{}_b{}.h5'
.format(imdb_name, epoch, model_name, fg_thresh, batch_size))
network.save_net(save_name, net)
print('save model: {}'.format(save_name))
if pf/tot > 80:
print('Entering Test Phase ...')
f = open('PrecisionAndRecall.txt', 'a')
prec, rec = test(save_name, net, test_imdb, test_roidb)
match = id_match_test(save_name, net, test_imdb, test_roidb, cfg.TRIPLET.LOSS) if cfg.TRIPLET.IS_TRUE else 0.
f.write(save_name + ' ----[prec: {:.2f}%, rec: {:.2f}%] / {:.2f}%\n'.format(prec, rec, match))
f.close()
if previous_precision == 0.:
previous_precision = prec
else:
if previous_precision > prec:
print('Precision decreased {:.2f}% -> {:.2f}% ...' \
.format(previous_precision, prec))
import warnings
warnings.warn('test set Precision decreased. Keep Watching')
else:
previous_precision = prec
| true
| true
|
1c497cc803b8be1b63fb9e21a689f8082660622d
| 600
|
py
|
Python
|
tester.py
|
sjsafranek/asset_server
|
e036b87f629dade7f52a8a3e2b63ace52b32a88f
|
[
"MIT"
] | null | null | null |
tester.py
|
sjsafranek/asset_server
|
e036b87f629dade7f52a8a3e2b63ace52b32a88f
|
[
"MIT"
] | null | null | null |
tester.py
|
sjsafranek/asset_server
|
e036b87f629dade7f52a8a3e2b63ace52b32a88f
|
[
"MIT"
] | null | null | null |
# Smoke test for the asset server's REST API: upload an asset, fetch it,
# delete it, then fetch again.  Each step prints the raw response and bails
# out on a non-200 status.  Requires the server on localhost:1111 and a
# local 'test.jpg' file.
import requests
# 1. Upload a test image as multipart form field 'uploadfile'.
r = requests.post("http://localhost:1111/api/v1/asset", files={
    'uploadfile': open('test.jpg','rb')
})
print(r.text)
if 200 != r.status_code:
    exit()
# The server returns the new asset's id in the JSON payload.
asset_id = r.json()['data']['asset_id']
# 2. Fetch the asset we just created.
r = requests.get("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
    exit()
# 3. Delete it.
r = requests.delete("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
    exit()
# 4. Fetch after deletion.
# NOTE(review): this still exits unless status == 200 — if the server returns
# 404 for a deleted asset (the usual convention), the script always exits
# here.  Confirm intended server behavior.
r = requests.get("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
    exit()
| 22.222222
| 78
| 0.66
|
import requests
r = requests.post("http://localhost:1111/api/v1/asset", files={
'uploadfile': open('test.jpg','rb')
})
print(r.text)
if 200 != r.status_code:
exit()
asset_id = r.json()['data']['asset_id']
r = requests.get("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
exit()
r = requests.delete("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
exit()
r = requests.get("http://localhost:1111/api/v1/asset/{0}".format(asset_id))
print(r.text)
if 200 != r.status_code:
exit()
| true
| true
|
1c497e6e46579745df1fb77e24b216d3ac5774a7
| 21,159
|
py
|
Python
|
ktrain/vision/models.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | null | null | null |
ktrain/vision/models.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | null | null | null |
ktrain/vision/models.py
|
husmen/ktrain
|
4147b0bd146deb513c6f94505908294a5163efac
|
[
"Apache-2.0"
] | null | null | null |
from ..imports import *
from .. import utils as U
from .wrn import create_wide_residual_network
PRETRAINED_RESNET50 = 'pretrained_resnet50'
PRETRAINED_MOBILENET = 'pretrained_mobilenet'
PRETRAINED_MOBILENETV3 = 'pretrained_mobilenetv3'
PRETRAINED_INCEPTION = 'pretrained_inception'
PRETRAINED_EFFICIENTNETB1 = 'pretrained_efficientnetb1'
PRETRAINED_EFFICIENTNETB7 = 'pretrained_efficientnetb7'
RESNET50 = 'resnet50'
MOBILENET = 'mobilenet'
MOBILENETV3 = 'mobilenetv3'
INCEPTION = 'inception'
EFFICIENTNETB1 = 'efficientnetb1'
EFFICIENTNETB7 = 'efficientnetb7'
CNN = 'default_cnn'
WRN22 = 'wrn22'
PRETRAINED_MODELS = [
PRETRAINED_RESNET50, PRETRAINED_MOBILENET, PRETRAINED_MOBILENETV3,
PRETRAINED_INCEPTION, PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7
]
PREDEFINED_MODELS = PRETRAINED_MODELS + [
RESNET50, MOBILENET, MOBILENETV3, INCEPTION, EFFICIENTNETB1, EFFICIENTNETB7
]
IMAGE_CLASSIFIERS = {
PRETRAINED_RESNET50: '50-layer Residual Network (pretrained on ImageNet)',
RESNET50: '50-layer Resididual Network (randomly initialized) [https://arxiv.org/abs/1512.03385]',
PRETRAINED_MOBILENET: 'MobileNet Neural Network (pretrained on ImageNet)',
MOBILENET: 'MobileNet Neural Network (randomly initialized) [https://arxiv.org/abs/1704.04861]',
PRETRAINED_MOBILENETV3: 'MobileNetV3-Small Neural Network (pretrained on ImageNet)',
MOBILENETV3: 'MobileNetV3-Small Neural Network (randomly initialized) [https://arxiv.org/abs/1905.02244]',
PRETRAINED_INCEPTION: 'Inception Version 3 (pretrained on ImageNet)',
INCEPTION: 'Inception Version 3 (randomly initialized) [http://arxiv.org/abs/1512.00567]',
PRETRAINED_EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
PRETRAINED_EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
WRN22: '22-layer Wide Residual Network (randomly initialized)',
CNN : 'a default LeNet-like Convolutional Neural Network'}
def print_image_classifiers():
    """Print the identifier and description of every available image
    classification model, one per line."""
    for model_name, description in IMAGE_CLASSIFIERS.items():
        print(f"{model_name}: {description}")
def print_image_regression_models():
    """Print the identifier and description of every available image
    regression model (same registry as the classifiers), one per line."""
    for model_name, description in IMAGE_CLASSIFIERS.items():
        print(f"{model_name}: {description}")
def pretrained_datagen(data, name):
    """
    ```
    Reconfigures the ImageDataGenerator attached to *data* (in place) so it
    applies the normalization scheme expected by the selected pretrained
    network, and disables every generic normalization option that would
    otherwise conflict with it.
    No-op when *data* is falsy, *data* is not a Keras iterator, or *name*
    is not one of PRETRAINED_MODELS.
    Args:
        data (image.Iterator): Keras data iterator whose generator is modified
        name (str): one of PRETRAINED_MODELS
    ```
    """
    if not data or not U.is_iter(data): return
    idg = data.image_data_generator
    # model name -> (preprocessing function, ktrain preproc tag); both
    # EfficientNet variants share one scheme.  This replaces six copies of
    # the identical branch body in the original implementation.
    preproc_by_model = {
        PRETRAINED_RESNET50:       (pre_resnet50, 'resnet50'),
        PRETRAINED_MOBILENET:      (pre_mobilenet, 'mobilenet'),
        PRETRAINED_MOBILENETV3:    (pre_mobilenetv3small, 'mobilenetv3'),
        PRETRAINED_INCEPTION:      (pre_inception, 'inception'),
        PRETRAINED_EFFICIENTNETB1: (pre_efficientnet, 'efficientnet'),
        PRETRAINED_EFFICIENTNETB7: (pre_efficientnet, 'efficientnet'),
    }
    if name not in preproc_by_model:
        return
    preproc_fn, tag = preproc_by_model[name]
    idg.preprocessing_function = preproc_fn
    idg.ktrain_preproc = tag
    # the model-specific preprocessing function supplies all normalization;
    # turn off every generic scheme so nothing is applied twice
    idg.rescale = None
    idg.featurewise_center = False
    idg.samplewise_center = False
    idg.featurewise_std_normalization = False
    idg.samplewise_std_normalization = False
    idg.zca_whitening = False
    return
def image_classifier(name,
                     train_data,
                     val_data=None,
                     freeze_layers=None,
                     metrics=['accuracy'],
                     optimizer_name = U.DEFAULT_OPT,
                     multilabel=None,
                     pt_fc = [],
                     pt_ps = [],
                     verbose=1):
    """
    ```
    Convenience wrapper around image_model() that returns a compiled,
    pre-defined/pre-trained model ready to be trained or fine-tuned for
    multi-class classification.  All layers are trainable/unfrozen by default.
    Args:
        name (string): one of the models shown by ktrain.vision.print_image_classifiers
        train_data (image.Iterator): train data (its generator may be modified in place)
        val_data (image.Iterator): validation data (its generator may be modified in place)
        freeze_layers (int): number of beginning layers to make untrainable.
                             If None, all layers except the new Dense layers
                             are frozen/untrainable.
        metrics (list): metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        multilabel(bool): True -> multilabel classification (labels not
                          mutually exclusive); False -> binary/multiclass;
                          None -> inferred from the data.
        pt_fc (list of ints): hidden units of extra Dense layers before the
                              final Dense layer (pretrained models only)
        pt_ps (list of floats): dropout probabilities applied before each
                                extra Dense layer (pretrained models only)
        verbose (int): verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    shared_kwargs = dict(val_data=val_data,
                         freeze_layers=freeze_layers,
                         metrics=metrics,
                         optimizer_name=optimizer_name,
                         multilabel=multilabel,
                         pt_fc=pt_fc,
                         pt_ps=pt_ps,
                         verbose=verbose)
    return image_model(name, train_data, **shared_kwargs)
def image_regression_model(name,
                          train_data,
                          val_data=None,
                          freeze_layers=None,
                          metrics=['mae'],
                          optimizer_name = U.DEFAULT_OPT,
                          pt_fc = [],
                          pt_ps = [],
                          verbose=1):
    """
    ```
    Convenience wrapper around image_model() that returns a compiled,
    pre-defined/pre-trained model ready to be trained or fine-tuned for
    image regression.  All layers are trainable/unfrozen by default.
    Args:
        name (string): one of the models shown by ktrain.vision.print_image_regression_models
        train_data (image.Iterator): train data (its generator may be modified in place)
        val_data (image.Iterator): validation data (its generator may be modified in place)
        freeze_layers (int): number of beginning layers to make untrainable.
                             If None, all layers except the new Dense layers
                             are frozen/untrainable.
        metrics (list): metrics to use
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        pt_fc (list of ints): hidden units of extra Dense layers before the
                              final Dense layer (pretrained models only)
        pt_ps (list of floats): dropout probabilities applied before each
                                extra Dense layer (pretrained models only)
        verbose (int): verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    shared_kwargs = dict(val_data=val_data,
                         freeze_layers=freeze_layers,
                         metrics=metrics,
                         optimizer_name=optimizer_name,
                         pt_fc=pt_fc,
                         pt_ps=pt_ps,
                         verbose=verbose)
    # regression targets are scalars, so multilabel is always disabled
    return image_model(name, train_data, multilabel=False, **shared_kwargs)
def image_model( name,
                 train_data,
                 val_data=None,
                 freeze_layers=None,
                 metrics=None,
                 optimizer_name = U.DEFAULT_OPT,
                 multilabel=None,
                 pt_fc = None,
                 pt_ps = None,
                 verbose=1):
    """
    ```
    Returns a pre-defined/pre-trained model ready to be trained/fine-tuned
    for multi-class classification or regression. By default, all layers are
    trainable/unfrozen.
    Args:
        name (string): one of model shown on ktrain.vision.print_image_classifiers
        train_data (image.Iterator): train data. Note: Will be manipulated here!
        val_data (image.Iterator): validation data.  Note: Will be manipulated here!
        freeze_layers (int):  number of beginning layers to make untrainable
                            If None, then all layers except new Dense layers
                            will be frozen/untrainable.
        metrics (list):  metrics to use.  If None, defaults to ['accuracy']
                         (replaced with ['mae'] when a regression problem is
                         detected).
        optimizer_name(str): name of Keras optimizer (e.g., 'adam', 'sgd')
        multilabel(bool):  If True, model will be build to support
                           multilabel classification (labels are not mutually exclusive).
                           If False, binary/multiclassification model will be returned.
                           If None, multilabel status will be inferred from data.
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model.
                                Only takes effect if name in PRETRAINED_MODELS
        pt_ps (list of floats): dropout probabilities to use before
                                      each extra Dense layer in pretrained model.
                                      Only takes effect if name in PRETRAINED_MODELS
        verbose (int):         verbosity
    Return:
        model(Model): the compiled model ready to be fine-tuned/trained
    ```
    """
    # Normalize None sentinels; avoids the shared-mutable-default-argument
    # pitfall while remaining backward-compatible with callers that pass
    # explicit lists.
    if metrics is None: metrics = ['accuracy']
    if pt_fc is None: pt_fc = []
    if pt_ps is None: pt_ps = []
    # arg check
    U.data_arg_check(train_data=train_data, train_required=True)
    if name not in IMAGE_CLASSIFIERS:
        raise ValueError('Unknown or unsupported model: %s' % (name))
    if not U.is_iter(train_data):
        raise ValueError('train_data must be an Keras iterator ' +\
                        '(e.g., DirectoryIterator, DataframIterator, '+ \
                        'NumpyArrayIterator) - please use the ktrain.data.images_from* ' +\
                        'functions')
    # MobileNetV3 only exists in newer TF/Keras releases
    if name in [PRETRAINED_MOBILENETV3, MOBILENETV3] and not HAS_MOBILENETV3:
        raise ValueError(f'You chose {name}, but it does not appear to be available in your version of TensorFlow.')
    # set pretrained flag
    pretrained = name in PRETRAINED_MODELS
    # freeze_layers is only meaningful with ImageNet-pretrained weights
    if not pretrained and freeze_layers is not None and freeze_layers > 0:
        warnings.warn('Only pretrained models (e.g., pretrained_resnet50) support freeze_layers. ' +\
                       'Setting freeze_layers to 0. Use one of the following models if' +\
                       'desiring a model pretrained on ImageNet: %s' % (PRETRAINED_MODELS))
        freeze_layers = 0
    if pretrained and val_data is None:
        raise ValueError('val_data is required if selecting a pretrained model, '+\
                          'as normalization scheme will be altered.')
    # swap in the model-specific normalization scheme on both generators
    if pretrained:
        pretrained_datagen(train_data, name)
        pretrained_datagen(val_data, name)
        U.vprint('The normalization scheme has been changed for use with a %s' % (name) +\
                 ' model. If you decide to use a different model, please reload your' +\
                 ' dataset with a ktrain.vision.data.images_from* function.\n', verbose=verbose)
    # determine if multilabel
    if multilabel is None:
        multilabel = U.is_multilabel(train_data)
    # 1-D targets (and not multilabel) indicate a regression problem
    is_regression = False
    if not multilabel and len(train_data[0][-1].shape) == 1: is_regression = True
    # select loss and final activation for the detected task type
    loss_func = 'categorical_crossentropy'
    activation = 'softmax'
    if multilabel:
        loss_func = 'binary_crossentropy'
        activation = 'sigmoid'
    elif is_regression:
        loss_func = 'mse'
        activation = None
        # accuracy is meaningless for regression; substitute mean absolute error
        if metrics == ['accuracy']: metrics = ['mae']
    U.vprint("Is Multi-Label? %s" % (multilabel), verbose=verbose)
    U.vprint("Is Regression? %s" % (is_regression), verbose=verbose)
    # determine number of classes and input shape from the data
    num_classes = 1 if is_regression else U.nclasses_from_data(train_data)
    input_shape = U.shape_from_data(train_data)
    #------------
    # build model
    #------------
    model = build_visionmodel(name,
                              num_classes,
                              input_shape=input_shape,
                              freeze_layers=freeze_layers,
                              activation=activation,
                              pt_fc = pt_fc,
                              pt_ps = pt_ps)
    model.compile(optimizer=optimizer_name, loss=loss_func, metrics=metrics)
    return model
def build_visionmodel(name,
                      num_classes,
                      input_shape=(224,224,3),
                      freeze_layers=2,
                      activation='softmax',
                      pt_fc=[],
                      pt_ps = []):
    """Dispatch to the builder matching *name* and return the (uncompiled)
    Keras model; raises ValueError for unrecognized names."""
    known = name in PREDEFINED_MODELS or name in (CNN, WRN22)
    if not known:
        raise ValueError('Unknown model: %s' % (name))
    if name == CNN:
        model = build_cnn(num_classes,
                          input_shape=input_shape,
                          activation=activation)
    elif name == WRN22:
        model = create_wide_residual_network(input_shape, nb_classes=num_classes,
                                             N=3, k=6, dropout=0.00,
                                             activation=activation, verbose=0)
    else:
        # any remaining known name is one of the predefined Keras architectures
        model = build_predefined(name, num_classes,
                                 input_shape=input_shape,
                                 freeze_layers=freeze_layers,
                                 activation=activation,
                                 pt_fc=pt_fc,
                                 pt_ps=pt_ps)
    U.vprint('%s model created.' % (name))
    return model
def build_cnn(num_classes,
              input_shape=(28,28,1),
              activation='softmax'):
    """Assemble the default LeNet-style CNN: three convolutional stages with
    pooling/dropout, then a dense head ending in *num_classes* units using
    *activation*."""
    conv_kwargs = dict(activation='relu', kernel_initializer='he_normal')
    layer_stack = [
        Conv2D(32, kernel_size=(3, 3), input_shape=input_shape, **conv_kwargs),
        Conv2D(32, kernel_size=(3, 3), **conv_kwargs),
        MaxPooling2D((2, 2)),
        Dropout(0.20),
        Conv2D(64, (3, 3), padding='same', **conv_kwargs),
        Conv2D(64, (3, 3), padding='same', **conv_kwargs),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(128, (3, 3), padding='same', **conv_kwargs),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.25),
        Dense(num_classes, activation=activation),
    ]
    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
def build_predefined(
                     name,
                     num_classes,
                     input_shape=(224,224,3),
                     freeze_layers=None,
                     activation='softmax',
                     pt_fc=None,
                     pt_ps=None):
    """
    ```
    Builds a pre-defined architecture supported in Keras.
    Args:
        name (str): one of ktrain.vision.model.PREDEFINED_MODELS
        num_classes (int): # of classes
        input_shape (tuple): the input shape including channels
        freeze_layers (int): number of early layers to freeze.
                             Only takes effect if name in PRETRAINED_MODELS.
                             If None and name in PRETRAINED_MODELS,
                             all layers except the "custom head"
                             fully-connected (Dense) layers are frozen.
        activation (str): name of the Keras activation to use in final layer
        pt_fc (list of ints): number of hidden units in extra Dense layers
                                before final Dense layer of pretrained model
        pt_ps (list of floats): dropout probabilities to use before
                                each extra Dense layer in pretrained model
    Raises:
        ValueError: if *name* is unsupported or len(pt_fc) != len(pt_ps)
    ```
    """
    # normalize None sentinels (avoids shared mutable default arguments)
    if pt_fc is None: pt_fc = []
    if pt_ps is None: pt_ps = []
    # default parameters
    include_top = False
    input_tensor = None
    dropout = 0.5 # final dropout
    # only the 'pretrained_*' variants load ImageNet weights
    weights = 'imagenet' if name in PRETRAINED_MODELS else None
    # instantiate the backbone network
    if name in [RESNET50, PRETRAINED_RESNET50]:
        with warnings.catch_warnings():
            # ResNet50 construction emits deprecation warnings on some TF versions
            warnings.simplefilter('ignore')
            net = ResNet50(include_top=include_top,
                           weights=weights,
                           input_tensor=input_tensor,
                           input_shape = input_shape)
    elif name in [MOBILENET, PRETRAINED_MOBILENET]:
        net = MobileNet(include_top=include_top,
                        weights=weights,
                        input_tensor=input_tensor,
                        input_shape = input_shape)
    elif name in [MOBILENETV3, PRETRAINED_MOBILENETV3]:
        net = MobileNetV3Small(include_top=include_top,
                               weights=weights,
                               input_tensor=input_tensor,
                               input_shape = input_shape)
    elif name in [INCEPTION, PRETRAINED_INCEPTION]:
        net = InceptionV3(include_top=include_top,
                          weights=weights,
                          input_tensor=input_tensor,
                          input_shape = input_shape)
    elif name in [EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB1]:
        net = EfficientNetB1(include_top=include_top,
                             weights=weights,
                             input_tensor=input_tensor,
                             input_shape = input_shape)
    elif name in [EFFICIENTNETB7, PRETRAINED_EFFICIENTNETB7]:
        net = EfficientNetB7(include_top=include_top,
                             weights=weights,
                             input_tensor=input_tensor,
                             input_shape = input_shape)
    else:
        raise ValueError('Unsupported model: %s' % (name))
    # freeze_layers=None: freeze the entire backbone so only the new head trains
    if freeze_layers is None:
        for layer in net.layers:
            layer.trainable = False
    x = net.output
    x = Flatten()(x)
    # optional extra Dense layers (each with its own dropout) in the custom head
    if name in PRETRAINED_MODELS:
        if len(pt_fc) != len(pt_ps):
            # message fixed: previously referenced nonexistent parameter names
            # ("xtra_fc"/"fc_dropouts") instead of pt_fc/pt_ps
            raise ValueError('size of pt_fc must match size of pt_ps')
        for i, fc in enumerate(pt_fc):
            p = pt_ps[i]
            fc_name = "fc%s" % (i)
            if p is not None:
                x = Dropout(p)(x)
            x = Dense(fc, activation='relu',
                      kernel_initializer='he_normal', name=fc_name)(x)
    # final classification/regression layer
    x = Dropout(dropout)(x)
    output_layer = Dense(num_classes, activation=activation, name=activation)(x)
    model = Model(inputs=net.input, outputs=output_layer)
    if freeze_layers is not None:
        # explicit freeze count: freeze the first N layers, train the rest
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False
        for layer in model.layers[freeze_layers:]:
            layer.trainable = True
    return model
| 42.745455
| 130
| 0.598658
|
from ..imports import *
from .. import utils as U
from .wrn import create_wide_residual_network
PRETRAINED_RESNET50 = 'pretrained_resnet50'
PRETRAINED_MOBILENET = 'pretrained_mobilenet'
PRETRAINED_MOBILENETV3 = 'pretrained_mobilenetv3'
PRETRAINED_INCEPTION = 'pretrained_inception'
PRETRAINED_EFFICIENTNETB1 = 'pretrained_efficientnetb1'
PRETRAINED_EFFICIENTNETB7 = 'pretrained_efficientnetb7'
RESNET50 = 'resnet50'
MOBILENET = 'mobilenet'
MOBILENETV3 = 'mobilenetv3'
INCEPTION = 'inception'
EFFICIENTNETB1 = 'efficientnetb1'
EFFICIENTNETB7 = 'efficientnetb7'
CNN = 'default_cnn'
WRN22 = 'wrn22'
PRETRAINED_MODELS = [
PRETRAINED_RESNET50, PRETRAINED_MOBILENET, PRETRAINED_MOBILENETV3,
PRETRAINED_INCEPTION, PRETRAINED_EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB7
]
PREDEFINED_MODELS = PRETRAINED_MODELS + [
RESNET50, MOBILENET, MOBILENETV3, INCEPTION, EFFICIENTNETB1, EFFICIENTNETB7
]
IMAGE_CLASSIFIERS = {
PRETRAINED_RESNET50: '50-layer Residual Network (pretrained on ImageNet)',
RESNET50: '50-layer Resididual Network (randomly initialized) [https://arxiv.org/abs/1512.03385]',
PRETRAINED_MOBILENET: 'MobileNet Neural Network (pretrained on ImageNet)',
MOBILENET: 'MobileNet Neural Network (randomly initialized) [https://arxiv.org/abs/1704.04861]',
PRETRAINED_MOBILENETV3: 'MobileNetV3-Small Neural Network (pretrained on ImageNet)',
MOBILENETV3: 'MobileNetV3-Small Neural Network (randomly initialized) [https://arxiv.org/abs/1905.02244]',
PRETRAINED_INCEPTION: 'Inception Version 3 (pretrained on ImageNet)',
INCEPTION: 'Inception Version 3 (randomly initialized) [http://arxiv.org/abs/1512.00567]',
PRETRAINED_EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB1: 'EfficientNet-B1 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
PRETRAINED_EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet)',
EFFICIENTNETB7: 'EfficientNet-B7 Neural Network (pretrained on ImageNet) [https://arxiv.org/abs/1905.11946]',
WRN22: '22-layer Wide Residual Network (randomly initialized)',
CNN : 'a default LeNet-like Convolutional Neural Network'}
def print_image_classifiers():
for k,v in IMAGE_CLASSIFIERS.items():
print("%s: %s" % (k,v))
def print_image_regression_models():
    """Print the name and description of every available image regression model.

    NOTE(review): shares IMAGE_CLASSIFIERS with print_image_classifiers — the
    same architectures serve both tasks.
    """
    for model_name, description in IMAGE_CLASSIFIERS.items():
        print(f"{model_name}: {description}")
def pretrained_datagen(data, name):
    """Reconfigure *data*'s ImageDataGenerator for a pretrained model.

    Installs the model-specific preprocessing function and disables every
    other normalization option (rescaling, centering, std normalization,
    ZCA whitening), since pretrained weights expect their own scheme.

    Args:
        data: a Keras data iterator (must expose ``image_data_generator``);
              no-op if falsy or not an iterator.
        name (str): one of the ``pretrained_*`` model-name constants;
              unrecognized names are ignored.
    """
    if not data or not U.is_iter(data): return
    # name -> (preprocessing function, ktrain preprocessing tag).
    # Replaces six near-identical copy-pasted branches.
    preproc_map = {
        PRETRAINED_RESNET50: (pre_resnet50, 'resnet50'),
        PRETRAINED_MOBILENET: (pre_mobilenet, 'mobilenet'),
        PRETRAINED_MOBILENETV3: (pre_mobilenetv3small, 'mobilenetv3'),
        PRETRAINED_INCEPTION: (pre_inception, 'inception'),
        PRETRAINED_EFFICIENTNETB1: (pre_efficientnet, 'efficientnet'),
        PRETRAINED_EFFICIENTNETB7: (pre_efficientnet, 'efficientnet'),
    }
    if name not in preproc_map:
        return
    preproc_func, preproc_tag = preproc_map[name]
    idg = data.image_data_generator
    idg.preprocessing_function = preproc_func
    idg.ktrain_preproc = preproc_tag
    # The pretrained preprocessing function performs all scaling itself.
    idg.rescale = None
    idg.featurewise_center = False
    idg.samplewise_center = False
    idg.featurewise_std_normalization = False
    idg.samplewise_std_normalization = False
    idg.zca_whitening = False
    return
def image_classifier(name,
                     train_data,
                     val_data=None,
                     freeze_layers=None,
                     metrics=['accuracy'],
                     optimizer_name = U.DEFAULT_OPT,
                     multilabel=None,
                     pt_fc = [],
                     pt_ps = [],
                     verbose=1):
    """Build and compile an image classifier; thin wrapper around image_model."""
    forwarded = dict(val_data=val_data,
                     freeze_layers=freeze_layers,
                     metrics=metrics,
                     optimizer_name=optimizer_name,
                     multilabel=multilabel,
                     pt_fc=pt_fc,
                     pt_ps=pt_ps,
                     verbose=verbose)
    return image_model(name, train_data, **forwarded)
def image_regression_model(name,
                           train_data,
                           val_data=None,
                           freeze_layers=None,
                           metrics=['mae'],
                           optimizer_name = U.DEFAULT_OPT,
                           pt_fc = [],
                           pt_ps = [],
                           verbose=1):
    """Build and compile an image regression model; forces multilabel=False."""
    forwarded = dict(val_data=val_data,
                     freeze_layers=freeze_layers,
                     metrics=metrics,
                     optimizer_name=optimizer_name,
                     multilabel=False,
                     pt_fc=pt_fc,
                     pt_ps=pt_ps,
                     verbose=verbose)
    return image_model(name, train_data, **forwarded)
def image_model( name,
                train_data,
                val_data=None,
                freeze_layers=None,
                metrics=['accuracy'],
                optimizer_name = U.DEFAULT_OPT,
                multilabel=None,
                pt_fc = [],
                pt_ps = [],
                verbose=1):
    """
    Build and compile a Keras vision model for classification or regression.

    Args:
        name (str): a key of IMAGE_CLASSIFIERS.
        train_data: Keras data iterator for training images.
        val_data: Keras data iterator for validation images; required for
                  pretrained models (their normalization is changed in place).
        freeze_layers (int): layers to freeze (pretrained models only).
        metrics (list): metrics for model.compile.
        optimizer_name: Keras optimizer name or instance.
        multilabel (bool): if None, inferred from train_data.
        pt_fc (list): extra Dense layer sizes for pretrained models.
        pt_ps (list): dropout values paired with pt_fc.
        verbose (int): verbosity of informational output.

    Returns:
        a compiled Keras model.

    Raises:
        ValueError: unknown model name, non-iterator train_data,
                    MobileNetV3 unavailable, or pretrained model w/o val_data.
    """
    # Validate inputs before any side effects.
    U.data_arg_check(train_data=train_data, train_required=True)
    if name not in list(IMAGE_CLASSIFIERS.keys()):
        raise ValueError('Unknown or unsupported model: %s' % (name))
    if not U.is_iter(train_data):
        raise ValueError('train_data must be an Keras iterator ' +\
                         '(e.g., DirectoryIterator, DataframIterator, '+ \
                         'NumpyArrayIterator) - please use the ktrain.data.images_from* ' +\
                         'functions')
    if name in [PRETRAINED_MOBILENETV3, MOBILENETV3] and not HAS_MOBILENETV3:
        raise ValueError(f'You chose {name}, but it does not appear to be available in your version of TensorFlow.')
    pretrained = True if name in PRETRAINED_MODELS else False
    # freeze_layers only makes sense when there are pretrained weights to keep.
    if not pretrained and freeze_layers is not None and freeze_layers > 0:
        warnings.warn('Only pretrained models (e.g., pretrained_resnet50) support freeze_layers. ' +\
                      'Setting freeze_layers to 0. Use one of the following models if' +\
                      'desiring a model pretrained on ImageNet: %s' % (PRETRAINED_MODELS))
        freeze_layers = 0
    if pretrained and val_data is None:
        raise ValueError('val_data is required if selecting a pretrained model, '+\
                         'as normalization scheme will be altered.')
    # NOTE: mutates both data generators in place (normalization scheme).
    if pretrained:
        pretrained_datagen(train_data, name)
        pretrained_datagen(val_data, name)
        U.vprint('The normalization scheme has been changed for use with a %s' % (name) +\
                 ' model. If you decide to use a different model, please reload your' +\
                 ' dataset with a ktrain.vision.data.images_from* function.\n', verbose=verbose)
    # Infer task type: multilabel classification, single-label, or regression.
    if multilabel is None:
        multilabel = U.is_multilabel(train_data)
    is_regression=False
    # 1-D targets (no one-hot axis) indicate regression.
    if not multilabel and len(train_data[0][-1].shape) == 1: is_regression=True
    # Select loss/activation per task.
    loss_func = 'categorical_crossentropy'
    activation = 'softmax'
    if multilabel:
        loss_func = 'binary_crossentropy'
        activation = 'sigmoid'
    elif is_regression:
        loss_func = 'mse'
        activation = None
        # accuracy is meaningless for regression; swap the default metric
        if metrics == ['accuracy']: metrics = ['mae']
    U.vprint("Is Multi-Label? %s" % (multilabel), verbose=verbose)
    U.vprint("Is Regression? %s" % (is_regression), verbose=verbose)
    num_classes = 1 if is_regression else U.nclasses_from_data(train_data)
    input_shape = U.shape_from_data(train_data)
    model = build_visionmodel(name,
                              num_classes,
                              input_shape=input_shape,
                              freeze_layers=freeze_layers,
                              activation=activation,
                              pt_fc = pt_fc,
                              pt_ps = pt_ps)
    model.compile(optimizer=optimizer_name, loss=loss_func, metrics=metrics)
    return model
def build_visionmodel(name,
                      num_classes,
                      input_shape=(224,224,3),
                      freeze_layers=2,
                      activation='softmax',
                      pt_fc=[],
                      pt_ps = []):
    """Dispatch to the constructor matching *name* and return the built model.

    Raises:
        ValueError: if *name* matches no known architecture.
    """
    if name == CNN:
        net = build_cnn(num_classes,
                        input_shape=input_shape,
                        activation=activation)
    elif name == WRN22:
        net = create_wide_residual_network(input_shape, nb_classes=num_classes,
                                           N=3, k=6, dropout=0.00,
                                           activation=activation, verbose=0)
    elif name in PREDEFINED_MODELS:
        net = build_predefined(name, num_classes,
                               input_shape=input_shape,
                               freeze_layers=freeze_layers,
                               activation=activation,
                               pt_fc=pt_fc,
                               pt_ps=pt_ps)
    else:
        raise ValueError('Unknown model: %s' % (name))
    U.vprint(f'{name} model created.')
    return net
def build_cnn(num_classes,
              input_shape=(28,28,1),
              activation='softmax'):
    """Build the default LeNet-like CNN: three conv stages, then a dense head.

    Args:
        num_classes (int): units in the output layer.
        input_shape (tuple): image shape (H, W, C).
        activation (str): output activation (None for regression).

    Returns:
        an uncompiled keras Sequential model.
    """
    layers = [
        # Stage 1: two 3x3 convs at 32 filters, downsample, light dropout.
        Conv2D(32, kernel_size=(3, 3), activation='relu',
               kernel_initializer='he_normal', input_shape=input_shape),
        Conv2D(32, kernel_size=(3, 3), activation='relu',
               kernel_initializer='he_normal'),
        MaxPooling2D((2, 2)),
        Dropout(0.20),
        # Stage 2: 64 filters with 'same' padding.
        Conv2D(64, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        Conv2D(64, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        # Stage 3: a single 128-filter conv.
        Conv2D(128, (3, 3), activation='relu', padding='same',
               kernel_initializer='he_normal'),
        Dropout(0.25),
        # Dense head.
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.25),
        Dense(num_classes, activation=activation),
    ]
    return Sequential(layers)
def build_predefined(
    name,
    num_classes,
    input_shape=(224,224,3),
    freeze_layers=None,
    activation='softmax',
    pt_fc=[],
    pt_ps=[]):
    """Build a Keras Applications network with a new classification/regression head.

    Args:
        name (str): a member of PREDEFINED_MODELS.
        num_classes (int): units in the final Dense layer.
        input_shape (tuple): input image shape (H, W, C).
        freeze_layers (int): number of leading layers to mark untrainable;
                             if None, the entire base network is frozen.
        activation (str): output activation (None for regression).
        pt_fc (list): sizes of extra Dense layers inserted before the output
                      (applied to pretrained models only).
        pt_ps (list): dropout values paired 1:1 with pt_fc (None = no dropout).

    Returns:
        an uncompiled keras Model.

    Raises:
        ValueError: if *name* is unsupported or len(pt_fc) != len(pt_ps).
    """
    include_top = False       # we attach our own head below
    input_tensor = None
    dropout = 0.5             # dropout applied just before the output layer
    # ImageNet weights only for the pretrained_* variants.
    weights = 'imagenet' if name in PRETRAINED_MODELS else None
    if name in [RESNET50, PRETRAINED_RESNET50]:
        # ResNet50 can emit noisy deprecation warnings on some TF versions.
        with warnings.catch_warnings():
            warnings.simplefilter('ignore')
            net = ResNet50(include_top=include_top,
                           weights=weights,
                           input_tensor=input_tensor,
                           input_shape = input_shape)
    elif name in [MOBILENET, PRETRAINED_MOBILENET]:
        net = MobileNet(include_top=include_top,
                        weights=weights,
                        input_tensor=input_tensor,
                        input_shape = input_shape)
    elif name in [MOBILENETV3, PRETRAINED_MOBILENETV3]:
        net = MobileNetV3Small(include_top=include_top,
                               weights=weights,
                               input_tensor=input_tensor,
                               input_shape = input_shape)
    elif name in [INCEPTION, PRETRAINED_INCEPTION]:
        net = InceptionV3(include_top=include_top,
                          weights=weights,
                          input_tensor=input_tensor,
                          input_shape = input_shape)
    elif name in [EFFICIENTNETB1, PRETRAINED_EFFICIENTNETB1]:
        net = EfficientNetB1(include_top=include_top,
                             weights=weights,
                             input_tensor=input_tensor,
                             input_shape = input_shape)
    elif name in [EFFICIENTNETB7, PRETRAINED_EFFICIENTNETB7]:
        net = EfficientNetB7(include_top=include_top,
                             weights=weights,
                             input_tensor=input_tensor,
                             input_shape = input_shape)
    else:
        raise ValueError('Unsupported model: %s' % (name))
    if freeze_layers is None:
        # Freeze the whole base network; the head built below is created
        # afterwards and therefore stays trainable.
        for layer in net.layers:
            layer.trainable = False
    x = net.output
    x = Flatten()(x)
    if name in PRETRAINED_MODELS:
        # fix: message previously said "size off xtra_fc ... fc_dropouts",
        # referring to parameter names that do not exist.
        if len(pt_fc) != len(pt_ps):
            raise ValueError('size of pt_fc must match size of pt_ps')
        # Optional extra fully-connected layers, each optionally preceded
        # by its own dropout.
        for i, fc in enumerate(pt_fc):
            p = pt_ps[i]
            fc_name = "fc%s" % (i)
            if p is not None:
                x = Dropout(p)(x)
            x = Dense(fc, activation='relu',
                      kernel_initializer='he_normal', name=fc_name)(x)
    x = Dropout(dropout)(x)
    output_layer = Dense(num_classes, activation=activation, name=activation)(x)
    model = Model(inputs=net.input, outputs=output_layer)
    if freeze_layers is not None:
        # Freeze exactly the first freeze_layers layers; unfreeze the rest.
        for layer in model.layers[:freeze_layers]:
            layer.trainable = False
        for layer in model.layers[freeze_layers:]:
            layer.trainable = True
    return model
| true
| true
|
1c497ec5d0c301db41eee7e775d56ae2985ce8dc
| 10,074
|
py
|
Python
|
octopus_deploy_swagger_client/models/root_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/models/root_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
octopus_deploy_swagger_client/models/root_resource.py
|
cvent/octopus-deploy-api-client
|
0e03e842e1beb29b132776aee077df570b88366a
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
"""
Octopus Server API
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: 2019.6.7+Branch.tags-2019.6.7.Sha.aa18dc6809953218c66f57eff7d26481d9b23d6a
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class RootResource(object):
    """Model of the Octopus Server API root resource.

    NOTE: originally produced by the swagger code generator; restyled by hand
    but behaviorally identical. Do not regenerate over this file blindly.
    """

    # Attribute name -> swagger type.
    swagger_types = {
        'id': 'str',
        'application': 'str',
        'version': 'str',
        'api_version': 'str',
        'installation_id': 'str',
        'is_early_access_program': 'bool',
        'has_long_term_support': 'bool',
        'last_modified_on': 'datetime',
        'last_modified_by': 'str',
        'links': 'dict(str, str)'
    }

    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'id': 'Id',
        'application': 'Application',
        'version': 'Version',
        'api_version': 'ApiVersion',
        'installation_id': 'InstallationId',
        'is_early_access_program': 'IsEarlyAccessProgram',
        'has_long_term_support': 'HasLongTermSupport',
        'last_modified_on': 'LastModifiedOn',
        'last_modified_by': 'LastModifiedBy',
        'links': 'Links'
    }

    def __init__(self, id=None, application=None, version=None, api_version=None, installation_id=None, is_early_access_program=False, has_long_term_support=False, last_modified_on=None, last_modified_by=None, links=None):  # noqa: E501
        """RootResource - a model defined in Swagger"""  # noqa: E501
        # Start every backing field at None, then run each non-None argument
        # through its property setter.
        for attr in self.swagger_types:
            setattr(self, '_' + attr, None)
        self.discriminator = None
        supplied = {
            'id': id,
            'application': application,
            'version': version,
            'api_version': api_version,
            'installation_id': installation_id,
            'is_early_access_program': is_early_access_program,
            'has_long_term_support': has_long_term_support,
            'last_modified_on': last_modified_on,
            'last_modified_by': last_modified_by,
            'links': links,
        }
        for attr, value in supplied.items():
            if value is not None:
                setattr(self, attr, value)

    @property
    def id(self):
        """str: the Id of this RootResource."""
        return self._id

    @id.setter
    def id(self, id):
        self._id = id

    @property
    def application(self):
        """str: the Application of this RootResource."""
        return self._application

    @application.setter
    def application(self, application):
        self._application = application

    @property
    def version(self):
        """str: the Version of this RootResource."""
        return self._version

    @version.setter
    def version(self, version):
        self._version = version

    @property
    def api_version(self):
        """str: the ApiVersion of this RootResource."""
        return self._api_version

    @api_version.setter
    def api_version(self, api_version):
        self._api_version = api_version

    @property
    def installation_id(self):
        """str: the InstallationId of this RootResource."""
        return self._installation_id

    @installation_id.setter
    def installation_id(self, installation_id):
        self._installation_id = installation_id

    @property
    def is_early_access_program(self):
        """bool: the IsEarlyAccessProgram flag of this RootResource."""
        return self._is_early_access_program

    @is_early_access_program.setter
    def is_early_access_program(self, is_early_access_program):
        self._is_early_access_program = is_early_access_program

    @property
    def has_long_term_support(self):
        """bool: the HasLongTermSupport flag of this RootResource."""
        return self._has_long_term_support

    @has_long_term_support.setter
    def has_long_term_support(self, has_long_term_support):
        self._has_long_term_support = has_long_term_support

    @property
    def last_modified_on(self):
        """datetime: the LastModifiedOn of this RootResource."""
        return self._last_modified_on

    @last_modified_on.setter
    def last_modified_on(self, last_modified_on):
        self._last_modified_on = last_modified_on

    @property
    def last_modified_by(self):
        """str: the LastModifiedBy of this RootResource."""
        return self._last_modified_by

    @last_modified_by.setter
    def last_modified_by(self, last_modified_by):
        self._last_modified_by = last_modified_by

    @property
    def links(self):
        """dict(str, str): the Links of this RootResource."""
        return self._links

    @links.setter
    def links(self, links):
        self._links = links

    def to_dict(self):
        """Return the model properties as a dict (one level of nesting)."""
        result = {}
        for attr in self.swagger_types:
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [v.to_dict() if hasattr(v, "to_dict") else v
                                for v in value]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                                for k, v in value.items()}
            else:
                result[attr] = value
        # Generated-code convention: merge dict items if the model subclasses dict.
        if issubclass(RootResource, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Two RootResource instances are equal when all fields match."""
        return isinstance(other, RootResource) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| 28.782857
| 236
| 0.611376
|
import pprint
import re
import six
class RootResource(object):
swagger_types = {
'id': 'str',
'application': 'str',
'version': 'str',
'api_version': 'str',
'installation_id': 'str',
'is_early_access_program': 'bool',
'has_long_term_support': 'bool',
'last_modified_on': 'datetime',
'last_modified_by': 'str',
'links': 'dict(str, str)'
}
attribute_map = {
'id': 'Id',
'application': 'Application',
'version': 'Version',
'api_version': 'ApiVersion',
'installation_id': 'InstallationId',
'is_early_access_program': 'IsEarlyAccessProgram',
'has_long_term_support': 'HasLongTermSupport',
'last_modified_on': 'LastModifiedOn',
'last_modified_by': 'LastModifiedBy',
'links': 'Links'
}
def __init__(self, id=None, application=None, version=None, api_version=None, installation_id=None, is_early_access_program=False, has_long_term_support=False, last_modified_on=None, last_modified_by=None, links=None):
self._id = None
self._application = None
self._version = None
self._api_version = None
self._installation_id = None
self._is_early_access_program = None
self._has_long_term_support = None
self._last_modified_on = None
self._last_modified_by = None
self._links = None
self.discriminator = None
if id is not None:
self.id = id
if application is not None:
self.application = application
if version is not None:
self.version = version
if api_version is not None:
self.api_version = api_version
if installation_id is not None:
self.installation_id = installation_id
if is_early_access_program is not None:
self.is_early_access_program = is_early_access_program
if has_long_term_support is not None:
self.has_long_term_support = has_long_term_support
if last_modified_on is not None:
self.last_modified_on = last_modified_on
if last_modified_by is not None:
self.last_modified_by = last_modified_by
if links is not None:
self.links = links
@property
def id(self):
return self._id
@id.setter
def id(self, id):
self._id = id
@property
def application(self):
return self._application
@application.setter
def application(self, application):
self._application = application
@property
def version(self):
return self._version
@version.setter
def version(self, version):
self._version = version
@property
def api_version(self):
return self._api_version
@api_version.setter
def api_version(self, api_version):
self._api_version = api_version
@property
def installation_id(self):
return self._installation_id
@installation_id.setter
def installation_id(self, installation_id):
self._installation_id = installation_id
@property
def is_early_access_program(self):
return self._is_early_access_program
@is_early_access_program.setter
def is_early_access_program(self, is_early_access_program):
self._is_early_access_program = is_early_access_program
@property
def has_long_term_support(self):
return self._has_long_term_support
@has_long_term_support.setter
def has_long_term_support(self, has_long_term_support):
self._has_long_term_support = has_long_term_support
@property
def last_modified_on(self):
return self._last_modified_on
@last_modified_on.setter
def last_modified_on(self, last_modified_on):
self._last_modified_on = last_modified_on
@property
def last_modified_by(self):
return self._last_modified_by
@last_modified_by.setter
def last_modified_by(self, last_modified_by):
self._last_modified_by = last_modified_by
@property
def links(self):
return self._links
@links.setter
def links(self, links):
self._links = links
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(RootResource, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
return pprint.pformat(self.to_dict())
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, RootResource):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c497f273191aaa9f08c21c995e05301e9578810
| 546
|
py
|
Python
|
python/collatz_conjecture.py
|
lsantosdemoura/clojure-algorithms
|
56696b7b6544f37d736135cac6b03342fdeb4825
|
[
"MIT"
] | null | null | null |
python/collatz_conjecture.py
|
lsantosdemoura/clojure-algorithms
|
56696b7b6544f37d736135cac6b03342fdeb4825
|
[
"MIT"
] | null | null | null |
python/collatz_conjecture.py
|
lsantosdemoura/clojure-algorithms
|
56696b7b6544f37d736135cac6b03342fdeb4825
|
[
"MIT"
] | null | null | null |
def calculate_sieve(number):
    """Print how many terms the Collatz sequence starting at *number* has.

    The start value itself counts as one term. Non-positive input prints a
    message and re-prompts via ask_number().
    """
    if number > 0:
        steps = 1
        current = number
        while current != 1:
            current = current // 2 if current % 2 == 0 else current * 3 + 1
            steps += 1
        print(steps)
    else:
        print(f'{number} is less than or equal to 0, enter another number please:')
        ask_number()
def ask_number():
    """Prompt for a positive integer and run the Collatz step counter on it."""
    calculate_sieve(int(input("Enter a number bigger than 0: ")))
# Script entry point: prompt once and print the Collatz sequence length.
if __name__ == '__main__':
    ask_number()
| 23.73913
| 83
| 0.53663
|
def calculate_sieve(number):
if number <= 0:
print(f'{number} is less than or equal to 0, enter another number please:')
ask_number()
else:
count = 1
while number != 1:
if number % 2 == 0:
number = number // 2
else:
number = (number * 3) + 1
count += 1
print(count)
def ask_number():
entered_number = int(input("Enter a number bigger than 0: "))
calculate_sieve(entered_number)
if __name__ == '__main__':
ask_number()
| true
| true
|
1c498059b0ab55361020f761725d41830c547370
| 2,423
|
py
|
Python
|
bayesvp/tests/test_likelihood.py
|
cameronliang/BayesVP
|
3a38e6fc8b85f96f402289fde74f996971edec93
|
[
"MIT"
] | 5
|
2017-10-10T20:24:05.000Z
|
2017-11-02T20:20:34.000Z
|
bayesvp/tests/test_likelihood.py
|
cameronliang/BayesVP
|
3a38e6fc8b85f96f402289fde74f996971edec93
|
[
"MIT"
] | 1
|
2019-11-15T18:17:19.000Z
|
2019-11-15T18:36:01.000Z
|
bayesvp/tests/test_likelihood.py
|
cameronliang/BayesVP
|
3a38e6fc8b85f96f402289fde74f996971edec93
|
[
"MIT"
] | 4
|
2018-05-22T14:30:23.000Z
|
2021-09-23T09:23:46.000Z
|
import unittest
import os
import sys
import numpy as np
from bayesvp.config import DefineParams
from bayesvp.likelihood import Posterior
from bayesvp.utilities import get_bayesvp_Dir
###############################################################################
# TEST CASE 1: OVI line with stock config file and spectrum
###############################################################################
class TCPosterior(unittest.TestCase):
    """Tests for bayesvp.likelihood.Posterior using the bundled OVI example.

    Expected values below are regression constants computed from the stock
    config_OVI.dat spectrum shipped with bayesvp.
    """

    def setUp(self):
        # read example config file
        code_path = get_bayesvp_Dir()
        self.config_ex = code_path + '/data/example/config_OVI.dat'
        self.config_params = DefineParams(self.config_ex)
        self.posterior = Posterior(self.config_params)

    def tearDown(self):
        # Remove output written during setUp; report but tolerate failures
        # (e.g. directory already gone).
        try:
            import shutil
            shutil.rmtree(self.config_params.output_path)
        except OSError as oserr:
            print(oserr)

    ###########################################################################
    # Basic Tests for likelihood, prior and posterior
    ###########################################################################

    def test_default_no_continuum(self):
        # The stock config should not enable continuum normalization.
        self.assertFalse(self.config_params.cont_normalize)

    def test_lnlike(self):
        # Log-likelihood at a point inside the prior volume.
        vp_params = np.array([15,20,0]) # logN, b, z
        correct = -344.55470583729573
        self.assertEqual(self.posterior.lnlike(vp_params),correct)

    def test_prior(self):
        # Inside the prior volume -> flat log-prior of 0.
        vp_params = np.array([15,20,0])
        correct = 0
        self.assertEqual(self.posterior.lnprior(vp_params),correct)
        # Outside of prior (logN)
        vp_params = np.array([19,20,0])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)
        # Outside of prior (b)
        vp_params = np.array([15,-10,0])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)
        # Outside of prior (z)
        vp_params = np.array([10,20,-1])
        correct = -np.inf
        self.assertEqual(self.posterior.lnprior(vp_params),correct)

    def test_call(self):
        # __call__ returns the log-posterior: lnlike inside the prior,
        # -inf outside it.
        vp_params = np.array([15,20,0])
        correct = -344.55470583729573
        self.assertEqual(self.posterior.__call__(vp_params),correct)
        vp_params = np.array([10,20,-1])
        correct = -np.inf
        self.assertEqual(self.posterior.__call__(vp_params),correct)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| 31.881579
| 79
| 0.570367
|
import unittest
import os
import sys
import numpy as np
from bayesvp.config import DefineParams
from bayesvp.likelihood import Posterior
from bayesvp.utilities import get_bayesvp_Dir
class TCPosterior(unittest.TestCase):
def setUp(self):
code_path = get_bayesvp_Dir()
self.config_ex = code_path + '/data/example/config_OVI.dat'
self.config_params = DefineParams(self.config_ex)
self.posterior = Posterior(self.config_params)
def tearDown(self):
try:
import shutil
shutil.rmtree(self.config_params.output_path)
except OSError as oserr:
print(oserr)
def test_default_no_continuum(self):
self.assertFalse(self.config_params.cont_normalize)
def test_lnlike(self):
vp_params = np.array([15,20,0]) correct = -344.55470583729573
self.assertEqual(self.posterior.lnlike(vp_params),correct)
def test_prior(self):
vp_params = np.array([15,20,0])
correct = 0
self.assertEqual(self.posterior.lnprior(vp_params),correct)
vp_params = np.array([19,20,0])
correct = -np.inf
self.assertEqual(self.posterior.lnprior(vp_params),correct)
vp_params = np.array([15,-10,0])
correct = -np.inf
self.assertEqual(self.posterior.lnprior(vp_params),correct)
vp_params = np.array([10,20,-1])
correct = -np.inf
self.assertEqual(self.posterior.lnprior(vp_params),correct)
def test_call(self):
vp_params = np.array([15,20,0])
correct = -344.55470583729573
self.assertEqual(self.posterior.__call__(vp_params),correct)
vp_params = np.array([10,20,-1])
correct = -np.inf
self.assertEqual(self.posterior.__call__(vp_params),correct)
if __name__ == '__main__':
unittest.main()
| true
| true
|
1c498119f6fa59f0759598353b4eb9eb224fdda7
| 1,104
|
py
|
Python
|
h2o-py/tests/testdir_misc/pyunit_download_all_logs.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_misc/pyunit_download_all_logs.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | null | null | null |
h2o-py/tests/testdir_misc/pyunit_download_all_logs.py
|
ChristosChristofidis/h2o-3
|
2a926c0950a98eff5a4c06aeaf0373e17176ecd8
|
[
"Apache-2.0"
] | 1
|
2020-12-18T19:20:02.000Z
|
2020-12-18T19:20:02.000Z
|
import sys, os
sys.path.insert(1, "../../")
import h2o
import random
def download_all_logs(ip, port):
    """Exercise h2o.download_all_logs with every dirname/filename combination.

    Connects to the H2O cluster at (ip, port), then for each supported
    argument combination downloads the logs, asserts the file exists, and
    removes it. Replaces four copy-pasted stanzas with a single loop.
    """
    # Connect to h2o
    h2o.init(ip, port)
    # Argument combinations: default, dirname+filename, dirname only, filename only.
    combos = (
        {},
        {'dirname': '.', 'filename': 'h2o_logs.txt'},
        {'dirname': '.'},
        {'filename': 'h2o_logs.txt'},
    )
    for kwargs in combos:
        log_location = h2o.download_all_logs(**kwargs)
        assert os.path.exists(log_location), \
            "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
        os.remove(log_location)
# h2o test harness entry point: parses argv for ip/port and runs the test.
if __name__ == "__main__":
    h2o.run_test(sys.argv, download_all_logs)
| 35.612903
| 118
| 0.712862
|
import sys, os
sys.path.insert(1, "../../")
import h2o
import random
def download_all_logs(ip,port):
h2o.init(ip,port)
log_location = h2o.download_all_logs()
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
# dirname and filename
log_location = h2o.download_all_logs(".","h2o_logs.txt")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
log_location = h2o.download_all_logs(dirname=".")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
# filename
log_location = h2o.download_all_logs(filename="h2o_logs.txt")
assert os.path.exists(log_location), "Expected h2o logs to be saved in {0}, but they weren't".format(log_location)
os.remove(log_location)
if __name__ == "__main__":
h2o.run_test(sys.argv, download_all_logs)
| true
| true
|
1c4981ea161448965260abc067ee7218670be9b4
| 218
|
py
|
Python
|
text/_cascade/text/spacing/word.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/text/spacing/word.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
text/_cascade/text/spacing/word.py
|
jedhsu/text
|
8525b602d304ac571a629104c48703443244545c
|
[
"Apache-2.0"
] | null | null | null |
"""
Word Spacing
"""
__all__ = ["WordSpacing"]
class WordSpacingKeyword:
    # Keyword values accepted by the CSS word-spacing property.
    # 'normal' selects the font's default inter-word spacing.
    Normal = "normal"
class WordSpacing(
    WordSpacingKeyword,
    Length,  # NOTE(review): `Length` is not defined in this module — presumably imported elsewhere in the package; confirm.
):
    """
    Spacing between each word.

    Accepts either the keyword value(s) from WordSpacingKeyword or a
    length value via the Length base.
    """

    pass
| 9.083333
| 30
| 0.59633
|
__all__ = ["WordSpacing"]
class WordSpacingKeyword:
Normal = "normal"
class WordSpacing(
WordSpacingKeyword,
Length,
):
pass
| true
| true
|
1c4982c7f40c95390cf2ad55bc3592134703da57
| 5,605
|
py
|
Python
|
fa_en_keyboard_exchange.py
|
arian42/wrong-keyboard
|
c0c0842ae8181ff52b33675aa7171de43bb56513
|
[
"MIT"
] | null | null | null |
fa_en_keyboard_exchange.py
|
arian42/wrong-keyboard
|
c0c0842ae8181ff52b33675aa7171de43bb56513
|
[
"MIT"
] | null | null | null |
fa_en_keyboard_exchange.py
|
arian42/wrong-keyboard
|
c0c0842ae8181ff52b33675aa7171de43bb56513
|
[
"MIT"
] | null | null | null |
# -------------------------------------------------------------------------------------------------------------------
# This is an alpha version and still needs more work.
# Written by Arian Heydari
#
# Things that should still be added or fixed:
#   - the word lists are poor quality (a better words file is needed)
#   - add an auto-learn function for new words
# -------------------------------------------------------------------------------------------------------------------
def binary_search(alist, item):
    """Return True if *item* is present in the ascending-sorted list *alist*.

    Standard iterative binary search, O(log n) comparisons. Fixes the
    original's dead `pos` variable (reset to 0 on every iteration and never
    used) and drops the redundant `found` flag.
    """
    low = 0
    high = len(alist) - 1
    while low <= high:
        mid = (low + high) // 2
        if alist[mid] == item:
            return True
        if item < alist[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return False
def lang_exchange(string):
    """Remap *string* between English (QWERTY) and Farsi keyboard layouts.

    ``dict`` maps each English-layout key to the character produced by the
    same physical key on the standard Farsi layout; ``rdict`` is the exact
    inverse.  Characters in neither table pass through unchanged, so the
    function maps already-correct text back and forth symmetrically.
    """
    # English-layout key -> Farsi-layout character on the same physical key.
    # (Shadows the builtin name `dict`; kept unchanged for compatibility.)
    dict = {
        u'a': u'ش',
        u'b': u'ذ',
        u'c': u'ز',
        u'd': u'ی',
        u'e': u'ث',
        u'f': u'ب',
        u'g': u'ل',
        u'h': u'ا',
        u'i': u'ه',
        u'j': u'ت',
        u'k': u'ن',
        u'l': u'م',
        u'm': u'ئ',
        u'n': u'د',
        u'o': u'خ',
        u'p': u'ح',
        u'q': u'ض',
        u'r': u'ق',
        u's': u'س',
        u't': u'ف',
        u'u': u'ع',
        u'v': u'ر',
        u'w': u'ص',
        u'x': u'ط',
        u'y': u'غ',
        u'z': u'ظ',
        u'A': u'َ',
        u'B': u'إ',
        u'C': u'ژ',
        u'D': u'ِ',
        u'E': u'ٍ',
        u'F': u'ّ',
        u'G': u'ۀ',
        u'H': u'آ',
        u'I': u']',
        u'J': u'ـ',
        u'K': u'«',
        u'L': u'»',
        u'M': u'ء',
        u'N': u'أ',
        u'O': u'[',
        u'P': u'\\',
        u'Q': u'ً',
        u'R': u'ريال',
        u'S': u'ُ',
        u'T': u'،',
        u'U': u',',
        u'V': u'ؤ',
        u'W': u'ٌ',
        u'X': u'ي',
        u'Y': u'؛',
        u'Z': u'ة',
        u';': u'ک',
        u'\'': u'گ',
        u',': u'و',
        u'.': u'.',
        u'/': u'/',
        u'[': u'ج',
        u']': u'چ',
        u'\\': u'پ',
        u':': u':',
        u'"': u'"',
        u'<': u'<',
        u'>': u'>',
        u'?': u'؟',
        u'{': u'}',
        u'}': u'{',
        u'|': u'|',
        u'`': u'÷',
        u'1': u'1',
        u'2': u'2',
        u'3': u'3',
        u'4': u'4',
        u'5': u'5',
        u'6': u'6',
        u'7': u'7',
        u'8': u'8',
        u'9': u'9',
        u'0': u'0',
        u'-': u'-',
        u'=': u'=',
        u'~': u'×',
        u'!': u'!',
        u'@': u'@',
        u'#': u'#',
        u'$': u'$',
        u'%': u'%',
        u'^': u'^',
        u'&': u'&',
        u'*': u'*',
        u'(': u')',
        u')': u'(',
        u'_': u'_',
        u'+': u'+',
        u' ': u' ',
    }
    # Inverse table: Farsi-layout character -> English-layout key.
    rdict = {v: k for k, v in dict.items()}
    newString = ''
    # Translate character by character; unknown characters are kept as-is.
    for i in range(len(string)):
        if string[i] in dict:
            newString += dict[string[i]]
        elif string[i] in rdict:
            newString += rdict[string[i]]
        else:
            newString += string[i]
    return newString
print("Whait a bit please. loading data...")
# LOAD DATA -------------
enChars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', ]
englishWordsFile = open("en.words.txt", 'r')
englishWordsList = englishWordsFile.read().split(',')
englishWordsFile.close()
faChars = [u'ظ', u'ط', u'ز', u'ر', u'ذ', u'د', u'ئ', u'و', u'ش', u'س', u'ی', u'ب', u'ل', u'ا', u'ت', u'ن', u'م', u'ک',
u'گ', u'ض', u'ص',
u'ث', u'ق', u'ف', u'غ', u'ع', u'ه', u'خ', u'ح', u'ج', u'چ', u'پ', u'ة', u'ي', u'ژ', u'ؤ', u'إ', u'أ', u'ء',
u'َ', u'ُ', u'ِ', u'ّ',
u'ۀ', u'آ', u'ـ', u'«', u'»', u'ً', u'ٌ', u'ٍ', u'ريال', u'،', u'؛', u',', u']', u'[', u'×', ]
farsiWordsFile = open("fa.words.txt", "r", encoding="utf-8")
farsiWordsList = farsiWordsFile.read().split(u',')
farsiWordsFile.close()
# INPUT ---------------
def translate(input_data):
    """Detect whether *input_data* was typed on the wrong keyboard layout
    and, if so, return it converted via lang_exchange(); otherwise return
    it unchanged.

    Heuristic: character counts decide whether the text is Farsi/English
    at all, then dictionary hit-rates decide whether a layout swap
    produces more recognizable words.  The *20, *10 and *2 factors are
    empirical thresholds — TODO confirm/tune.
    """
    rowInput = input_data
    splitInput = rowInput.split()
    # Per-word and per-character tallies.
    enWordsNumbers = 0
    faWordsNumbers = 0
    otherWordsNumbers = 0
    allWords = 0
    allChar = 0
    englishChar = 0
    farsiChar = 0
    for x in splitInput:
        allWords += 1
        for i in x:
            allChar += 1
            if i in enChars:
                englishChar += 1
            if i in faChars:
                farsiChar += 1
        # Classify the word by dictionary membership (the word lists must
        # be sorted for binary_search to be valid).
        if binary_search(farsiWordsList, x):
            faWordsNumbers += 1
        elif binary_search(englishWordsList, x):
            enWordsNumbers += 1
        else:
            otherWordsNumbers += 1
    if farsiChar + englishChar * 2 >= allChar:
        if faWordsNumbers * 20 >= allWords or enWordsNumbers * 20 >= allWords:
            # it is farsi or english
            return rowInput
        else:
            # Try swapping layouts and accept only if enough converted
            # words are real dictionary words.
            translate_words = lang_exchange(rowInput)
            new_words = 0
            for words in translate_words.split():
                if binary_search(englishWordsList, words) or binary_search(farsiWordsList, words):
                    new_words += 1
            if new_words * 10 > len(translate_words.split()):
                return translate_words
            return rowInput
    else:
        # it is other language
        return rowInput
print("Done. ready to use ,just type:")
while True:
print(translate(input()))
| 27.747525
| 120
| 0.363426
|
def binary_search(alist, item):
first = 0
last = len(alist) - 1
found = False
while first <= last and not found:
pos = 0
midpoint = (first + last) // 2
if alist[midpoint] == item:
pos = midpoint
found = True
else:
if item < alist[midpoint]:
last = midpoint - 1
else:
first = midpoint + 1
return found
def lang_exchange(string):
dict = {
u'a': u'ش',
u'b': u'ذ',
u'c': u'ز',
u'd': u'ی',
u'e': u'ث',
u'f': u'ب',
u'g': u'ل',
u'h': u'ا',
u'i': u'ه',
u'j': u'ت',
u'k': u'ن',
u'l': u'م',
u'm': u'ئ',
u'n': u'د',
u'o': u'خ',
u'p': u'ح',
u'q': u'ض',
u'r': u'ق',
u's': u'س',
u't': u'ف',
u'u': u'ع',
u'v': u'ر',
u'w': u'ص',
u'x': u'ط',
u'y': u'غ',
u'z': u'ظ',
u'A': u'َ',
u'B': u'إ',
u'C': u'ژ',
u'D': u'ِ',
u'E': u'ٍ',
u'F': u'ّ',
u'G': u'ۀ',
u'H': u'آ',
u'I': u']',
u'J': u'ـ',
u'K': u'«',
u'L': u'»',
u'M': u'ء',
u'N': u'أ',
u'O': u'[',
u'P': u'\\',
u'Q': u'ً',
u'R': u'ريال',
u'S': u'ُ',
u'T': u'،',
u'U': u',',
u'V': u'ؤ',
u'W': u'ٌ',
u'X': u'ي',
u'Y': u'؛',
u'Z': u'ة',
u';': u'ک',
u'\'': u'گ',
u',': u'و',
u'.': u'.',
u'/': u'/',
u'[': u'ج',
u']': u'چ',
u'\\': u'پ',
u':': u':',
u'"': u'"',
u'<': u'<',
u'>': u'>',
u'?': u'؟',
u'{': u'}',
u'}': u'{',
u'|': u'|',
u'`': u'÷',
u'1': u'1',
u'2': u'2',
u'3': u'3',
u'4': u'4',
u'5': u'5',
u'6': u'6',
u'7': u'7',
u'8': u'8',
u'9': u'9',
u'0': u'0',
u'-': u'-',
u'=': u'=',
u'~': u'×',
u'!': u'!',
u'@': u'@',
u' u'$': u'$',
u'%': u'%',
u'^': u'^',
u'&': u'&',
u'*': u'*',
u'(': u')',
u')': u'(',
u'_': u'_',
u'+': u'+',
u' ': u' ',
}
rdict = {v: k for k, v in dict.items()}
newString = ''
for i in range(len(string)):
if string[i] in dict:
newString += dict[string[i]]
elif string[i] in rdict:
newString += rdict[string[i]]
else:
newString += string[i]
return newString
print("Whait a bit please. loading data...")
# LOAD DATA -------------
enChars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z', ]
englishWordsFile = open("en.words.txt", 'r')
englishWordsList = englishWordsFile.read().split(',')
englishWordsFile.close()
faChars = [u'ظ', u'ط', u'ز', u'ر', u'ذ', u'د', u'ئ', u'و', u'ش', u'س', u'ی', u'ب', u'ل', u'ا', u'ت', u'ن', u'م', u'ک',
u'گ', u'ض', u'ص',
u'ث', u'ق', u'ف', u'غ', u'ع', u'ه', u'خ', u'ح', u'ج', u'چ', u'پ', u'ة', u'ي', u'ژ', u'ؤ', u'إ', u'أ', u'ء',
u'َ', u'ُ', u'ِ', u'ّ',
u'ۀ', u'آ', u'ـ', u'«', u'»', u'ً', u'ٌ', u'ٍ', u'ريال', u'،', u'؛', u',', u']', u'[', u'×', ]
farsiWordsFile = open("fa.words.txt", "r", encoding="utf-8")
farsiWordsList = farsiWordsFile.read().split(u',')
farsiWordsFile.close()
# INPUT ---------------
def translate(input_data):
rowInput = input_data
splitInput = rowInput.split()
enWordsNumbers = 0
faWordsNumbers = 0
otherWordsNumbers = 0
allWords = 0
allChar = 0
englishChar = 0
farsiChar = 0
for x in splitInput:
allWords += 1
for i in x:
allChar += 1
if i in enChars:
englishChar += 1
if i in faChars:
farsiChar += 1
if binary_search(farsiWordsList, x):
faWordsNumbers += 1
elif binary_search(englishWordsList, x):
enWordsNumbers += 1
else:
otherWordsNumbers += 1
if farsiChar + englishChar * 2 >= allChar:
if faWordsNumbers * 20 >= allWords or enWordsNumbers * 20 >= allWords:
# it is farsi or english
return rowInput
else:
translate_words = lang_exchange(rowInput)
new_words = 0
for words in translate_words.split():
if binary_search(englishWordsList, words) or binary_search(farsiWordsList, words):
new_words += 1
if new_words * 10 > len(translate_words.split()):
return translate_words
return rowInput
else:
# it is other language
return rowInput
print("Done. ready to use ,just type:")
while True:
print(translate(input()))
| true
| true
|
1c4982dab6584e4c750c0a7551513aed7ec8c4b7
| 221
|
py
|
Python
|
abc/abc190/abc190d-3.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | 1
|
2019-08-21T00:49:34.000Z
|
2019-08-21T00:49:34.000Z
|
abc/abc190/abc190d-3.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
abc/abc190/abc190d-3.py
|
c-yan/atcoder
|
940e49d576e6a2d734288fadaf368e486480a948
|
[
"MIT"
] | null | null | null |
# ABC190 D: the answer equals twice the number of divisors of the odd
# part of N (i.e. twice the count of odd divisors of N).
N = int(input())

# Strip every factor of two to obtain the odd part of N.
odd_part = N
while odd_part % 2 == 0:
    odd_part //= 2

# Count divisors of the odd part by trial division up to its square root.
count = 0
for d in range(1, int(odd_part ** 0.5) + 1):
    if odd_part % d == 0:
        count += 1
        if d * d != odd_part:
            count += 1

print(count * 2)
| 13.8125
| 37
| 0.438914
|
N = int(input())
a = N
while a % 2 == 0:
a //= 2
result = 0
for i in range(1, int(a ** 0.5) + 1):
if a % i != 0:
continue
result += 1
if i * i != a:
result += 1
result *= 2
print(result)
| true
| true
|
1c498316fdd0c125a26460ff88c3dfe714b68c44
| 9,921
|
py
|
Python
|
train_sppe/src/utils/img.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
train_sppe/src/utils/img.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
train_sppe/src/utils/img.py
|
mdraw/AlphaPose
|
bed8e0798f6deed4789b9ae2646f72b9fd138c5b
|
[
"Apache-2.0"
] | null | null | null |
# -----------------------------------------------------
# Copyright (c) Shanghai Jiao Tong University. All rights reserved.
# Written by Jiefeng Li (jeff.lee.sjtu@gmail.com)
# -----------------------------------------------------
import numpy as np
import torch
import scipy.misc
import torch.nn.functional as F
import cv2
from opt import opt
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
def im_to_torch(img):
    """HxWxC numpy image -> CxHxW float torch tensor, scaled to [0, 1]
    when the input looks like 0-255 pixel data."""
    chw = np.transpose(img, (2, 0, 1))  # HWC -> CHW
    tensor = to_torch(chw).float()
    # 0-255 inputs are rescaled; already-normalized inputs pass through.
    if tensor.max() > 1:
        tensor /= 255
    return tensor
def torch_to_im(img):
    """CxHxW tensor -> HxWxC numpy image (layout inverse of im_to_torch;
    no value rescaling)."""
    arr = to_numpy(img)
    return np.transpose(arr, (1, 2, 0))  # CHW -> HWC
def load_image(img_path):
    # H x W x C => C x H x W
    # NOTE(review): scipy.misc.imread was removed in SciPy 1.2; this only
    # works with an old pinned SciPy — confirm the project's dependency.
    return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))
def to_numpy(tensor):
    """Convert a torch tensor to a numpy array; numpy inputs pass through.

    Raises ValueError for anything that is neither.
    """
    if torch.is_tensor(tensor):
        return tensor.cpu().numpy()
    if type(tensor).__module__ == 'numpy':
        return tensor
    raise ValueError("Cannot convert {} to numpy array".format(type(tensor)))
def to_torch(ndarray):
    """Convert a numpy array to a torch tensor; tensor inputs pass through.

    Raises ValueError for anything that is neither.
    """
    if torch.is_tensor(ndarray):
        return ndarray
    if type(ndarray).__module__ == 'numpy':
        return torch.from_numpy(ndarray)
    raise ValueError("Cannot convert {} to torch tensor".format(type(ndarray)))
def drawGaussian(img, pt, sigma):
    """Render an unnormalized 2-D Gaussian centered at *pt* into *img*.

    The written patch spans 3*sigma in each direction around *pt*; parts
    outside the image are clipped.  Returns the image as a torch tensor.
    """
    img = to_numpy(img)
    tmpSize = 3 * sigma
    # Check that any part of the gaussian is in-bounds
    ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
    br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
    if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
            br[0] < 0 or br[1] < 0):
        # If not, just return the image as is
        return to_torch(img)
    # Generate gaussian
    size = 2 * tmpSize + 1
    x = np.arange(0, size, 1, float)
    y = x[:, np.newaxis]
    x0 = y0 = size // 2
    # NOTE: this overrides the caller-supplied sigma — the spread is tied
    # to the patch size rather than the argument.
    sigma = size / 4.0
    # The gaussian is not normalized, we want the center value to equal 1
    g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
    # Usable gaussian range
    g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
    g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
    # Image range
    img_x = max(0, ul[0]), min(br[0], img.shape[1])
    img_y = max(0, ul[1]), min(br[1], img.shape[0])
    # Overwrites (not max-blends) the destination region.
    img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
    return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
    """Map image point *pt* into the resH x resW heatmap space of the crop
    whose corners are *ul* (upper-left) and *br* (bottom-right), after the
    crop is padded to the inpH:inpW aspect ratio.

    Returns rounded integer heatmap coordinates as an int tensor.
    """
    center = torch.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # Effective crop size after padding the box to the network aspect ratio.
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    _pt = torch.zeros(2)
    _pt[0] = pt[0] - ul[0]
    _pt[1] = pt[1] - ul[1]
    # Move to center
    _pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])
    _pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])
    # Scale from padded-crop pixels to heatmap cells.
    pt = (_pt * resH) / lenH
    pt[0] = round(float(pt[0]))
    pt[1] = round(float(pt[1]))
    return pt.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
    """Inverse of transformBox: map heatmap point *pt* back to original
    image coordinates for the crop with corners *ul*/*br*.
    """
    center = torch.zeros(2)
    center[0] = (br[0] - 1 - ul[0]) / 2
    center[1] = (br[1] - 1 - ul[1]) / 2
    # Effective crop size after padding to the network aspect ratio.
    lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
    lenW = lenH * inpW / inpH
    # Scale from heatmap cells back to padded-crop pixels.
    _pt = (pt * lenH) / resH
    # Undo the centering shift applied by transformBox.
    _pt[0] = _pt[0] - max(0, (lenW - 1) / 2 - center[0])
    _pt[1] = _pt[1] - max(0, (lenH - 1) / 2 - center[1])
    new_point = torch.zeros(2)
    new_point[0] = _pt[0] + ul[0]
    new_point[1] = _pt[1] + ul[1]
    return new_point
def cropBox(img, ul, br, resH, resW):
    """Crop *img* to the box ul..br (padded to the resH:resW aspect ratio)
    and warp the crop to resH x resW with an affine transform.

    NOTE: zeroes all pixels of *img* outside the box in place before
    warping — the caller's tensor is mutated.
    """
    ul = ul.int()
    br = (br - 1).int()
    # br = br.int()
    lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
    lenW = lenH * resW / resH
    if img.dim() == 2:
        # Promote a single-channel HxW image to 1xHxW.
        img = img[np.newaxis, :]
    box_shape = [br[1] - ul[1], br[0] - ul[0]]
    pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
    # Padding Zeros
    img[:, :ul[1], :], img[:, :, :ul[0]] = 0, 0
    img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0
    # Two corner points plus a third orthogonal point determine the affine map.
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
    src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
    dst[0, :] = 0
    dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
    """Rotate *img* by *rot* degrees about its center, output resW x resH."""
    center = np.array((resW - 1, resH - 1)) / 2
    rot_rad = np.pi * rot / 180
    # One rotated direction vector plus a third orthogonal point fully
    # determine the affine rotation for cv2.getAffineTransform.
    src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
    dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center
    src[1, :] = center + src_dir
    dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
    dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    dst_img = cv2.warpAffine(torch_to_im(img), trans,
                             (resW, resH), flags=cv2.INTER_LINEAR)
    return im_to_torch(torch.Tensor(dst_img))
def flip_v(x, cuda=False):
    """Horizontally flip a tensor (via flip) and wrap it in a Variable.

    NOTE(review): the ``cuda`` branch is a no-op (``x = x``); it looks
    like ``x = x.cuda()`` was intended — confirm before relying on it.
    """
    x = flip(x.cpu().data)
    if cuda:
        x = x
    x = torch.autograd.Variable(x)
    return x
def flip(x):
    """Return a copy of 3-D (C,H,W) or 4-D (N,C,H,W) tensor *x* mirrored
    along its last (width) axis.

    The original transposed, applied np.fliplr per sample, and transposed
    back; a single reversed slice on the last axis is equivalent.
    """
    assert x.dim() in (3, 4)
    mirrored = x.numpy()[..., ::-1].copy()
    return torch.from_numpy(mirrored)
def shuffleLR(x, dataset):
    """Swap left/right joint channels of *x* in place and return it.

    dataset.flipRef lists 1-indexed (left, right) joint-index pairs;
    swapping is needed after a horizontal image flip so that e.g. the
    'left elbow' channel still means the left elbow.
    """
    flipRef = dataset.flipRef
    assert (x.dim() == 3 or x.dim() == 4)
    for pair in flipRef:
        dim0, dim1 = pair
        # flipRef is 1-indexed; convert to 0-indexed channel positions.
        dim0 -= 1
        dim1 -= 1
        if x.dim() == 4:
            # Batched input: the joint axis is dim 1.
            tmp = x[:, dim1].clone()
            x[:, dim1] = x[:, dim0].clone()
            x[:, dim0] = tmp.clone()
            #x[:, dim0], x[:, dim1] = deepcopy((x[:, dim1], x[:, dim0]))
        else:
            tmp = x[dim1].clone()
            x[dim1] = x[dim0].clone()
            x[dim0] = tmp.clone()
            #x[dim0], x[dim1] = deepcopy((x[dim1], x[dim0]))
    return x
def shuffleLR_v(x, dataset, cuda=False):
    """Variable-wrapping counterpart of shuffleLR (operates on CPU data).

    NOTE(review): the ``cuda`` branch is a no-op (``x = x``); probably
    ``x = x.cuda()`` was intended — confirm.
    """
    x = shuffleLR(x.cpu().data, dataset)
    if cuda:
        x = x
    x = torch.autograd.Variable(x)
    return x
def vis_frame(frame, im_res, format='coco'):
    '''
    frame: frame image
    im_res: im_res of predictions
    format: coco or mpii

    return rendered image
    '''
    if format == 'coco':
        # Skeleton edges (joint-index pairs) and draw colors for COCO's
        # 17-joint layout.
        l_pair = [
            (0, 1), (0, 2), (1, 3), (2, 4),  # Head
            (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
            (5, 11), (6, 12),  # Body
            (11, 13), (12, 14), (13, 15), (14, 16)
        ]
        p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,
                   YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]
        line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,
                      BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]
    elif format == 'mpii':
        # Skeleton edges and colors for MPII's 16-joint layout.
        l_pair = [
            (8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
            (13, 14), (14, 15), (3, 4), (4, 5),
            (8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
        ]
        p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
                   RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
        line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
                      RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
    else:
        raise NotImplementedError
    im_name = im_res['imgname'].split('/')[-1]
    img = frame.copy()
    for human in im_res['result']:
        part_line = {}
        kp_preds = human['keypoints']
        kp_scores = human['kp_score']
        # Draw keypoints
        for n in range(kp_scores.shape[0]):
            # Skip low-confidence joints.
            if kp_scores[n] <= 0.15:
                continue
            cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
            part_line[n] = (cor_x, cor_y)
            cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
        # Draw limbs
        for i, (start_p, end_p) in enumerate(l_pair):
            if start_p in part_line and end_p in part_line:
                start_xy = part_line[start_p]
                end_xy = part_line[end_p]
                # NOTE(review): the thickness argument below is a float;
                # newer OpenCV builds require an int — confirm against the
                # pinned cv2 version.
                cv2.line(img, start_xy, end_xy,
                         line_color[i], (0.5 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
    return img
def get_3rd_point(a, b):
    """Third point forming a right angle at *b*: b plus (a - b) rotated
    90 degrees counter-clockwise.  Used to pin down affine transforms."""
    dx = a[0] - b[0]
    dy = a[1] - b[1]
    return b + np.array([-dy, dx], dtype=np.float32)
def get_dir(src_point, rot_rad):
    """Rotate 2-D *src_point* by *rot_rad* radians (counter-clockwise in
    standard math orientation) and return the result as a [x, y] list."""
    sn, cs = np.sin(rot_rad), np.cos(rot_rad)
    return [
        src_point[0] * cs - src_point[1] * sn,
        src_point[0] * sn + src_point[1] * cs,
    ]
| 31.100313
| 92
| 0.513154
|
import numpy as np
import torch
import scipy.misc
import torch.nn.functional as F
import cv2
from opt import opt
RED = (0, 0, 255)
GREEN = (0, 255, 0)
BLUE = (255, 0, 0)
CYAN = (255, 255, 0)
YELLOW = (0, 255, 255)
ORANGE = (0, 165, 255)
PURPLE = (255, 0, 255)
def im_to_torch(img):
img = np.transpose(img, (2, 0, 1)) img = to_torch(img).float()
if img.max() > 1:
img /= 255
return img
def torch_to_im(img):
img = to_numpy(img)
img = np.transpose(img, (1, 2, 0)) return img
def load_image(img_path):
return im_to_torch(scipy.misc.imread(img_path, mode='RGB'))
def to_numpy(tensor):
if torch.is_tensor(tensor):
return tensor.cpu().numpy()
elif type(tensor).__module__ != 'numpy':
raise ValueError("Cannot convert {} to numpy array"
.format(type(tensor)))
return tensor
def to_torch(ndarray):
if type(ndarray).__module__ == 'numpy':
return torch.from_numpy(ndarray)
elif not torch.is_tensor(ndarray):
raise ValueError("Cannot convert {} to torch tensor"
.format(type(ndarray)))
return ndarray
def drawGaussian(img, pt, sigma):
img = to_numpy(img)
tmpSize = 3 * sigma
ul = [int(pt[0] - tmpSize), int(pt[1] - tmpSize)]
br = [int(pt[0] + tmpSize + 1), int(pt[1] + tmpSize + 1)]
if (ul[0] >= img.shape[1] or ul[1] >= img.shape[0] or
br[0] < 0 or br[1] < 0):
return to_torch(img)
size = 2 * tmpSize + 1
x = np.arange(0, size, 1, float)
y = x[:, np.newaxis]
x0 = y0 = size // 2
sigma = size / 4.0
g = np.exp(- ((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))
g_x = max(0, -ul[0]), min(br[0], img.shape[1]) - ul[0]
g_y = max(0, -ul[1]), min(br[1], img.shape[0]) - ul[1]
img_x = max(0, ul[0]), min(br[0], img.shape[1])
img_y = max(0, ul[1]), min(br[1], img.shape[0])
img[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]
return to_torch(img)
def transformBox(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = torch.zeros(2)
_pt[0] = pt[0] - ul[0]
_pt[1] = pt[1] - ul[1]
_pt[0] = _pt[0] + max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] + max(0, (lenH - 1) / 2 - center[1])
pt = (_pt * resH) / lenH
pt[0] = round(float(pt[0]))
pt[1] = round(float(pt[1]))
return pt.int()
def transformBoxInvert(pt, ul, br, inpH, inpW, resH, resW):
center = torch.zeros(2)
center[0] = (br[0] - 1 - ul[0]) / 2
center[1] = (br[1] - 1 - ul[1]) / 2
lenH = max(br[1] - ul[1], (br[0] - ul[0]) * inpH / inpW)
lenW = lenH * inpW / inpH
_pt = (pt * lenH) / resH
_pt[0] = _pt[0] - max(0, (lenW - 1) / 2 - center[0])
_pt[1] = _pt[1] - max(0, (lenH - 1) / 2 - center[1])
new_point = torch.zeros(2)
new_point[0] = _pt[0] + ul[0]
new_point[1] = _pt[1] + ul[1]
return new_point
def cropBox(img, ul, br, resH, resW):
ul = ul.int()
br = (br - 1).int()
lenH = max((br[1] - ul[1]).item(), (br[0] - ul[0]).item() * resH / resW)
lenW = lenH * resW / resH
if img.dim() == 2:
img = img[np.newaxis, :]
box_shape = [br[1] - ul[1], br[0] - ul[0]]
pad_size = [(lenH - box_shape[0]) // 2, (lenW - box_shape[1]) // 2]
img[:, :ul[1], :], img[:, :, :ul[0]] = 0, 0
img[:, br[1] + 1:, :], img[:, :, br[0] + 1:] = 0, 0
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = np.array([ul[0] - pad_size[1], ul[1] - pad_size[0]], np.float32)
src[1, :] = np.array([br[0] + pad_size[1], br[1] + pad_size[0]], np.float32)
dst[0, :] = 0
dst[1, :] = np.array([resW - 1, resH - 1], np.float32)
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(torch_to_im(img), trans,
(resW, resH), flags=cv2.INTER_LINEAR)
return im_to_torch(torch.Tensor(dst_img))
def cv_rotate(img, rot, resW, resH):
center = np.array((resW - 1, resH - 1)) / 2
rot_rad = np.pi * rot / 180
src_dir = get_dir([0, (resH - 1) * -0.5], rot_rad)
dst_dir = np.array([0, (resH - 1) * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center
src[1, :] = center + src_dir
dst[0, :] = [(resW - 1) * 0.5, (resH - 1) * 0.5]
dst[1, :] = np.array([(resW - 1) * 0.5, (resH - 1) * 0.5]) + dst_dir
src[2:, :] = get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
dst_img = cv2.warpAffine(torch_to_im(img), trans,
(resW, resH), flags=cv2.INTER_LINEAR)
return im_to_torch(torch.Tensor(dst_img))
def flip_v(x, cuda=False):
x = flip(x.cpu().data)
if cuda:
x = x
x = torch.autograd.Variable(x)
return x
def flip(x):
assert (x.dim() == 3 or x.dim() == 4)
x = x.numpy().copy()
if x.ndim == 3:
x = np.transpose(np.fliplr(np.transpose(x, (0, 2, 1))), (0, 2, 1))
elif x.ndim == 4:
for i in range(x.shape[0]):
x[i] = np.transpose(
np.fliplr(np.transpose(x[i], (0, 2, 1))), (0, 2, 1))
return torch.from_numpy(x.copy())
def shuffleLR(x, dataset):
flipRef = dataset.flipRef
assert (x.dim() == 3 or x.dim() == 4)
for pair in flipRef:
dim0, dim1 = pair
dim0 -= 1
dim1 -= 1
if x.dim() == 4:
tmp = x[:, dim1].clone()
x[:, dim1] = x[:, dim0].clone()
x[:, dim0] = tmp.clone()
else:
tmp = x[dim1].clone()
x[dim1] = x[dim0].clone()
x[dim0] = tmp.clone()
return x
def shuffleLR_v(x, dataset, cuda=False):
x = shuffleLR(x.cpu().data, dataset)
if cuda:
x = x
x = torch.autograd.Variable(x)
return x
def vis_frame(frame, im_res, format='coco'):
if format == 'coco':
l_pair = [
(0, 1), (0, 2), (1, 3), (2, 4), (5, 6), (5, 7), (7, 9), (6, 8), (8, 10),
(5, 11), (6, 12), (11, 13), (12, 14), (13, 15), (14, 16)
]
p_color = [RED, RED, RED, RED, RED, YELLOW, YELLOW, YELLOW,
YELLOW, YELLOW, YELLOW, GREEN, GREEN, GREEN, GREEN, GREEN, GREEN]
line_color = [YELLOW, YELLOW, YELLOW, YELLOW, BLUE, BLUE,
BLUE, BLUE, BLUE, PURPLE, PURPLE, RED, RED, RED, RED]
elif format == 'mpii':
l_pair = [
(8, 9), (11, 12), (11, 10), (2, 1), (1, 0),
(13, 14), (14, 15), (3, 4), (4, 5),
(8, 7), (7, 6), (6, 2), (6, 3), (8, 12), (8, 13)
]
p_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE, RED,
RED, PURPLE, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
line_color = [PURPLE, BLUE, BLUE, RED, RED, BLUE, BLUE,
RED, RED, PURPLE, PURPLE, RED, RED, BLUE, BLUE]
else:
raise NotImplementedError
im_name = im_res['imgname'].split('/')[-1]
img = frame.copy()
for human in im_res['result']:
part_line = {}
kp_preds = human['keypoints']
kp_scores = human['kp_score']
for n in range(kp_scores.shape[0]):
if kp_scores[n] <= 0.15:
continue
cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
part_line[n] = (cor_x, cor_y)
cv2.circle(img, (cor_x, cor_y), 4, p_color[n], -1)
for i, (start_p, end_p) in enumerate(l_pair):
if start_p in part_line and end_p in part_line:
start_xy = part_line[start_p]
end_xy = part_line[end_p]
cv2.line(img, start_xy, end_xy,
line_color[i], (0.5 * (kp_scores[start_p] + kp_scores[end_p])) + 1)
return img
def get_3rd_point(a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_dir(src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
| true
| true
|
1c498364b124248db0499e5d367de8334f74324d
| 462
|
py
|
Python
|
data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py
|
obi-two/GameServer
|
7d37024e2291a97d49522610cd8f1dbe5666afc2
|
[
"MIT"
] | 20
|
2015-02-23T15:11:56.000Z
|
2022-03-18T20:56:48.000Z
|
data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | null | null | null |
data/scripts/templates/object/draft_schematic/furniture/shared_furniture_chair_elegant.py
|
apathyboy/swganh
|
665128efe9154611dec4cb5efc61d246dd095984
|
[
"MIT"
] | 20
|
2015-04-04T16:35:59.000Z
|
2022-03-24T14:54:37.000Z
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Intangible draft-schematic object for the elegant chair."""
    schematic = Intangible()
    schematic.template = "object/draft_schematic/furniture/shared_furniture_chair_elegant.iff"
    schematic.attribute_template_id = -1
    schematic.stfName("string_id_table","")
    return schematic
| 27.176471
| 88
| 0.735931
|
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/furniture/shared_furniture_chair_elegant.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
return result
| true
| true
|
1c4983c64dbb362dcacbdb6c9d607d9aba2da2ce
| 509
|
py
|
Python
|
pythran/tests/euler/euler10.py
|
artas360/pythran
|
66dad52d52be71693043e9a7d7578cfb9cb3d1da
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/euler/euler10.py
|
artas360/pythran
|
66dad52d52be71693043e9a7d7578cfb9cb3d1da
|
[
"BSD-3-Clause"
] | null | null | null |
pythran/tests/euler/euler10.py
|
artas360/pythran
|
66dad52d52be71693043e9a7d7578cfb9cb3d1da
|
[
"BSD-3-Clause"
] | 1
|
2017-03-12T20:32:36.000Z
|
2017-03-12T20:32:36.000Z
|
#runas solve(2000000)
#pythran export solve(int)
def solve(max):
    '''
    The sum of the primes below 10 is 2 + 3 + 5 + 7 = 17.
    Find the sum of all the primes below two million.
    '''
    # Sieve of Eratosthenes over [0, max).  NOTE: uses xrange, so this is
    # Python 2 source (it also carries pythran export directives above).
    sieve = [True] * max  # Sieve is faster for 2M primes

    def mark(sieve, x):
        # Cross out every multiple of x, starting from 2x.
        for i in xrange(x+x, len(sieve), x):
            sieve[i] = False

    # Only need to sieve with factors up to sqrt(max).
    for x in xrange(2, int(len(sieve) ** 0.5) + 1):
        if sieve[x]: mark(sieve, x)
    # Indices still True from 2 upward are prime.
    return sum(i for i in xrange(2, len(sieve)) if sieve[i])
| 25.45
| 60
| 0.563851
|
def solve(max):
sieve = [True] * max
def mark(sieve, x):
for i in xrange(x+x, len(sieve), x):
sieve[i] = False
for x in xrange(2, int(len(sieve) ** 0.5) + 1):
if sieve[x]: mark(sieve, x)
return sum(i for i in xrange(2, len(sieve)) if sieve[i])
| true
| true
|
1c4984353c9bf656314d3233f534932929e34855
| 2,833
|
py
|
Python
|
cvjyo.py
|
Aravind-Suresh/CVJyo
|
6cb324fb538a50939335fd28ee90e23fbb32f2c0
|
[
"MIT"
] | null | null | null |
cvjyo.py
|
Aravind-Suresh/CVJyo
|
6cb324fb538a50939335fd28ee90e23fbb32f2c0
|
[
"MIT"
] | null | null | null |
cvjyo.py
|
Aravind-Suresh/CVJyo
|
6cb324fb538a50939335fd28ee90e23fbb32f2c0
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np
import sys
import math
def markPoints(pts, img):
    """Draw a filled 2-px black dot at each (x, y) point in *pts*,
    modifying *img* in place."""
    for point in pts:
        center = (point[0], point[1])
        cv2.circle(img, center, 2, 0, -1)
def contourAreaComparator(cnt1, cnt2):
    """cmp-style comparator ordering contours by ascending area.

    Returns 1, -1 or 0.  The original returned -1 when the areas were
    equal, violating the comparator contract (cmp(a, b) and cmp(b, a)
    must be consistent); equal areas now compare as 0.
    """
    area1 = cv2.contourArea(cnt1)
    area2 = cv2.contourArea(cnt2)
    # Standard cmp idiom: (a > b) - (a < b) yields 1 / -1 / 0.
    return (area1 > area2) - (area1 < area2)
def orderClockwise(ptsO, pt):
    """Return the points of *ptsO* reordered by angle around center *pt*.

    The angle is atan(dy/dx) for dx > 0 and pi + atan(dy/dx) otherwise —
    the original's piecewise convention (deliberately NOT atan2, which
    would rank the dx<0, dy<0 quadrant differently).  Ties keep their
    original relative order.
    """
    rel = np.array(ptsO - np.asarray(pt), dtype=np.float32)

    def angle(p):
        a = math.atan(p[1] / p[0])
        return a if p[0] > 0 else math.pi + a

    # Stable sort of indices by angle == sorted(zip(slopes, indices)).
    order = sorted(range(len(ptsO)), key=lambda i: angle(rel[i]))
    return ptsO[order]
# Palm-line detection pipeline: isolate the hand, bound the palm with the
# finger-valley (convexity-defect) polygon, then extract the darkest ridges.
img = cv2.imread(sys.argv[1], 0)  # read input as grayscale
img = cv2.GaussianBlur(img, (5, 5), 0)
height,width = img.shape
# Otsu binarization separates the hand from the background.
_,otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow("img", otsu); cv2.waitKey(0);
imgAnd = cv2.bitwise_and(img, otsu)
cv2.imshow("img", imgAnd); cv2.waitKey(0);
# The largest contour is assumed to be the hand.
_, contours, hierarchy = cv2.findContours(otsu, 1, 2)
area = []
for cnt in contours:
    area.append(cv2.contourArea(cnt))
area = np.array(area)
idx = np.max(area)
idx = np.where(area==idx)[0][0]
cnt = contours[idx]
# Convexity defects approximate the valleys between the fingers.
hull = cv2.convexHull(cnt, returnPoints = False)
defects = cv2.convexityDefects(cnt, hull)
for d in defects:
    s, e, f, appr = d[0]
    cv2.circle(imgAnd, tuple(cnt[f][0]), 2, 255, -1)
# The distance-transform peak gives the palm center.
dt = cv2.distanceTransform(otsu, cv2.DIST_L2, 3)
cv2.normalize(dt, dt, 0.0, 1.0, cv2.NORM_MINMAX);
cv2.imshow("img", dt);cv2.waitKey(0)
idx = np.where(dt==np.max(dt))
pt = (idx[1][0], idx[0][0])
defPts = cnt[defects[:, 0, 2]]
defPts = defPts.reshape(-1,2)
# Discard defect points too close to the top/left image border
# (empirical 40% / 20% margins).
thrDistTop = int(0.4*height)
thrDistLeft = int(0.2*width)
defPts = defPts[np.where(defPts[:, 1] > thrDistTop)[0]]
defPts = defPts[np.where(defPts[:, 0] > thrDistLeft)[0]]
defPtsC = defPts.copy()
# Order the remaining points around the palm center to get a simple polygon.
defPts = orderClockwise(defPtsC, pt)
boundImg = np.zeros((height,width), np.uint8)
cv2.fillPoly(boundImg, [defPts], 255)
# Restrict adaptive thresholding to the palm polygon, eroded slightly to
# drop the polygon's own border.
imgRoi = cv2.bitwise_and(img, boundImg)
imgRoi = cv2.adaptiveThreshold(imgRoi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
kernel = np.ones((5,5),np.uint8)
boundImg = cv2.erode(boundImg,kernel,iterations = 1)
imgRoi = cv2.bitwise_and(imgRoi, boundImg)
cv2.imshow("img", imgRoi); cv2.waitKey(0)
imgRoiC = imgRoi.copy()
_, contours, hierarchy = cv2.findContours(imgRoiC, 1, 2)
# Python-2 cmp-style sort, ascending by area; the five largest (indices
# l-6 .. l-2) are drawn as the detected palm lines.
contours.sort(contourAreaComparator)
l = len(contours)
ll = np.arange(l-6, l-1)
imgColor = cv2.imread(sys.argv[1])
for idx in ll:
    cv2.drawContours(imgRoi, contours, idx, 127, 3)
    cv2.drawContours(imgColor, contours, idx, (0, 0, 255), 3)
cv2.imshow("img", imgColor); cv2.waitKey(0)
| 26.476636
| 101
| 0.678433
|
import cv2
import numpy as np
import sys
import math
def markPoints(pts, img):
for pt in pts:
cv2.circle(img, tuple((pt[0], pt[1])), 2, 0, -1)
def contourAreaComparator(cnt1, cnt2):
if cv2.contourArea(cnt1) > cv2.contourArea(cnt2):
return 1
else:
return -1
def orderClockwise(ptsO, pt):
    """Return the rows of ``ptsO`` sorted by angle around the point ``pt``.

    ptsO -- numpy array of shape (N, 2) holding (x, y) points.
    pt   -- (x, y) reference point (e.g. the region centre).

    Fix: the original computed ``atan(y / x)`` and divided by zero whenever a
    point lay directly above/below ``pt`` (x == 0 yields inf/nan plus a
    RuntimeWarning under numpy float32).  ``atan2`` is well defined there;
    its (-pi, pi] range is remapped onto the original (-pi/2, 3*pi/2] so the
    ordering of all previously-valid inputs is unchanged.
    """
    rel = np.array(ptsO - np.asarray(pt), dtype=np.float32)
    angles = []
    for x, y in rel:
        ang = math.atan2(y, x)
        if ang < -math.pi / 2:
            # Third quadrant: match the original's pi + atan(y/x) branch.
            ang += 2 * math.pi
        angles.append(ang)
    order = [i for _, i in sorted(zip(angles, range(len(ptsO))))]
    return ptsO[order]
# Script: segment a region of interest in a grayscale image (path in argv[1])
# via Otsu thresholding, convexity defects and a distance transform, then
# outline several of the largest contours found inside the ROI.
img = cv2.imread(sys.argv[1], 0)
# 5x5 Gaussian blur to suppress noise before thresholding.
img = cv2.GaussianBlur(img, (5, 5), 0)
height,width = img.shape
_,otsu = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
cv2.imshow("img", otsu); cv2.waitKey(0);
imgAnd = cv2.bitwise_and(img, otsu)
cv2.imshow("img", imgAnd); cv2.waitKey(0);
# NOTE(review): the 3-value return is OpenCV 3.x; OpenCV 4 returns only
# (contours, hierarchy) -- confirm the pinned cv2 version.
_, contours, hierarchy = cv2.findContours(otsu, 1, 2)
area = []
for cnt in contours:
    area.append(cv2.contourArea(cnt))
area = np.array(area)
# 'idx' first holds the max AREA VALUE, then is rebound to its index.
idx = np.max(area)
idx = np.where(area==idx)[0][0]
cnt = contours[idx]
hull = cv2.convexHull(cnt, returnPoints = False)
defects = cv2.convexityDefects(cnt, hull)
for d in defects:
    s, e, f, appr = d[0]
    # Mark each convexity-defect point (f is an index into the contour).
    cv2.circle(imgAnd, tuple(cnt[f][0]), 2, 255, -1)
# Distance transform: its global maximum approximates the region centre.
dt = cv2.distanceTransform(otsu, cv2.DIST_L2, 3)
cv2.normalize(dt, dt, 0.0, 1.0, cv2.NORM_MINMAX);
cv2.imshow("img", dt);cv2.waitKey(0)
idx = np.where(dt==np.max(dt))
# np.where gives (rows, cols); pt is (x, y).
pt = (idx[1][0], idx[0][0])
defPts = cnt[defects[:, 0, 2]]
defPts = defPts.reshape(-1,2)
# Keep only defects past these empirical top/left cut-offs of the frame.
thrDistTop = int(0.4*height)
thrDistLeft = int(0.2*width)
defPts = defPts[np.where(defPts[:, 1] > thrDistTop)[0]]
defPts = defPts[np.where(defPts[:, 0] > thrDistLeft)[0]]
defPtsC = defPts.copy()
# Sort the defect points angularly around the distance-transform centre so
# fillPoly below receives a non-self-intersecting polygon.
defPts = orderClockwise(defPtsC, pt)
boundImg = np.zeros((height,width), np.uint8)
cv2.fillPoly(boundImg, [defPts], 255)
imgRoi = cv2.bitwise_and(img, boundImg)
imgRoi = cv2.adaptiveThreshold(imgRoi, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
kernel = np.ones((5,5),np.uint8)
# Erode the ROI mask so the polygon border itself is excluded from the ROI.
boundImg = cv2.erode(boundImg,kernel,iterations = 1)
imgRoi = cv2.bitwise_and(imgRoi, boundImg)
cv2.imshow("img", imgRoi); cv2.waitKey(0)
imgRoiC = imgRoi.copy()
_, contours, hierarchy = cv2.findContours(imgRoiC, 1, 2)
# NOTE(review): list.sort(cmp_function) is Python 2 only -- Python 3 needs
# key=functools.cmp_to_key(contourAreaComparator); confirm target version.
contours.sort(contourAreaComparator)
l = len(contours)
# Indices l-6 .. l-2: five large contours, excluding the single largest
# (presumably the ROI boundary itself -- confirm).
ll = np.arange(l-6, l-1)
imgColor = cv2.imread(sys.argv[1])
for idx in ll:
    cv2.drawContours(imgRoi, contours, idx, 127, 3)
    cv2.drawContours(imgColor, contours, idx, (0, 0, 255), 3)
cv2.imshow("img", imgColor); cv2.waitKey(0)
| true
| true
|
1c49844b5764b12e0b5ad75cf890bacc50de35c9
| 16,557
|
py
|
Python
|
tests/core/test_lightning_optimizer.py
|
aribornstein/pytorch-lightning
|
ca68cac57ad8eefc9b477ee126eb42a483f27a39
|
[
"Apache-2.0"
] | 1
|
2021-01-18T06:31:43.000Z
|
2021-01-18T06:31:43.000Z
|
tests/core/test_lightning_optimizer.py
|
aribornstein/pytorch-lightning
|
ca68cac57ad8eefc9b477ee126eb42a483f27a39
|
[
"Apache-2.0"
] | 8
|
2020-10-27T22:39:24.000Z
|
2021-01-24T16:41:34.000Z
|
tests/core/test_lightning_optimizer.py
|
tarepan/pytorch-lightning
|
0b7f5a88a0f4691ec228c4708295a10d403fd592
|
[
"Apache-2.0"
] | null | null | null |
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam, Optimizer
import pytorch_lightning as pl
from pytorch_lightning import LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_utils import is_overridden
from tests.base.boring_model import BoringModel, RandomDataset, RandomDictDataset, RandomDictStringDataset
def test_lightning_optimizer(tmpdir):
    """
    Test that optimizers are correctly wrapped by our LightningOptimizer
    (the wrapper's repr reflects the underlying SGD's param groups).
    """
    class TestModel(BoringModel):
        def configure_optimizers(self):
            optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
            return [optimizer], [lr_scheduler]

    model = TestModel()
    trainer = Trainer(
        # Fix: write logs/checkpoints into the pytest-provided tmpdir instead
        # of polluting the current working directory (fixture was unused).
        default_root_dir=str(tmpdir),
        limit_train_batches=1,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)

    # 'lr': 0.01 (not 0.1) -- presumably the StepLR (default gamma=0.1)
    # stepped once after the single epoch; confirm if scheduler defaults change.
    groups = "{'dampening': 0, 'initial_lr': 0.1, 'lr': 0.01, 'momentum': 0, 'nesterov': False, 'weight_decay': 0}"
    expected = f"LightningSGD(groups=[{groups}])"
    assert trainer._lightning_optimizers[0].__repr__() == expected
def test_lightning_optimizer_from_user(tmpdir):
    """
    Test that the user can wrap the optimizer in LightningOptimizer
    themselves. Not recommended.
    """
    class TestModel(BoringModel):
        def configure_optimizers(self):
            optimizer = torch.optim.Adam(self.layer.parameters(), lr=0.1)
            # User-side wrapping: the trainer must not double-wrap it.
            optimizer = LightningOptimizer(optimizer)
            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
            return [optimizer], [lr_scheduler]

    model = TestModel()
    trainer = Trainer(
        # Fix: use the pytest tmpdir instead of the CWD (fixture was unused).
        default_root_dir=str(tmpdir),
        limit_train_batches=1,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)

    # 'lr': 0.01 -- presumably StepLR (default gamma=0.1) stepped once; confirm.
    groups = "{'amsgrad': False, 'betas': (0.9, 0.999), 'eps': 1e-08, 'initial_lr': 0.1, 'lr': 0.01, 'weight_decay': 0}"
    expected = f"LightningAdam(groups=[{groups}])"
    assert trainer._lightning_optimizers[0].__repr__() == expected
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step, tmpdir):
    """
    Test that the user can use our LightningOptimizer in manual optimization.
    Not recommended for now.
    """
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # Manual optimization: training_step drives the optimizers itself.
            self.automatic_optimization = False

        def training_step(self, batch, batch_idx, optimizer_idx=None):
            (opt_1, opt_2) = self.optimizers()
            assert isinstance(opt_1, LightningOptimizer)
            assert isinstance(opt_2, LightningOptimizer)

            output = self.layer(batch)
            loss_1 = self.loss(batch, output)
            self.manual_backward(loss_1, opt_1)
            opt_1.step()

            def closure():
                output = self.layer(batch)
                loss_2 = self.loss(batch, output)
                self.manual_backward(loss_2, opt_2)
            opt_2.step(closure=closure)

        def configure_optimizers(self):
            optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
            # opt_1 accumulates over 4 batches -> 8 batches yield 2 SGD steps.
            optimizer_1 = LightningOptimizer(optimizer_1, 4)

            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
            return [optimizer_1, optimizer_2], [lr_scheduler]

    model = TestModel()
    model.training_step_end = None
    model.training_epoch_end = None
    trainer = Trainer(
        # Fix: use the pytest tmpdir instead of the CWD (fixture was unused).
        default_root_dir=str(tmpdir),
        limit_train_batches=8,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)

    # SGD: 8 batches / accumulation of 4 = 2 steps; Adam: one step per batch.
    assert len(mock_sgd_step.mock_calls) == 2
    assert len(mock_adam_step.mock_calls) == 8
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_sgd_step, mock_adam_step, tmpdir):
    """
    Test LightningOptimizer in manual optimization combined with trainer-level
    accumulate_grad_batches. Not recommended.
    """
    class TestModel(BoringModel):
        def __init__(self):
            super().__init__()
            # Manual optimization: training_step drives the optimizers itself.
            self.automatic_optimization = False

        def training_step(self, batch, batch_idx, optimizer_idx=None):
            (opt_1, opt_2) = self.optimizers()
            assert isinstance(opt_1, LightningOptimizer)
            assert isinstance(opt_2, LightningOptimizer)

            output = self.layer(batch)
            loss_1 = self.loss(batch, output)
            self.manual_backward(loss_1, opt_1)
            opt_1.step()

            def closure():
                output = self.layer(batch)
                loss_2 = self.loss(batch, output)
                self.manual_backward(loss_2, opt_2)
            opt_2.step(closure=closure)

        def configure_optimizers(self):
            optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
            # Explicit per-optimizer accumulation of 4 for opt_1.
            optimizer_1 = LightningOptimizer(optimizer_1, 4)

            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
            return [optimizer_1, optimizer_2], [lr_scheduler]

    model = TestModel()
    model.training_step_end = None
    model.training_epoch_end = None
    trainer = Trainer(
        # Fix: use the pytest tmpdir instead of the CWD (fixture was unused).
        default_root_dir=str(tmpdir),
        limit_train_batches=8,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
        accumulate_grad_batches=2,
    )
    trainer.fit(model)

    # SGD: explicit accumulation of 4 -> 2 steps over 8 batches.
    # Adam: 4 of 8 steps -- presumably the trainer-level
    # accumulate_grad_batches=2 halves the closure-driven steps; confirm.
    assert len(mock_sgd_step.mock_calls) == 2
    assert len(mock_adam_step.mock_calls) == 4
def test_state(tmpdir):
    # LightningOptimizer must behave as a transparent proxy for the wrapped
    # optimizer: state, param_groups, defaults and isinstance checks all
    # mirror the underlying torch.optim.Adam.
    model = torch.nn.Linear(3, 4)
    optimizer = torch.optim.Adam(model.parameters())
    lightning_optimizer = LightningOptimizer(optimizer)
    # test state (readable and writable through the proxy)
    assert optimizer.state == lightning_optimizer.state
    lightning_optimizer.state = optimizer.state
    assert optimizer.state == lightning_optimizer.state
    # test param_groups
    assert optimizer.param_groups == lightning_optimizer.param_groups
    lightning_optimizer.param_groups = optimizer.param_groups
    assert optimizer.param_groups == lightning_optimizer.param_groups
    # test defaults
    assert optimizer.defaults == lightning_optimizer.defaults
    lightning_optimizer.defaults = optimizer.defaults
    assert optimizer.defaults == lightning_optimizer.defaults
    # The proxy must also satisfy isinstance checks against the wrapped
    # optimizer class and the torch Optimizer base class.
    assert isinstance(lightning_optimizer, LightningOptimizer)
    assert isinstance(lightning_optimizer, Adam)
    assert isinstance(lightning_optimizer, Optimizer)
    lightning_dict = {}
    # Attributes added by the wrapper itself; every other attribute must
    # mirror the wrapped optimizer's __dict__ exactly.
    special_attrs = ["_accumulate_grad_batches", "_optimizer", "_optimizer_idx", "_support_closure",
                     "_trainer", "__getstate__", "__setstate__", "state_dict", "load_state_dict",
                     "zero_grad", "__setstate__", "add_param_group"]
    for k, v in lightning_optimizer.__dict__.items():
        if k not in special_attrs:
            lightning_dict[k] = v
    assert lightning_dict == optimizer.__dict__
    assert optimizer.state_dict() == lightning_optimizer.state_dict()
    assert optimizer.state == lightning_optimizer.state
def test_lightning_optimizer_automatic_optimization(tmpdir):
    """
    Test lightning optimizer works with make_optimizer_step in
    automatic_optimization.
    """
    class TestModel(BoringModel):
        def training_step(self, batch, batch_idx, optimizer_idx=None):
            output = self.layer(batch)
            loss = self.loss(batch, output)
            return {"loss": loss}

        def training_epoch_end(self, outputs):
            outputs = sum(outputs, [])
            torch.stack([x["loss"] for x in outputs]).mean()

        def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                           optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
            assert optimizer_closure.__name__ == "train_step_and_backward_closure"
            # Only every other batch actually applies the optimizer update.
            optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 2 == 0)

        def configure_optimizers(self):
            optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
            optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
            optimizer_1 = LightningOptimizer(optimizer_1, 4)

            lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
            return [optimizer_1, optimizer_2], [lr_scheduler]

    model = TestModel()
    trainer = Trainer(
        # Fix: use the pytest tmpdir instead of the CWD (fixture was unused).
        default_root_dir=str(tmpdir),
        limit_train_batches=10,
        limit_val_batches=1,
        max_epochs=1,
        weights_summary=None,
    )
    trainer.fit(model)
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
    """
    Test lightning optimizer works with optimizer_zero_grad overrides in
    automatic_optimization.
    """
    with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
            patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:

        class TestModel(BoringModel):
            def training_step(self, batch, batch_idx, optimizer_idx=None):
                output = self.layer(batch)
                loss = self.loss(batch, output)
                return {"loss": loss}

            def training_epoch_end(self, outputs):
                outputs = sum(outputs, [])
                torch.stack([x["loss"] for x in outputs]).mean()

            def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
                # SGD (idx 0) zeroes grads every 2nd batch, Adam (idx 1) every 5th.
                if optimizer_idx == 0:
                    if batch_idx % 2 == 0:
                        optimizer.zero_grad()
                if optimizer_idx == 1:
                    if batch_idx % 5 == 0:
                        optimizer.zero_grad()

            def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                               optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
                assert optimizer_closure.__name__ == "train_step_and_backward_closure"
                optimizer.step(closure=optimizer_closure)

            def configure_optimizers(self):
                optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
                optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
                lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
                return [optimizer_1, optimizer_2], [lr_scheduler]

        model = TestModel()
        trainer = Trainer(
            # Fix: use the pytest tmpdir instead of the CWD (fixture was unused).
            default_root_dir=str(tmpdir),
            limit_train_batches=10,
            limit_val_batches=1,
            max_epochs=1,
            weights_summary=None,
        )
        trainer.fit(model)
        # 10 batches: Adam zeroed at batch_idx 0 and 5; SGD at 0, 2, 4, 6, 8.
        assert adam_zero_grad.call_count == 2
        assert sgd_zero_grad.call_count == 5
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_optimizer_step(tmpdir):
    """
    Test lightning optimizer with both an optimizer_zero_grad override and
    make_optimizer_step in automatic_optimization.

    NOTE(review): the try/except accepts BOTH outcomes (counts verified OR
    MisconfigurationException raised); pytest.raises would pin one. Behavior
    kept as-is.
    """
    try:
        with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
                patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:

            class TestModel(BoringModel):
                def training_step(self, batch, batch_idx, optimizer_idx=None):
                    output = self.layer(batch)
                    loss = self.loss(batch, output)
                    return {"loss": loss}

                def training_epoch_end(self, outputs):
                    outputs = sum(outputs, [])
                    torch.stack([x["loss"] for x in outputs]).mean()

                def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
                    # SGD (idx 0) zeroes every 2nd batch, Adam (idx 1) every 5th.
                    if optimizer_idx == 0:
                        if batch_idx % 2 == 0:
                            optimizer.zero_grad()
                    if optimizer_idx == 1:
                        if batch_idx % 5 == 0:
                            optimizer.zero_grad()

                def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                                   optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
                    assert optimizer_closure.__name__ == "train_step_and_backward_closure"
                    if optimizer_idx == 0:
                        # SGD additionally gated: actual step every 3rd batch.
                        optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 3 == 0)
                        return
                    optimizer.step(closure=optimizer_closure)

                def configure_optimizers(self):
                    optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
                    optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
                    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
                    return [optimizer_1, optimizer_2], [lr_scheduler]

            model = TestModel()
            trainer = Trainer(
                # Fix: use the pytest tmpdir instead of the CWD (fixture unused).
                default_root_dir=str(tmpdir),
                limit_train_batches=20,
                limit_val_batches=1,
                max_epochs=1,
                weights_summary=None,
            )
            trainer.fit(model)
            # 20 batches: Adam zeroed at 0, 5, 10, 15; SGD at every even batch.
            assert adam_zero_grad.call_count == 4
            assert sgd_zero_grad.call_count == 10

    except MisconfigurationException as e:
        assert "When overriding LightningModule `optimizer_zero_grad`, make_optimizer_step is not allowed" in str(e)
def test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir):
    """
    Test lightning optimizer works with make_optimizer_step in
    automatic_optimization (None means "decide automatically").
    """
    with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
            patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:

        class TestModel(BoringModel):
            def training_step(self, batch, batch_idx, optimizer_idx=None):
                output = self.layer(batch)
                loss = self.loss(batch, output)
                return {"loss": loss}

            def training_epoch_end(self, outputs):
                outputs = sum(outputs, [])
                torch.stack([x["loss"] for x in outputs]).mean()

            def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
                               optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
                assert optimizer_closure.__name__ == "train_step_and_backward_closure"
                # SGD (idx 0) steps every 4th batch; Adam passes None and
                # therefore steps on every batch.
                make_optimizer_step = None
                if optimizer_idx == 0:
                    make_optimizer_step = batch_idx % 4 == 0
                optimizer.step(closure=optimizer_closure, make_optimizer_step=make_optimizer_step)

            def configure_optimizers(self):
                optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
                optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
                lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
                return [optimizer_1, optimizer_2], [lr_scheduler]

        model = TestModel()
        trainer = Trainer(
            # Fix: use the pytest tmpdir instead of the CWD (fixture unused).
            default_root_dir=str(tmpdir),
            limit_train_batches=20,
            limit_val_batches=1,
            max_epochs=1,
            weights_summary=None,
        )
        trainer.fit(model)
        # Adam zero_grad once per batch (20); SGD only when it stepped (5).
        assert adam_zero_grad.call_count == 20
        assert sgd_zero_grad.call_count == 5
| 38.684579
| 120
| 0.643474
|
import os
from unittest.mock import patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from torch.optim import Adam, Optimizer
import pytorch_lightning as pl
from pytorch_lightning import LightningModule, seed_everything, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.core.optimizer import LightningOptimizer
from pytorch_lightning.utilities.exceptions import MisconfigurationException
from pytorch_lightning.utilities.model_utils import is_overridden
from tests.base.boring_model import BoringModel, RandomDataset, RandomDictDataset, RandomDictStringDataset
def test_lightning_optimizer(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.SGD(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'dampening': 0, 'initial_lr': 0.1, 'lr': 0.01, 'momentum': 0, 'nesterov': False, 'weight_decay': 0}"
expected = f"LightningSGD(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
def test_lightning_optimizer_from_user(tmpdir):
class TestModel(BoringModel):
def configure_optimizers(self):
optimizer = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer = LightningOptimizer(optimizer)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1)
return [optimizer], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=1,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
groups = "{'amsgrad': False, 'betas': (0.9, 0.999), 'eps': 1e-08, 'initial_lr': 0.1, 'lr': 0.01, 'weight_decay': 0}"
expected = f"LightningAdam(groups=[{groups}])"
assert trainer._lightning_optimizers[0].__repr__() == expected
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization(mock_sgd_step, mock_adam_step, tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 8
@patch("torch.optim.Adam.step", autospec=True)
@patch("torch.optim.SGD.step", autospec=True)
def test_lightning_optimizer_manual_optimization_and_accumulated_gradients(mock_sgd_step, mock_adam_step, tmpdir):
class TestModel(BoringModel):
def __init__(self):
super().__init__()
self.automatic_optimization = False
def training_step(self, batch, batch_idx, optimizer_idx=None):
(opt_1, opt_2) = self.optimizers()
assert isinstance(opt_1, LightningOptimizer)
assert isinstance(opt_2, LightningOptimizer)
output = self.layer(batch)
loss_1 = self.loss(batch, output)
self.manual_backward(loss_1, opt_1)
opt_1.step()
def closure():
output = self.layer(batch)
loss_2 = self.loss(batch, output)
self.manual_backward(loss_2, opt_2)
opt_2.step(closure=closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
model.training_step_end = None
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=8,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
accumulate_grad_batches=2,
)
trainer.fit(model)
assert len(mock_sgd_step.mock_calls) == 2
assert len(mock_adam_step.mock_calls) == 4
def test_state(tmpdir):
model = torch.nn.Linear(3, 4)
optimizer = torch.optim.Adam(model.parameters())
lightning_optimizer = LightningOptimizer(optimizer)
assert optimizer.state == lightning_optimizer.state
lightning_optimizer.state = optimizer.state
assert optimizer.state == lightning_optimizer.state
assert optimizer.param_groups == lightning_optimizer.param_groups
lightning_optimizer.param_groups = optimizer.param_groups
assert optimizer.param_groups == lightning_optimizer.param_groups
assert optimizer.defaults == lightning_optimizer.defaults
lightning_optimizer.defaults = optimizer.defaults
assert optimizer.defaults == lightning_optimizer.defaults
assert isinstance(lightning_optimizer, LightningOptimizer)
assert isinstance(lightning_optimizer, Adam)
assert isinstance(lightning_optimizer, Optimizer)
lightning_dict = {}
special_attrs = ["_accumulate_grad_batches", "_optimizer", "_optimizer_idx", "_support_closure",
"_trainer", "__getstate__", "__setstate__", "state_dict", "load_state_dict",
"zero_grad", "__setstate__", "add_param_group"]
for k, v in lightning_optimizer.__dict__.items():
if k not in special_attrs:
lightning_dict[k] = v
assert lightning_dict == optimizer.__dict__
assert optimizer.state_dict() == lightning_optimizer.state_dict()
assert optimizer.state == lightning_optimizer.state
def test_lightning_optimizer_automatic_optimization(tmpdir):
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 2 == 0)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
optimizer_1 = LightningOptimizer(optimizer_1, 4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad(tmpdir):
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=10,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 2
assert sgd_zero_grad.call_count == 5
def test_lightning_optimizer_automatic_optimization_optimizer_zero_grad_make_optimizer_step(tmpdir):
try:
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_zero_grad(self, epoch: int, batch_idx: int, optimizer: Optimizer, optimizer_idx: int):
if optimizer_idx == 0:
if batch_idx % 2 == 0:
optimizer.zero_grad()
if optimizer_idx == 1:
if batch_idx % 5 == 0:
optimizer.zero_grad()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
if optimizer_idx == 0:
optimizer.step(closure=optimizer_closure, make_optimizer_step=batch_idx % 3 == 0)
return
optimizer.step(closure=optimizer_closure)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 4
assert sgd_zero_grad.call_count == 10
except MisconfigurationException as e:
assert "When overriding LightningModule `optimizer_zero_grad`, make_optimizer_step is not allowed" in str(e)
def test_lightning_optimizer_automatic_optimization_make_optimizer_step_2(tmpdir):
with patch("torch.optim.Adam.zero_grad") as adam_zero_grad, \
patch("torch.optim.SGD.zero_grad") as sgd_zero_grad:
class TestModel(BoringModel):
def training_step(self, batch, batch_idx, optimizer_idx=None):
output = self.layer(batch)
loss = self.loss(batch, output)
return {"loss": loss}
def training_epoch_end(self, outputs):
outputs = sum(outputs, [])
torch.stack([x["loss"] for x in outputs]).mean()
def optimizer_step(self, epoch, batch_idx, optimizer, optimizer_idx,
optimizer_closure, on_tpu, using_native_amp, using_lbfgs):
assert optimizer_closure.__name__ == "train_step_and_backward_closure"
make_optimizer_step = None
if optimizer_idx == 0:
make_optimizer_step = batch_idx % 4 == 0
optimizer.step(closure=optimizer_closure, make_optimizer_step=make_optimizer_step)
def configure_optimizers(self):
optimizer_1 = torch.optim.SGD(self.layer.parameters(), lr=0.1)
optimizer_2 = torch.optim.Adam(self.layer.parameters(), lr=0.1)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_1, step_size=1)
return [optimizer_1, optimizer_2], [lr_scheduler]
model = TestModel()
trainer = Trainer(
default_root_dir=os.getcwd(),
limit_train_batches=20,
limit_val_batches=1,
max_epochs=1,
weights_summary=None,
)
trainer.fit(model)
assert adam_zero_grad.call_count == 20
assert sgd_zero_grad.call_count == 5
| true
| true
|
1c49853e566203e2d86ea511f9e25cee8a9845fb
| 2,074
|
py
|
Python
|
test/pycore/schema_gen.py
|
iGeeky/open-account
|
8e1329cddcb97517a841f3d98786ba4d76065e2b
|
[
"MIT"
] | 10
|
2021-01-17T14:12:01.000Z
|
2021-07-12T07:29:29.000Z
|
test/pycore/schema_gen.py
|
iGeeky/open-account
|
8e1329cddcb97517a841f3d98786ba4d76065e2b
|
[
"MIT"
] | null | null | null |
test/pycore/schema_gen.py
|
iGeeky/open-account
|
8e1329cddcb97517a841f3d98786ba4d76065e2b
|
[
"MIT"
] | 1
|
2022-01-02T15:18:40.000Z
|
2022-01-02T15:18:40.000Z
|
# coding=utf8
def get_type(value):
    """Map a Python value to its JSON-schema type name.

    Unrecognised types yield the literal string 'unknow' (spelling kept
    for backward compatibility with existing callers).
    """
    # 'null' must be decided by identity: the original `value == None`
    # invoked arbitrary __eq__ implementations (e.g. numpy arrays).
    if value is None:
        return 'null'
    # Exact-type lookup mirrors the original `type(value) == T` chain.
    return {
        dict: 'object',
        list: 'array',
        str: 'string',
        int: 'integer',
        float: 'number',
        bool: 'boolean',
    }.get(type(value), 'unknow')


def generate_schema(field, value, **opts):
    """Recursively build a JSON schema describing ``value``.

    field -- name of the field ``value`` came from (None at the root).
    Options (all optional keyword arguments):
      enums           -- when truthy, every scalar gets ``enum: [value]``.
      forceEnumFields -- mapping of field names that always get an enum.
      deep            -- maximum recursion depth (default 10).
      curLevel        -- internal: current depth, managed by the recursion.

    Raises TypeError (a BaseException subclass, so existing broad handlers
    still catch it) for values of unsupported type.
    """
    t = get_type(value)
    schema = {"type": t}
    opts = opts or {}
    enums = opts.get("enums", False)
    forceEnumFields = opts.get("forceEnumFields", {})
    deep = opts.get("deep", 10)
    level = opts.get("curLevel", 0) + 1
    opts["curLevel"] = level
    if t == 'object':
        if level <= deep:
            properties = {}
            required = []
            for subField, childValue in value.items():
                properties[subField] = generate_schema(subField, childValue, **opts.copy())
                required.append(subField)
            schema["properties"] = properties
            schema["required"] = required
    elif t == 'array':
        # Only the first element is sampled to describe the item schema.
        if level <= deep and len(value) > 0:
            schema["items"] = generate_schema(None, value[0], **opts.copy())
    elif t in ('number', 'string', 'integer', 'boolean'):
        # Fix: .get() instead of [] -- the original raised KeyError whenever
        # the field was absent from a non-empty forceEnumFields mapping.
        if enums or (field and forceEnumFields and forceEnumFields.get(field)):
            schema["enum"] = [value]
    elif t == 'null':
        # null is not auto-generated: emitting "type": "null" is error-prone.
        del schema["type"]
    else:
        raise TypeError('UnKnown type:%s, value:%s' % (t, value))
    return schema


def auto_schema(value, **opts):
    """Build a schema for ``value`` (root-level form of generate_schema)."""
    return generate_schema(None, value, **opts)
def set_schema_enums(schema, enums):
    """Attach ``enum`` constraints to the matching entries of ``schema``.

    ``enums`` maps field name -> allowed value(s); a scalar value is wrapped
    in a single-element list.  Fields missing from ``schema`` (or mapped to
    a falsy sub-schema) are silently skipped.
    """
    for name, allowed in enums.items():
        target = schema.get(name)
        if not target:
            continue
        if type(allowed) != list:
            allowed = [allowed]
        target["enum"] = allowed
| 28.805556
| 92
| 0.540501
|
def get_type(value):
t = type(value)
if t == dict:
t = 'object'
elif t == list:
t = 'array'
elif value == None:
t = 'null'
elif t == str:
t = 'string'
elif t == int:
t = 'integer'
elif t == float:
t = 'number'
elif t == bool:
t = 'boolean'
else:
t = 'unknow'
return t
def generate_schema(field, value, **opts):
t = get_type(value)
schema = { "type": t }
opts = opts or {}
enums = opts.get("enums", False)
forceEnumFields = opts.get("forceEnumFields", {})
deep = opts.get("deep", 10)
curLevel = opts.get("curLevel", 0)
level = curLevel + 1
opts["curLevel"] = level
if t == 'object':
if level <= deep:
properties = {}
required = []
subFields = value.keys()
for subField in subFields:
childValue = value[subField]
properties[subField] = generate_schema(subField, childValue, **opts.copy())
required.append(subField)
schema["properties"] = properties
schema["required"] = required
elif t == 'array':
if level <= deep and len(value) > 0:
schema["items"] = generate_schema(None, value[0], **opts.copy())
elif t == 'number' or t == 'float' or t == 'string' or t == 'integer' or t == 'boolean':
if enums or (field and forceEnumFields and forceEnumFields[field]):
schema["enum"] = [value]
elif t == 'null': del(schema["type"])
else:
raise BaseException('UnKnown type:%s, value:%s' % (t, value))
return schema
def auto_schema(value, **opts):
return generate_schema(None, value, **opts)
def set_schema_enums(schema, enums):
    """Attach ``enum`` constraints to the per-field schema dicts in *schema*.

    Fields named in *enums* but missing from *schema* are skipped; scalar
    values are wrapped in a one-element list.
    """
    for name in enums:
        target = schema.get(name)
        if not target:
            continue
        allowed = enums[name]
        # A bare scalar becomes a single-entry enum list.
        if type(allowed) != list:
            allowed = [allowed]
        target["enum"] = allowed
| true
| true
|
1c4986f6dbd679f80bc76138d54275a1e2e4f850
| 1,898
|
py
|
Python
|
nova/api/openstack/compute/views/addresses.py
|
hemanthnakkina/nova
|
3756f4ffa6ff670bfd6b491a12b833da0a36b017
|
[
"Apache-2.0"
] | 2
|
2021-10-11T04:56:25.000Z
|
2022-02-16T08:49:29.000Z
|
nova/api/openstack/compute/views/addresses.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 132
|
2017-03-27T11:31:52.000Z
|
2022-03-30T08:45:02.000Z
|
nova/api/openstack/compute/views/addresses.py
|
woraser/nova
|
fc3890667e4971e3f0f35ac921c2a6c25f72adec
|
[
"Apache-2.0"
] | 8
|
2017-03-27T07:50:38.000Z
|
2020-02-14T16:55:56.000Z
|
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import itertools
from nova.api.openstack import common
class ViewBuilder(common.ViewBuilder):
    """Models server addresses as a dictionary."""

    _collection_name = "addresses"

    def basic(self, ip, extend_address=False):
        """Return a dictionary describing an IP address."""
        entry = {
            "version": ip["version"],
            "addr": ip["address"],
        }
        if extend_address:
            # Extended attributes carry the address type and MAC.
            entry["OS-EXT-IPS:type"] = ip["type"]
            entry["OS-EXT-IPS-MAC:mac_addr"] = ip['mac_address']
        return entry

    def show(self, network, label, extend_address=False):
        """Returns a dictionary describing a network."""
        every_ip = itertools.chain(network["ips"], network["floating_ips"])
        return {label: [self.basic(ip, extend_address) for ip in every_ip]}

    def index(self, networks, extend_address=False):
        """Return a dictionary describing a list of networks."""
        addresses = collections.OrderedDict(
            (label, self.show(network, label, extend_address)[label])
            for label, network in networks.items())
        return dict(addresses=addresses)
| 36.5
| 78
| 0.658061
|
import collections
import itertools
from nova.api.openstack import common
class ViewBuilder(common.ViewBuilder):
    """Models server addresses as a dictionary keyed by network label."""
    _collection_name = "addresses"
    def basic(self, ip, extend_address=False):
        """Return {'version', 'addr'} for *ip*; extended form adds type and MAC."""
        address = {
            "version": ip["version"],
            "addr": ip["address"],
        }
        if extend_address:
            address.update({
                "OS-EXT-IPS:type": ip["type"],
                "OS-EXT-IPS-MAC:mac_addr": ip['mac_address'],
            })
        return address
    def show(self, network, label, extend_address=False):
        """Return {label: [address, ...]} covering fixed and floating IPs."""
        all_ips = itertools.chain(network["ips"], network["floating_ips"])
        return {label: [self.basic(ip, extend_address) for ip in all_ips]}
    def index(self, networks, extend_address=False):
        """Return {'addresses': {label: [address, ...], ...}} preserving order."""
        addresses = collections.OrderedDict()
        for label, network in networks.items():
            network_dict = self.show(network, label, extend_address)
            addresses[label] = network_dict[label]
        return dict(addresses=addresses)
| true
| true
|
1c49873014937e8fca246eefd2074eda95180dec
| 4,361
|
py
|
Python
|
examples/task_manager_plugin/task_manager_plugin_app.py
|
pxlc/PyWebEngineGui
|
12391f78e3708a7f61154331a01a193630f8f2e4
|
[
"MIT"
] | 1
|
2021-11-09T07:51:09.000Z
|
2021-11-09T07:51:09.000Z
|
examples/task_manager_plugin/task_manager_plugin_app.py
|
pxlc/PyWebEngineGui
|
12391f78e3708a7f61154331a01a193630f8f2e4
|
[
"MIT"
] | null | null | null |
examples/task_manager_plugin/task_manager_plugin_app.py
|
pxlc/PyWebEngineGui
|
12391f78e3708a7f61154331a01a193630f8f2e4
|
[
"MIT"
] | 1
|
2022-03-29T09:01:18.000Z
|
2022-03-29T09:01:18.000Z
|
# -------------------------------------------------------------------------------
# MIT License
#
# Copyright (c) 2021 pxlc@github
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# -------------------------------------------------------------------------------
import os
import sys
import json
import logging
from PyWebEngineGui.pweg import WebEngineDialogBase, register_op, launch_main_app
from directory_listing_task import directory_listing_task_validation
from directory_listing_task import directory_listing_task
class TaskManagerPluginApp(WebEngineDialogBase):
    """Example dialog app wiring the 'TaskManager' plugin into PyWebEngineGui."""

    def __init__(self, parent=None, html_filepath='', app_title='', width=500, height=200,
                 log_level_str='INFO', log_to_shell=True, is_modal_dialog=True):
        """Build the dialog, request the TaskManager plugin, and register
        the 'DirectoryListing' task with its validation callback.

        NOTE(review): the incoming ``html_filepath`` parameter is ignored --
        an empty string is always forwarded to the base class; confirm
        whether that is intentional.
        """
        NEEDED_PLUGINS = ['TaskManager']
        super(TaskManagerPluginApp, self).__init__(parent=parent, app_module_path=os.path.abspath(__file__),
                                                   html_filepath='', app_title=app_title,
                                                   width=width, height=height,
                                                   requested_plugins_list=NEEDED_PLUGINS,
                                                   override_session_log_filepath='',
                                                   log_level_str=log_level_str,
                                                   log_to_shell=log_to_shell,
                                                   is_modal_dialog=is_modal_dialog)
        task_plugin = self.get_plugin_instance('TaskManager')
        task_plugin.setup_task('DirectoryListing', directory_listing_task_validation,
                               directory_listing_task)

    # --------------------------------------------------------------------------------------------------------
    # "setup_extra_template_vars()" is a REQUIRED override method.
    #
    # Establish any values for template vars in this method that you need to use in your HTML template file.
    # --------------------------------------------------------------------------------------------------------
    def setup_extra_template_vars(self):
        return {
            'APP_HEADER': '%s Example App' % self.get_app_title(),
        }

    # --------------------------------------------------------------------------------------------------------
    # Register callback op handlers with @register_op; op_data is the dict received from the
    # JavaScript side. Do NOT register a handler named "print_message" (reserved by the base class).
    # --------------------------------------------------------------------------------------------------------
    @register_op
    def test_one_js_click(self, op_data):
        """Demo op handler: log the received payload, then push a reply to JS."""
        self.info('')
        self.info(':: got op "test_one_js_click" with data "{0}"'.format(op_data))
        self.info('')
        self.send_to_webbrowser('test_one', {'x': 999, 'y': 808, 'z': 345})
if __name__ == '__main__':
    # Script entry point: launch the example dialog and exit with its code.
    sys.exit(launch_main_app(TaskManagerPluginApp, app_title='Task Manager Example', width=600, height=400))
| 44.958763
| 110
| 0.561798
|
import os
import sys
import json
import logging
from PyWebEngineGui.pweg import WebEngineDialogBase, register_op, launch_main_app
from directory_listing_task import directory_listing_task_validation
from directory_listing_task import directory_listing_task
class TaskManagerPluginApp(WebEngineDialogBase):
    """Example dialog app wiring the 'TaskManager' plugin into PyWebEngineGui."""
    def __init__(self, parent=None, html_filepath='', app_title='', width=500, height=200,
                 log_level_str='INFO', log_to_shell=True, is_modal_dialog=True):
        """Build the dialog, request the TaskManager plugin, and register
        the 'DirectoryListing' task with its validation callback.
        NOTE(review): ``html_filepath`` is ignored -- '' is always forwarded.
        """
        NEEDED_PLUGINS = ['TaskManager']
        super(TaskManagerPluginApp, self).__init__(parent=parent, app_module_path=os.path.abspath(__file__),
                                                   html_filepath='', app_title=app_title,
                                                   width=width, height=height,
                                                   requested_plugins_list=NEEDED_PLUGINS,
                                                   override_session_log_filepath='',
                                                   log_level_str=log_level_str,
                                                   log_to_shell=log_to_shell,
                                                   is_modal_dialog=is_modal_dialog)
        task_plugin = self.get_plugin_instance('TaskManager')
        task_plugin.setup_task('DirectoryListing', directory_listing_task_validation,
                               directory_listing_task)
    def setup_extra_template_vars(self):
        """Required override: template vars consumed by the HTML template."""
        return {
            'APP_HEADER': '%s Example App' % self.get_app_title(),
        }
    @register_op
    def test_one_js_click(self, op_data):
        """Demo op handler: log the JS payload, then push a reply to the browser."""
        self.info('')
        self.info(':: got op "test_one_js_click" with data "{0}"'.format(op_data))
        self.info('')
        self.send_to_webbrowser('test_one', {'x': 999, 'y': 808, 'z': 345})
if __name__ == '__main__':
    # Script entry point: launch the example dialog and exit with its code.
    sys.exit(launch_main_app(TaskManagerPluginApp, app_title='Task Manager Example', width=600, height=400))
| true
| true
|
1c4987385c9c55d21a2f9a9cc2cc5c8df95c269c
| 829
|
py
|
Python
|
python/qitoolchain/actions/list.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qitoolchain/actions/list.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
python/qitoolchain/actions/list.py
|
vbarbaresi/qibuild
|
eab6b815fe0af49ea5c41ccddcd0dff2363410e1
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) 2012-2018 SoftBank Robotics. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYING file.
"""Display the toolchains names.
"""
from qisys import ui
import qisys.worktree
import qisys.parsers
import qitoolchain
def configure_parser(parser):
    """Configure parser for this action: only the default qisys options."""
    qisys.parsers.default_parser(parser)
def do(args):  # pylint: disable=unused-argument
    """List every known toolchain, or hint at creation when none exist."""
    names = qitoolchain.get_tc_names()
    if not names:
        ui.info("No toolchain yet", "\n",
                "Use `qitoolchain create` to create a new toolchain")
        return
    ui.info("Known toolchains:")
    for name in names:
        ui.info("*", name)
    ui.info("Use ``qitoolchain info <tc_name>`` for more info")
| 26.741935
| 72
| 0.679131
|
from qisys import ui
import qisys.worktree
import qisys.parsers
import qitoolchain
def configure_parser(parser):
    """Register the default qisys command-line options on *parser*."""
    qisys.parsers.default_parser(parser)
def do(args):
    """List every known toolchain, or hint at creation when none exist.

    BUG FIX: the original line fused ``tc_names = ...`` onto the ``def``
    line while keeping an indented suite below it, which is a SyntaxError;
    the statements are split back onto their own lines.
    """
    tc_names = qitoolchain.get_tc_names()
    if not tc_names:
        ui.info("No toolchain yet", "\n",
                "Use `qitoolchain create` to create a new toolchain")
        return
    ui.info("Known toolchains:")
    for tc_name in tc_names:
        ui.info("*", tc_name)
    ui.info("Use ``qitoolchain info <tc_name>`` for more info")
| true
| true
|
1c4987b52231b64a6534695e6c3a883f0c14cd41
| 2,952
|
py
|
Python
|
ospt/utils.py
|
Murray-LIANG/ospt
|
c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b
|
[
"Apache-2.0"
] | null | null | null |
ospt/utils.py
|
Murray-LIANG/ospt
|
c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b
|
[
"Apache-2.0"
] | null | null | null |
ospt/utils.py
|
Murray-LIANG/ospt
|
c1a2a89cc57d06d8bc6b1fd01b647c1f63ab9e2b
|
[
"Apache-2.0"
] | null | null | null |
import functools
import inspect
import logging
import time
from contextlib import contextmanager
from logging import handlers
from ospt import exceptions as ospt_ex
LOG = logging.getLogger()
def setup_log(file_path=None, level=logging.INFO, to_stdout=True,
              max_bytes=104857600, max_file_count=5):
    """Configure the root logger.

    :param file_path: optional log file; when given, a rotating handler is
        attached (``max_bytes`` per file, ``max_file_count`` backups).
    :param level: level applied to the root logger and every handler.
    :param to_stdout: also attach a StreamHandler (NOTE(review): despite the
        name, ``logging.StreamHandler()`` defaults to stderr -- confirm).
    """
    fmt_str = ('%(asctime)-15s %(name)-8s %(threadName)s '
               '%(levelname)-4s %(message)s')
    fmt = logging.Formatter(fmt_str)
    # Set root logger to `level` -- its default (WARNING) would suppress
    # lower-severity records regardless of handler levels.
    root = logging.getLogger()
    root.setLevel(level)
    if to_stdout:
        console = logging.StreamHandler()
        console.setLevel(level)
        console.setFormatter(fmt)
        root.addHandler(console)
    if file_path:
        file_handler = handlers.RotatingFileHandler(
            filename=file_path, maxBytes=max_bytes, backupCount=max_file_count)
        file_handler.setLevel(level)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)
@contextmanager
def timer():
    """Yield a stopwatch object; its ``interval`` property gives the elapsed
    wall-clock seconds of the ``with`` block (valid once the block exits)."""
    class _Stopwatch(object):
        def __init__(self, started_at):
            self.start = started_at
            self.end = None

        @property
        def interval(self):
            # Only meaningful after the with-block has finished.
            return self.end - self.start

    watch = _Stopwatch(time.time())
    try:
        yield watch
    finally:
        # Record the stop time even if the body raised.
        watch.end = time.time()
def to_str(resource):
    """Best-effort human-readable string for *resource*.

    Lists/tuples are rendered element-wise joined with ':'; ospt ``Resource``
    objects use ``str()``; storops resources render as ``id=...,name=...``;
    anything else falls back to ``str``.  Imports are deferred to call time,
    presumably to avoid circular imports with ``ospt.control`` -- confirm.
    """
    if isinstance(resource, list) or isinstance(resource, tuple):
        return ':'.join(to_str(each) for each in resource)
    from ospt.control import Resource as OsptRes
    if isinstance(resource, OsptRes):
        return str(resource)
    from storops.lib.resource import Resource as StoropsRes
    if isinstance(resource, StoropsRes):
        return 'id={},name={}'.format(resource.get_id(), resource.name)
    return str(resource)
def timeit(func):
    """Decorator logging *func*'s arguments before the call and its
    wall-clock duration afterwards; the return value passes through."""
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        LOG.info('%s: %s.', func.__name__, to_str(args))
        with timer() as t:
            result = func(*args, **kwargs)
        LOG.info('TIME: %s, %s: %s.', t.interval, func.__name__, to_str(args))
        return result
    return _wrapper
def wait_until(res_manager, res_id, criteria, timeout=1200):
    """Poll ``res_manager.get(res_id)`` once per second until done.

    *criteria* is either a status value (wait until ``res.status`` equals it)
    or an exception class (wait until ``get`` raises it).

    :raises ospt_ex.TimeoutError: when *timeout* seconds elapse first.
    """
    start_point = time.time()
    while True:
        if time.time() - start_point > timeout:
            raise ospt_ex.TimeoutError(
                'Timeout before {} becoming {}. {} sec passed.'.format(
                    res_id, criteria, timeout))
        time.sleep(1)
        try:
            res = res_manager.get(res_id)
        except Exception as ex:
            if inspect.isclass(criteria) and isinstance(ex, criteria):
                break
            # BUG FIX: the original fell through to `res.status` after a
            # non-matching exception, dereferencing an unbound (first
            # iteration) or stale `res`; keep polling instead.
            continue
        if res.status == criteria:
            break
def sort_by_name(resources):
    """Return *resources* as a new list ordered by their ``name`` attribute."""
    def _name_of(resource):
        return resource.name
    return sorted(resources, key=_name_of)
| 29.52
| 80
| 0.613144
|
import functools
import inspect
import logging
import time
from contextlib import contextmanager
from logging import handlers
from ospt import exceptions as ospt_ex
LOG = logging.getLogger()
def setup_log(file_path=None, level=logging.INFO, to_stdout=True,
              max_bytes=104857600, max_file_count=5):
    """Configure the root logger: optional console handler plus an optional
    rotating file handler (``max_bytes`` per file, ``max_file_count`` backups)."""
    fmt_str = ('%(asctime)-15s %(name)-8s %(threadName)s '
               '%(levelname)-4s %(message)s')
    fmt = logging.Formatter(fmt_str)
    # Root logger must be lowered to `level`; its WARNING default would
    # suppress lower-severity records regardless of handler levels.
    root = logging.getLogger()
    root.setLevel(level)
    if to_stdout:
        console = logging.StreamHandler()
        console.setLevel(level)
        console.setFormatter(fmt)
        root.addHandler(console)
    if file_path:
        file_handler = handlers.RotatingFileHandler(
            filename=file_path, maxBytes=max_bytes, backupCount=max_file_count)
        file_handler.setLevel(level)
        file_handler.setFormatter(fmt)
        root.addHandler(file_handler)
@contextmanager
def timer():
    """Yield an object whose ``interval`` property is the elapsed wall-clock
    seconds of the ``with`` block (valid once the block exits)."""
    class _Time(object):
        def __init__(self, time_start):
            self.start = time_start
            self.end = None
        @property
        def interval(self):
            # Only meaningful after the with-block has finished.
            return self.end - self.start
    _timer = _Time(time.time())
    try:
        yield _timer
    finally:
        _timer.end = time.time()
def to_str(resource):
    """Best-effort human-readable string for *resource* (lists join with ':';
    ospt/storops resources get dedicated renderings; else ``str``).
    Imports are deferred, presumably to avoid circular imports -- confirm."""
    if isinstance(resource, list) or isinstance(resource, tuple):
        return ':'.join(to_str(each) for each in resource)
    from ospt.control import Resource as OsptRes
    if isinstance(resource, OsptRes):
        return str(resource)
    from storops.lib.resource import Resource as StoropsRes
    if isinstance(resource, StoropsRes):
        return 'id={},name={}'.format(resource.get_id(), resource.name)
    return str(resource)
def timeit(func):
    """Decorator logging *func*'s arguments and wall-clock duration."""
    @functools.wraps(func)
    def _wrapper(*args, **kwargs):
        LOG.info('%s: %s.', func.__name__, to_str(args))
        with timer() as t:
            result = func(*args, **kwargs)
        LOG.info('TIME: %s, %s: %s.', t.interval, func.__name__, to_str(args))
        return result
    return _wrapper
def wait_until(res_manager, res_id, criteria, timeout=1200):
    """Poll ``res_manager.get(res_id)`` once per second until ``res.status``
    equals *criteria*, or until ``get`` raises an exception of class
    *criteria*; raise ``ospt_ex.TimeoutError`` after *timeout* seconds."""
    start_point = time.time()
    while True:
        if time.time() - start_point > timeout:
            raise ospt_ex.TimeoutError(
                'Timeout before {} becoming {}. {} sec passed.'.format(
                    res_id, criteria, timeout))
        time.sleep(1)
        try:
            res = res_manager.get(res_id)
        except Exception as ex:
            if inspect.isclass(criteria) and isinstance(ex, criteria):
                break
            # BUG FIX: previously fell through to `res.status` with `res`
            # unbound (first iteration) or stale; keep polling instead.
            continue
        if res.status == criteria:
            break
def sort_by_name(resources):
    """Return *resources* sorted ascending by their ``name`` attribute."""
    return sorted(resources, key=lambda x: x.name)
| true
| true
|
1c4987dda02a4463a27ae5d6523d313400cc871d
| 7,287
|
py
|
Python
|
options/valuation.py
|
JuanCRCano/AmericanOpt_Methods
|
38a4de4da20337e629ab47edf2d2e7e134586264
|
[
"MIT"
] | null | null | null |
options/valuation.py
|
JuanCRCano/AmericanOpt_Methods
|
38a4de4da20337e629ab47edf2d2e7e134586264
|
[
"MIT"
] | null | null | null |
options/valuation.py
|
JuanCRCano/AmericanOpt_Methods
|
38a4de4da20337e629ab47edf2d2e7e134586264
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import math as mt
from sklearn.linear_model import LinearRegression
def Binomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
                  Ramificaciones_Arbol=100, Modelo="Cox Equity"):
    """Price an American option on a Cox-Ross-Rubinstein binomial tree.

    :param Vencimiento: time to expiry in days (converted to years below).
    :param Call_Put: +1 prices a call, -1 a put (signs the payoff).
    :param Modelo: cost-of-carry choice -- equity (r - dividend yield),
        futures (0) or FX (r - foreign rate).
    NOTE(review): an unrecognised ``Modelo`` leaves ``ConfigModelo`` unbound
    and raises NameError further down -- confirm intended.
    """
    if Modelo == "Cox Equity":
        ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
    if Modelo == "Cox Futuros":
        ConfigModelo = 0
    if Modelo == "Cox Divisas":
        ConfigModelo = TLibre_Riesgo - Tasa_Foranea
    Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
    Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, Ramificaciones_Arbol + 1))
    Vencimiento = Vencimiento / 365.0
    Steps = Vencimiento / Ramificaciones_Arbol
    Up = mt.exp(Volatilidad * mt.sqrt(Steps))
    Down = mt.exp(-Volatilidad * mt.sqrt(Steps))
    P = (mt.exp(ConfigModelo * Steps) - Down) / (Up - Down)
    # Build the underlying-price lattice from the root outwards.
    Arbol_Subyacente[0, 0] = Spot
    for i in range(1, Ramificaciones_Arbol + 1):
        Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
        for j in range(1, i + 1):
            Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1] * Down
    # Terminal payoffs at expiry.
    for j in range(Ramificaciones_Arbol + 1):
        Arbol_Derivado[Ramificaciones_Arbol, j] = max(0,
                                                      Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike))
    # Backward induction with an early-exercise check at every node.
    for m in range(Ramificaciones_Arbol + 1):
        i = Ramificaciones_Arbol - m - 1
        for j in range(i + 1):
            Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike),
                                       (P * Arbol_Derivado[i + 1, j] + (1 - P) * Arbol_Derivado[i + 1, j + 1]) * mt.exp(
                                           -TLibre_Riesgo * Steps))
    # Debug view of both lattices:
    # return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,""),pd.DataFrame(Arbol_Derivado).replace(0,"")])
    return Arbol_Derivado[0, 0]
def Trinomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
                  Ramificaciones_Arbol=100, Modelo="Cox Equity"):
    """Price an American option on a trinomial tree (up/middle/down moves).

    Same parameter conventions as ``Binomial_Tree``; ``Vencimiento`` is in
    days, ``Call_Put`` is +1 for a call / -1 for a put.
    NOTE(review): ``ConfigModelo`` is computed but never used -- the
    branch probabilities below use ``TLibre_Riesgo`` directly; confirm.
    """
    if Modelo == "Cox Equity":
        ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
    if Modelo == "Cox Futuros":
        ConfigModelo = 0
    if Modelo == "Cox Divisas":
        ConfigModelo = TLibre_Riesgo - Tasa_Foranea
    Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
    Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
    Vencimiento = Vencimiento / 365.0
    Steps = Vencimiento / Ramificaciones_Arbol
    Up = mt.exp(Volatilidad * mt.sqrt(2 * Steps))
    Down = mt.exp(-Volatilidad * mt.sqrt(2 * Steps))
    # Boyle-style branch probabilities: up, down, and middle as the remainder.
    Pu = ((mt.exp(TLibre_Riesgo * Steps / 2) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2))) / (
            mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
    Pd = ((mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(TLibre_Riesgo * Steps / 2)) / (
            mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
    Pm = 1 - (Pu + Pd)
    # Build the underlying-price lattice (each level has 2*i + 1 nodes).
    Arbol_Subyacente[0, 0] = Spot
    for i in range(1, Ramificaciones_Arbol + 1):
        Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
        for j in range(1, (2 * i)):
            Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1]
            Arbol_Subyacente[i, j + 1] = Arbol_Subyacente[i - 1, j - 1] * Down
    # Terminal payoffs at expiry.
    for j in range((2 * Ramificaciones_Arbol) + 1):
        Arbol_Derivado[Ramificaciones_Arbol, j] = max(Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike),
                                                      0)
    # Backward induction with an early-exercise check at every node.
    for m in range(Ramificaciones_Arbol + 1):
        i = Ramificaciones_Arbol - m - 1
        for j in range((2 * i) + 1):
            Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike), (
                    Pu * Arbol_Derivado[i + 1, j] + Pm * Arbol_Derivado[i + 1, j + 1] + Pd * Arbol_Derivado[
                i + 1, j + 2]) * mt.exp(-TLibre_Riesgo * Steps))
    # Debug view of both lattices:
    # return pd.concat([pd.DataFrame(Arbol_Subyacente).replace(0,""),pd.DataFrame(Arbol_Derivado).replace(0,"")])
    return Arbol_Derivado[0, 0]
def LSM(Spot,Strike,Vencimiento,Volatilidad,TLibre_Riesgo,Call_Put,NumSim=10,CambiosXDia=1):
    """Longstaff-Schwartz least-squares Monte Carlo American option price.

    Simulates ``NumSim`` GBM paths with ``CambiosXDia`` steps per day over
    ``Vencimiento`` days (assumed integer), then regresses continuation
    values on a cubic polynomial of the spot for in-the-money paths.
    Result is stochastic (``np.random.normal`` is not seeded here).
    """
    Deltat = 1/(Vencimiento*CambiosXDia)  # assumes N price changes per day
    Caminos_Subyacente = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    v = Volatilidad/mt.sqrt(365/Vencimiento)  # de-annualise the volatility
    r = TLibre_Riesgo/(365/Vencimiento)  # de-annualise the risk-free rate
    for m in range(0,NumSim):
        Caminos_Subyacente[m,0] = Spot
        for t in range(1,(Vencimiento*CambiosXDia)+1):
            Caminos_Subyacente[m,t] = Caminos_Subyacente[m,t-1]*mt.exp((r - (v**2)/2)*Deltat + np.random.normal(0,1)*mt.sqrt((v**2)*Deltat))
    Caminos_Derivado = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    Caminos_Derivado[:,(Vencimiento*CambiosXDia)] = np.maximum((Caminos_Subyacente[:,(Vencimiento*CambiosXDia)] - Strike)*Call_Put,0)
    for t in range((Vencimiento*CambiosXDia)-1,-1,-1):
        Caminos_Derivado[:,t] = Caminos_Derivado[:,t+1]*mt.exp(-r*Deltat)  # observed (discounted) continuation value
        Caminos_EnEl_Dinero = ((Caminos_Subyacente[:,t]-Strike)*Call_Put>0)
        if Caminos_EnEl_Dinero.sum()>0:
            Tabla_Regresion = np.zeros((Caminos_EnEl_Dinero.sum(),4))
            # Cubic-in-spot regression basis (commented alternatives were Laguerre polynomials).
            Tabla_Regresion[:,0] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero] #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)
            Tabla_Regresion[:,1] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero])
            Tabla_Regresion[:,2] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**3 #np.vectorize(mt.exp)(-Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]/2)*(1-2*Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]+(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2)/2)
            Modelo = LinearRegression().fit(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
            #print(Modelo.score(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero]))
            Tabla_Regresion[:,3] = Modelo.intercept_ + Modelo.coef_[0]*Tabla_Regresion[:,0] + Modelo.coef_[1]*Tabla_Regresion[:,1] + Modelo.coef_[2]*Tabla_Regresion[:,2]  # expected continuation value
            # If E[continuation] < exercise value, exercise now; otherwise keep holding.
            Caminos_Derivado[np.where(Caminos_EnEl_Dinero==True),t] = np.where(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
            #Caminos_Derivado[np.where((Caminos_EnEl_Dinero==True)&(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put)),t+1] = 0
    #return pd.DataFrame(Caminos_Subyacente)
    return Caminos_Derivado[:,0].mean()
| 59.243902
| 269
| 0.651571
|
import pandas as pd
import numpy as np
import math as mt
from sklearn.linear_model import LinearRegression
def Binomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
                  Ramificaciones_Arbol=100, Modelo="Cox Equity"):
    """Price an American option on a Cox-Ross-Rubinstein binomial lattice.

    ``Vencimiento`` is in days, ``Call_Put`` is +1 (call) / -1 (put);
    ``Modelo`` selects the cost-of-carry (equity / futures / FX).
    """
    if Modelo == "Cox Equity":
        drift = TLibre_Riesgo - Tasa_Dividendo
    if Modelo == "Cox Futuros":
        drift = 0
    if Modelo == "Cox Divisas":
        drift = TLibre_Riesgo - Tasa_Foranea
    n = Ramificaciones_Arbol
    asset = np.zeros((n + 1, n + 1))
    option = np.zeros((n + 1, n + 1))
    dt = (Vencimiento / 365.0) / n
    up = mt.exp(Volatilidad * mt.sqrt(dt))
    down = mt.exp(-Volatilidad * mt.sqrt(dt))
    p_up = (mt.exp(drift * dt) - down) / (up - down)
    # Underlying-price lattice, built level by level from the root.
    asset[0, 0] = Spot
    for i in range(1, n + 1):
        asset[i, 0] = asset[i - 1, 0] * up
        for j in range(1, i + 1):
            asset[i, j] = asset[i - 1, j - 1] * down
    # Terminal payoffs at expiry.
    for j in range(n + 1):
        option[n, j] = max(0, Call_Put * (asset[n, j] - Strike))
    # Backward induction; early exercise is checked at every node.
    disc = mt.exp(-TLibre_Riesgo * dt)
    for m in range(n + 1):
        i = n - m - 1
        for j in range(i + 1):
            hold = (p_up * option[i + 1, j] + (1 - p_up) * option[i + 1, j + 1]) * disc
            option[i, j] = max(Call_Put * (asset[i, j] - Strike), hold)
    return option[0, 0]
def Trinomial_Tree(Spot, Strike, Vencimiento, Volatilidad, TLibre_Riesgo, Call_Put, Tasa_Foranea=0, Tasa_Dividendo=0,
                  Ramificaciones_Arbol=100, Modelo="Cox Equity"):
    """Price an American option on a trinomial lattice (up/middle/down).

    Same conventions as ``Binomial_Tree``.  NOTE(review): ``ConfigModelo``
    is computed but unused -- the probabilities use ``TLibre_Riesgo``
    directly; confirm intended.
    """
    if Modelo == "Cox Equity":
        ConfigModelo = TLibre_Riesgo - Tasa_Dividendo
    if Modelo == "Cox Futuros":
        ConfigModelo = 0
    if Modelo == "Cox Divisas":
        ConfigModelo = TLibre_Riesgo - Tasa_Foranea
    Arbol_Subyacente = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
    Arbol_Derivado = np.zeros((Ramificaciones_Arbol + 1, (2 * Ramificaciones_Arbol) + 1))
    Vencimiento = Vencimiento / 365.0
    Steps = Vencimiento / Ramificaciones_Arbol
    Up = mt.exp(Volatilidad * mt.sqrt(2 * Steps))
    Down = mt.exp(-Volatilidad * mt.sqrt(2 * Steps))
    # Up/down branch probabilities; middle takes the remainder.
    Pu = ((mt.exp(TLibre_Riesgo * Steps / 2) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2))) / (
            mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
    Pd = ((mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(TLibre_Riesgo * Steps / 2)) / (
            mt.exp(Volatilidad * mt.sqrt(Steps / 2)) - mt.exp(-Volatilidad * mt.sqrt(Steps / 2)))) ** 2
    Pm = 1 - (Pu + Pd)
    # Underlying-price lattice: level i holds 2*i + 1 nodes.
    Arbol_Subyacente[0, 0] = Spot
    for i in range(1, Ramificaciones_Arbol + 1):
        Arbol_Subyacente[i, 0] = Arbol_Subyacente[i - 1, 0] * Up
        for j in range(1, (2 * i)):
            Arbol_Subyacente[i, j] = Arbol_Subyacente[i - 1, j - 1]
            Arbol_Subyacente[i, j + 1] = Arbol_Subyacente[i - 1, j - 1] * Down
    # Terminal payoffs, then backward induction with early exercise.
    for j in range((2 * Ramificaciones_Arbol) + 1):
        Arbol_Derivado[Ramificaciones_Arbol, j] = max(Call_Put * (Arbol_Subyacente[Ramificaciones_Arbol, j] - Strike),
                                                      0)
    for m in range(Ramificaciones_Arbol + 1):
        i = Ramificaciones_Arbol - m - 1
        for j in range((2 * i) + 1):
            Arbol_Derivado[i, j] = max(Call_Put * (Arbol_Subyacente[i, j] - Strike), (
                    Pu * Arbol_Derivado[i + 1, j] + Pm * Arbol_Derivado[i + 1, j + 1] + Pd * Arbol_Derivado[
                i + 1, j + 2]) * mt.exp(-TLibre_Riesgo * Steps))
    return Arbol_Derivado[0, 0]
def LSM(Spot,Strike,Vencimiento,Volatilidad,TLibre_Riesgo,Call_Put,NumSim=10,CambiosXDia=1):
    """Longstaff-Schwartz least-squares Monte Carlo American option price.

    BUG FIX: the original text had several statements fused onto single
    lines (e.g. ``Deltat = ... Caminos_Subyacente = ...``), which is a
    SyntaxError; the statements are restored to one per line with the
    same logic.  Result is stochastic (RNG is not seeded here).
    """
    Deltat = 1/(Vencimiento*CambiosXDia)  # assumes N price changes per day
    Caminos_Subyacente = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    v = Volatilidad/mt.sqrt(365/Vencimiento)  # de-annualise the volatility
    r = TLibre_Riesgo/(365/Vencimiento)  # de-annualise the risk-free rate
    for m in range(0,NumSim):
        Caminos_Subyacente[m,0] = Spot
        for t in range(1,(Vencimiento*CambiosXDia)+1):
            Caminos_Subyacente[m,t] = Caminos_Subyacente[m,t-1]*mt.exp((r - (v**2)/2)*Deltat + np.random.normal(0,1)*mt.sqrt((v**2)*Deltat))
    Caminos_Derivado = np.zeros((NumSim,(Vencimiento*CambiosXDia)+1))
    Caminos_Derivado[:,(Vencimiento*CambiosXDia)] = np.maximum((Caminos_Subyacente[:,(Vencimiento*CambiosXDia)] - Strike)*Call_Put,0)
    for t in range((Vencimiento*CambiosXDia)-1,-1,-1):
        Caminos_Derivado[:,t] = Caminos_Derivado[:,t+1]*mt.exp(-r*Deltat)  # discounted continuation value
        Caminos_EnEl_Dinero = ((Caminos_Subyacente[:,t]-Strike)*Call_Put>0)
        if Caminos_EnEl_Dinero.sum()>0:
            Tabla_Regresion = np.zeros((Caminos_EnEl_Dinero.sum(),4))
            # Cubic-in-spot regression basis for in-the-money paths.
            Tabla_Regresion[:,0] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]
            Tabla_Regresion[:,1] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**2
            Tabla_Regresion[:,2] = Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]**3
            Modelo = LinearRegression().fit(Tabla_Regresion[:,0:3],Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
            Tabla_Regresion[:,3] = Modelo.intercept_ + Modelo.coef_[0]*Tabla_Regresion[:,0] + Modelo.coef_[1]*Tabla_Regresion[:,1] + Modelo.coef_[2]*Tabla_Regresion[:,2]
            # Exercise now when expected continuation < immediate exercise value.
            Caminos_Derivado[np.where(Caminos_EnEl_Dinero==True),t] = np.where(Tabla_Regresion[:,3]<(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,(Caminos_Subyacente[:,t][Caminos_EnEl_Dinero]-Strike)*Call_Put,Caminos_Derivado[:,t][Caminos_EnEl_Dinero])
    return Caminos_Derivado[:,0].mean()
| true
| true
|
1c4988afa1867c543a8f26fed4ae75527832aa35
| 2,372
|
py
|
Python
|
scalyr_agent/json_lib/__init__.py
|
code-sauce/scalyr-agent-2
|
41023d5c1272186193dd02900782b150dda5f38e
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/json_lib/__init__.py
|
code-sauce/scalyr-agent-2
|
41023d5c1272186193dd02900782b150dda5f38e
|
[
"Apache-2.0"
] | null | null | null |
scalyr_agent/json_lib/__init__.py
|
code-sauce/scalyr-agent-2
|
41023d5c1272186193dd02900782b150dda5f38e
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2014 Scalyr Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------
r"""A lightweight JSON library used by the Scalyr agent to serialize data
for storage to disk and for sending over HTTP.
This library is used instead of python's default json library because
it supports some custom Scalyr extensions (chiefly it allows for comments
in the JSON) and the json library is not included in all versions of Python
supported by the Scalyr agent.
The classes exported by this package are:
JsonObject -- A JSON object containing keys and fields. Has similar methods as a dict.
JsonArray -- A JSON array. Has similar methods to a list.
JsonConversionException -- Exception raised when conversion of a field in a JSON object fails.
JsonMissingFieldException -- Exception raised when a request field in a JSON object is missing.
JsonParseException -- Exception raised when parsing a string as JSON fails.
The methods exported are:
parse -- Parses a string as JSON and returns the value.
serialize -- Serializes a JSON value to a string.
"""
__author__ = 'Steven Czerwinski <czerwin@scalyr.com>'
from scalyr_agent.json_lib.exceptions import JsonConversionException
from scalyr_agent.json_lib.exceptions import JsonMissingFieldException, JsonParseException
from scalyr_agent.json_lib.objects import JsonObject, JsonArray
from scalyr_agent.json_lib.parser import parse
from scalyr_agent.json_lib.serializer import serialize
from scalyr_agent.json_lib.serializer import serialize_as_length_prefixed_string
__all__ = ['parse', 'serialize', 'JsonObject', 'JsonArray', 'JsonConversionException', 'JsonMissingFieldException',
'JsonParseException', 'serialize_as_length_prefixed_string']
| 50.468085
| 115
| 0.73946
|
__author__ = 'Steven Czerwinski <czerwin@scalyr.com>'
from scalyr_agent.json_lib.exceptions import JsonConversionException
from scalyr_agent.json_lib.exceptions import JsonMissingFieldException, JsonParseException
from scalyr_agent.json_lib.objects import JsonObject, JsonArray
from scalyr_agent.json_lib.parser import parse
from scalyr_agent.json_lib.serializer import serialize
from scalyr_agent.json_lib.serializer import serialize_as_length_prefixed_string
__all__ = ['parse', 'serialize', 'JsonObject', 'JsonArray', 'JsonConversionException', 'JsonMissingFieldException',
'JsonParseException', 'serialize_as_length_prefixed_string']
| true
| true
|
1c4989f318ebf96499779f9a58c688a9a5cb6cda
| 28,860
|
py
|
Python
|
test/functional/test_framework/script.py
|
Groestlcoin/groestlcoin
|
e081d1e38dea360fe48f0c8eb59a384900e6c6af
|
[
"MIT"
] | 49
|
2017-06-27T17:36:20.000Z
|
2021-11-26T15:32:37.000Z
|
test/functional/test_framework/script.py
|
Groestlcoin/groestlcoin
|
e081d1e38dea360fe48f0c8eb59a384900e6c6af
|
[
"MIT"
] | 19
|
2016-11-06T21:44:47.000Z
|
2021-01-14T21:33:06.000Z
|
test/functional/test_framework/script.py
|
Groestlcoin/groestlcoin
|
e081d1e38dea360fe48f0c8eb59a384900e6c6af
|
[
"MIT"
] | 31
|
2016-11-07T02:04:00.000Z
|
2022-03-21T11:30:29.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Functionality to build scripts, as well as signature hash functions.
This file is modified from python-bitcoinlib.
"""
from collections import namedtuple
import hashlib
import struct
import unittest
from typing import List, Dict
from .key import TaggedHash, tweak_add_pubkey
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50
LEAF_VERSION_TAPSCRIPT = 0xc0
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
"""Convert number to bitcoin-specific little endian format."""
# We need v.bit_length() bits, plus a sign bit for every nonzero number.
n_bits = v.bit_length() + (v != 0)
# The number of bytes for that is:
n_bytes = (n_bits + 7) // 8
# Convert number to absolute value + sign in top bit.
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
# Serialize to bytes
return encoded_v.to_bytes(n_bytes, 'little')
class CScriptOp(int):
"""A single script opcode"""
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
"""Encode a PUSHDATA op, returning bytes"""
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d # OP_PUSHDATA
elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d # OP_PUSHDATA1
elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d # OP_PUSHDATA2
elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d # OP_PUSHDATA4
else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
"""Encode a small integer op, returning an opcode"""
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
"""Decode a small integer opcode, returning an integer"""
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
"""Return true if the op pushes a small integer to the stack"""
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super().__new__(cls, n))
return _opcode_instances[n]
OPCODE_NAMES: Dict[CScriptOp, str] = {}
_opcode_instances: List[CScriptOp] = []
# Populate opcode instance table
for n in range(0xff + 1):
CScriptOp(n)
# push value
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
# control
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
# stack ops
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
# splice ops
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
# bit logic
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
# numeric
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
# crypto
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
# expansion
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
# BIP 342 opcodes (Tapscript)
OP_CHECKSIGADD = CScriptOp(0xba)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
"""Base class for CScript exceptions"""
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
"""Invalid pushdata due to truncation"""
def __init__(self, msg, data):
self.data = data
super().__init__(msg)
# This is used, eg, for blockchain heights in coinbase scripts (bip34)
class CScriptNum:
__slots__ = ("value",)
def __init__(self, d=0):
self.value = d
@staticmethod
def encode(obj):
r = bytearray(0)
if obj.value == 0:
return bytes(r)
neg = obj.value < 0
absvalue = -obj.value if neg else obj.value
while (absvalue):
r.append(absvalue & 0xff)
absvalue >>= 8
if r[-1] & 0x80:
r.append(0x80 if neg else 0)
elif neg:
r[-1] |= 0x80
return bytes([len(r)]) + r
@staticmethod
def decode(vch):
result = 0
# We assume valid push_size and minimal encoding
value = vch[1:]
if len(value) == 0:
return result
for i, byte in enumerate(value):
result |= int(byte) << 8 * i
if value[-1] >= 0x80:
# Mask for all but the highest result bit
num_mask = (2**(len(value) * 8) - 1) >> 1
result &= num_mask
result *= -1
return result
class CScript(bytes):
"""Serialized script
A bytes subclass, so you can use this directly whenever bytes are accepted.
Note that this means that indexing does *not* work - you'll get an index by
byte rather than opcode. This format was chosen for efficiency so that the
general case would not require creating a lot of little CScriptOP objects.
iter(script) however does iterate by opcode.
"""
__slots__ = ()
@classmethod
def __coerce_instance(cls, other):
# Coerce other into bytes
if isinstance(other, CScriptOp):
other = bytes([other])
elif isinstance(other, CScriptNum):
if (other.value == 0):
other = bytes([CScriptOp(OP_0)])
else:
other = CScriptNum.encode(other)
elif isinstance(other, int):
if 0 <= other <= 16:
other = bytes([CScriptOp.encode_op_n(other)])
elif other == -1:
other = bytes([OP_1NEGATE])
else:
other = CScriptOp.encode_op_pushdata(bn2vch(other))
elif isinstance(other, (bytes, bytearray)):
other = CScriptOp.encode_op_pushdata(other)
return other
def __add__(self, other):
# add makes no sense for a CScript()
raise NotImplementedError
def join(self, iterable):
# join makes no sense for a CScript()
raise NotImplementedError
def __new__(cls, value=b''):
if isinstance(value, bytes) or isinstance(value, bytearray):
return super().__new__(cls, value)
else:
def coerce_iterable(iterable):
for instance in iterable:
yield cls.__coerce_instance(instance)
# Annoyingly on both python2 and python3 bytes.join() always
# returns a bytes instance even when subclassed.
return super().__new__(cls, b''.join(coerce_iterable(value)))
def raw_iter(self):
"""Raw iteration
Yields tuples of (opcode, data, sop_idx) so that the different possible
PUSHDATA encodings can be accurately distinguished, as well as
determining the exact opcode byte indexes. (sop_idx)
"""
i = 0
while i < len(self):
sop_idx = i
opcode = self[i]
i += 1
if opcode > OP_PUSHDATA4:
yield (opcode, None, sop_idx)
else:
datasize = None
pushdata_type = None
if opcode < OP_PUSHDATA1:
pushdata_type = 'PUSHDATA(%d)' % opcode
datasize = opcode
elif opcode == OP_PUSHDATA1:
pushdata_type = 'PUSHDATA1'
if i >= len(self):
raise CScriptInvalidError('PUSHDATA1: missing data length')
datasize = self[i]
i += 1
elif opcode == OP_PUSHDATA2:
pushdata_type = 'PUSHDATA2'
if i + 1 >= len(self):
raise CScriptInvalidError('PUSHDATA2: missing data length')
datasize = self[i] + (self[i + 1] << 8)
i += 2
elif opcode == OP_PUSHDATA4:
pushdata_type = 'PUSHDATA4'
if i + 3 >= len(self):
raise CScriptInvalidError('PUSHDATA4: missing data length')
datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
i += 4
else:
assert False # shouldn't happen
data = bytes(self[i:i + datasize])
# Check for truncation
if len(data) < datasize:
raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
i += datasize
yield (opcode, data, sop_idx)
def __iter__(self):
"""'Cooked' iteration
Returns either a CScriptOP instance, an integer, or bytes, as
appropriate.
See raw_iter() if you need to distinguish the different possible
PUSHDATA encodings.
"""
for (opcode, data, sop_idx) in self.raw_iter():
if data is not None:
yield data
else:
opcode = CScriptOp(opcode)
if opcode.is_small_int():
yield opcode.decode_op_n()
else:
yield CScriptOp(opcode)
def __repr__(self):
def _repr(o):
if isinstance(o, bytes):
return "x('%s')" % o.hex()
else:
return repr(o)
ops = []
i = iter(self)
while True:
op = None
try:
op = _repr(next(i))
except CScriptTruncatedPushDataError as err:
op = '%s...<ERROR: %s>' % (_repr(err.data), err)
break
except CScriptInvalidError as err:
op = '<ERROR: %s>' % err
break
except StopIteration:
break
finally:
if op is not None:
ops.append(op)
return "CScript([%s])" % ', '.join(ops)
def GetSigOpCount(self, fAccurate):
"""Get the SigOp count.
fAccurate - Accurately count CHECKMULTISIG, see BIP16 for details.
Note that this is consensus-critical.
"""
n = 0
lastOpcode = OP_INVALIDOPCODE
for (opcode, data, sop_idx) in self.raw_iter():
if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
n += 1
elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
if fAccurate and (OP_1 <= lastOpcode <= OP_16):
n += opcode.decode_op_n()
else:
n += 20
lastOpcode = opcode
return n
SIGHASH_DEFAULT = 0 # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1
SIGHASH_NONE = 2
SIGHASH_SINGLE = 3
SIGHASH_ANYONECANPAY = 0x80
def FindAndDelete(script, sig):
"""Consensus critical, see FindAndDelete() in Satoshi codebase"""
r = b''
last_sop_idx = sop_idx = 0
skip = True
for (opcode, data, sop_idx) in script.raw_iter():
if not skip:
r += script[last_sop_idx:sop_idx]
last_sop_idx = sop_idx
if script[sop_idx:sop_idx + len(sig)] == sig:
skip = True
else:
skip = False
if not skip:
r += script[last_sop_idx:]
return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
"""Consensus-correct SignatureHash
Returns (hash, err) to precisely match the consensus-critical behavior of
the SIGHASH_SINGLE bug. (inIdx is *not* checked for validity)
"""
HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
if inIdx >= len(txTo.vin):
return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
txtmp = CTransaction(txTo)
for txin in txtmp.vin:
txin.scriptSig = b''
txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
if (hashtype & 0x1f) == SIGHASH_NONE:
txtmp.vout = []
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
elif (hashtype & 0x1f) == SIGHASH_SINGLE:
outIdx = inIdx
if outIdx >= len(txtmp.vout):
return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
tmp = txtmp.vout[outIdx]
txtmp.vout = []
for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
for i in range(len(txtmp.vin)):
if i != inIdx:
txtmp.vin[i].nSequence = 0
if hashtype & SIGHASH_ANYONECANPAY:
tmp = txtmp.vin[inIdx]
txtmp.vin = []
txtmp.vin.append(tmp)
s = txtmp.serialize_without_witness()
s += struct.pack(b"<I", hashtype)
hash = sha256(s)
return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
hashPrevouts = 0
hashSequence = 0
hashOutputs = 0
if not (hashtype & SIGHASH_ANYONECANPAY):
serialize_prevouts = bytes()
for i in txTo.vin:
serialize_prevouts += i.prevout.serialize()
hashPrevouts = uint256_from_str(sha256(serialize_prevouts))
if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_sequence = bytes()
for i in txTo.vin:
serialize_sequence += struct.pack("<I", i.nSequence)
hashSequence = uint256_from_str(sha256(serialize_sequence))
if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
serialize_outputs = bytes()
for o in txTo.vout:
serialize_outputs += o.serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
serialize_outputs = txTo.vout[inIdx].serialize()
hashOutputs = uint256_from_str(sha256(serialize_outputs))
ss = bytes()
ss += struct.pack("<i", txTo.nVersion)
ss += ser_uint256(hashPrevouts)
ss += ser_uint256(hashSequence)
ss += txTo.vin[inIdx].prevout.serialize()
ss += ser_string(script)
ss += struct.pack("<q", amount)
ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
ss += ser_uint256(hashOutputs)
ss += struct.pack("<i", txTo.nLockTime)
ss += struct.pack("<I", hashtype)
return sha256(ss)
class TestFrameworkScript(unittest.TestCase):
def test_bn2vch(self):
self.assertEqual(bn2vch(0), bytes([]))
self.assertEqual(bn2vch(1), bytes([0x01]))
self.assertEqual(bn2vch(-1), bytes([0x81]))
self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))
def test_cscriptnum_encoding(self):
# round-trip negative and multi-byte CScriptNums
values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
for value in values:
self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
assert (len(txTo.vin) == len(spent_utxos))
assert (input_index < len(txTo.vin))
out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
in_type = hash_type & SIGHASH_ANYONECANPAY
spk = spent_utxos[input_index].scriptPubKey
ss = bytes([0, hash_type]) # epoch, hash_type
ss += struct.pack("<i", txTo.nVersion)
ss += struct.pack("<I", txTo.nLockTime)
if in_type != SIGHASH_ANYONECANPAY:
ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
ss += sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))
ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))
ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))
if out_type == SIGHASH_ALL:
ss += sha256(b"".join(o.serialize() for o in txTo.vout))
spend_type = 0
if annex is not None:
spend_type |= 1
if (scriptpath):
spend_type |= 2
ss += bytes([spend_type])
if in_type == SIGHASH_ANYONECANPAY:
ss += txTo.vin[input_index].prevout.serialize()
ss += struct.pack("<q", spent_utxos[input_index].nValue)
ss += ser_string(spk)
ss += struct.pack("<I", txTo.vin[input_index].nSequence)
else:
ss += struct.pack("<I", input_index)
if (spend_type & 1):
ss += sha256(ser_string(annex))
if out_type == SIGHASH_SINGLE:
if input_index < len(txTo.vout):
ss += sha256(txTo.vout[input_index].serialize())
else:
ss += bytes(0 for _ in range(32))
if (scriptpath):
ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
ss += bytes([0])
ss += struct.pack("<i", codeseparator_pos)
assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
return TaggedHash("TapSighash", ss)
def taproot_tree_helper(scripts):
if len(scripts) == 0:
return ([], bytes())
if len(scripts) == 1:
# One entry: treat as a leaf
script = scripts[0]
assert(not callable(script))
if isinstance(script, list):
return taproot_tree_helper(script)
assert(isinstance(script, tuple))
version = LEAF_VERSION_TAPSCRIPT
name = script[0]
code = script[1]
if len(script) == 3:
version = script[2]
assert version & 1 == 0
assert isinstance(code, bytes)
h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
if name is None:
return ([], h)
return ([(name, version, code, bytes())], h)
elif len(scripts) == 2 and callable(scripts[1]):
# Two entries, and the right one is a function
left, left_h = taproot_tree_helper(scripts[0:1])
right_h = scripts[1](left_h)
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = []
else:
# Two or more entries: descend into each side
split_pos = len(scripts) // 2
left, left_h = taproot_tree_helper(scripts[0:split_pos])
right, right_h = taproot_tree_helper(scripts[split_pos:])
left = [(name, version, script, control + right_h) for name, version, script, control in left]
right = [(name, version, script, control + left_h) for name, version, script, control in right]
if right_h < left_h:
right_h, left_h = left_h, right_h
h = TaggedHash("TapBranch", left_h + right_h)
return (left + right, h)
# A TaprootInfo object has the following fields:
# - scriptPubKey: the scriptPubKey (witness v1 CScript)
# - internal_pubkey: the internal pubkey (32 bytes)
# - negflag: whether the pubkey in the scriptPubKey was negated from internal_pubkey+tweak*G (bool).
# - tweak: the tweak (32 bytes)
# - leaves: a dict of name -> TaprootLeafInfo objects for all known leaves
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,internal_pubkey,negflag,tweak,leaves")
# A TaprootLeafInfo object has the following fields:
# - script: the leaf script (CScript or bytes)
# - version: the leaf version (0xc0 for BIP342 tapscript)
# - merklebranch: the merkle branch to use for this leaf (32*N bytes)
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch")
def taproot_construct(pubkey, scripts=None):
"""Construct a tree of Taproot spending conditions
pubkey: a 32-byte xonly pubkey for the internal pubkey (bytes)
scripts: a list of items; each item is either:
- a (name, CScript or bytes, leaf version) tuple
- a (name, CScript or bytes) tuple (defaulting to leaf version 0xc0)
- another list of items (with the same structure)
- a list of two items; the first of which is an item itself, and the
second is a function. The function takes as input the Merkle root of the
first item, and produces a (fictitious) partner to hash with.
Returns: a TaprootInfo object
"""
if scripts is None:
scripts = []
ret, h = taproot_tree_helper(scripts)
tweak = TaggedHash("TapTweak", pubkey + h)
tweaked, negated = tweak_add_pubkey(pubkey, tweak)
leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)
return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)
def is_op_success(o):
return o == 0x50 or o == 0x62 or o == 0x89 or o == 0x8a or o == 0x8d or o == 0x8e or (o >= 0x7e and o <= 0x81) or (o >= 0x83 and o <= 0x86) or (o >= 0x95 and o <= 0x99) or (o >= 0xbb and o <= 0xfe)
| 33.325635
| 201
| 0.618919
|
from collections import namedtuple
import hashlib
import struct
import unittest
from typing import List, Dict
from .key import TaggedHash, tweak_add_pubkey
from .messages import (
CTransaction,
CTxOut,
hash256,
ser_string,
ser_uint256,
sha256,
uint256_from_str,
)
MAX_SCRIPT_ELEMENT_SIZE = 520
LOCKTIME_THRESHOLD = 500000000
ANNEX_TAG = 0x50
LEAF_VERSION_TAPSCRIPT = 0xc0
def hash160(s):
return hashlib.new('ripemd160', sha256(s)).digest()
def bn2vch(v):
n_bits = v.bit_length() + (v != 0)
n_bytes = (n_bits + 7) // 8
encoded_v = 0 if v == 0 else abs(v) | ((v < 0) << (n_bytes * 8 - 1))
return encoded_v.to_bytes(n_bytes, 'little')
class CScriptOp(int):
__slots__ = ()
@staticmethod
def encode_op_pushdata(d):
if len(d) < 0x4c:
return b'' + bytes([len(d)]) + d elif len(d) <= 0xff:
return b'\x4c' + bytes([len(d)]) + d elif len(d) <= 0xffff:
return b'\x4d' + struct.pack(b'<H', len(d)) + d elif len(d) <= 0xffffffff:
return b'\x4e' + struct.pack(b'<I', len(d)) + d else:
raise ValueError("Data too long to encode in a PUSHDATA op")
@staticmethod
def encode_op_n(n):
if not (0 <= n <= 16):
raise ValueError('Integer must be in range 0 <= n <= 16, got %d' % n)
if n == 0:
return OP_0
else:
return CScriptOp(OP_1 + n - 1)
def decode_op_n(self):
if self == OP_0:
return 0
if not (self == OP_0 or OP_1 <= self <= OP_16):
raise ValueError('op %r is not an OP_N' % self)
return int(self - OP_1 + 1)
def is_small_int(self):
if 0x51 <= self <= 0x60 or self == 0:
return True
else:
return False
def __str__(self):
return repr(self)
def __repr__(self):
if self in OPCODE_NAMES:
return OPCODE_NAMES[self]
else:
return 'CScriptOp(0x%x)' % self
def __new__(cls, n):
try:
return _opcode_instances[n]
except IndexError:
assert len(_opcode_instances) == n
_opcode_instances.append(super().__new__(cls, n))
return _opcode_instances[n]
OPCODE_NAMES: Dict[CScriptOp, str] = {}
_opcode_instances: List[CScriptOp] = []
for n in range(0xff + 1):
CScriptOp(n)
OP_0 = CScriptOp(0x00)
OP_FALSE = OP_0
OP_PUSHDATA1 = CScriptOp(0x4c)
OP_PUSHDATA2 = CScriptOp(0x4d)
OP_PUSHDATA4 = CScriptOp(0x4e)
OP_1NEGATE = CScriptOp(0x4f)
OP_RESERVED = CScriptOp(0x50)
OP_1 = CScriptOp(0x51)
OP_TRUE = OP_1
OP_2 = CScriptOp(0x52)
OP_3 = CScriptOp(0x53)
OP_4 = CScriptOp(0x54)
OP_5 = CScriptOp(0x55)
OP_6 = CScriptOp(0x56)
OP_7 = CScriptOp(0x57)
OP_8 = CScriptOp(0x58)
OP_9 = CScriptOp(0x59)
OP_10 = CScriptOp(0x5a)
OP_11 = CScriptOp(0x5b)
OP_12 = CScriptOp(0x5c)
OP_13 = CScriptOp(0x5d)
OP_14 = CScriptOp(0x5e)
OP_15 = CScriptOp(0x5f)
OP_16 = CScriptOp(0x60)
OP_NOP = CScriptOp(0x61)
OP_VER = CScriptOp(0x62)
OP_IF = CScriptOp(0x63)
OP_NOTIF = CScriptOp(0x64)
OP_VERIF = CScriptOp(0x65)
OP_VERNOTIF = CScriptOp(0x66)
OP_ELSE = CScriptOp(0x67)
OP_ENDIF = CScriptOp(0x68)
OP_VERIFY = CScriptOp(0x69)
OP_RETURN = CScriptOp(0x6a)
OP_TOALTSTACK = CScriptOp(0x6b)
OP_FROMALTSTACK = CScriptOp(0x6c)
OP_2DROP = CScriptOp(0x6d)
OP_2DUP = CScriptOp(0x6e)
OP_3DUP = CScriptOp(0x6f)
OP_2OVER = CScriptOp(0x70)
OP_2ROT = CScriptOp(0x71)
OP_2SWAP = CScriptOp(0x72)
OP_IFDUP = CScriptOp(0x73)
OP_DEPTH = CScriptOp(0x74)
OP_DROP = CScriptOp(0x75)
OP_DUP = CScriptOp(0x76)
OP_NIP = CScriptOp(0x77)
OP_OVER = CScriptOp(0x78)
OP_PICK = CScriptOp(0x79)
OP_ROLL = CScriptOp(0x7a)
OP_ROT = CScriptOp(0x7b)
OP_SWAP = CScriptOp(0x7c)
OP_TUCK = CScriptOp(0x7d)
OP_CAT = CScriptOp(0x7e)
OP_SUBSTR = CScriptOp(0x7f)
OP_LEFT = CScriptOp(0x80)
OP_RIGHT = CScriptOp(0x81)
OP_SIZE = CScriptOp(0x82)
OP_INVERT = CScriptOp(0x83)
OP_AND = CScriptOp(0x84)
OP_OR = CScriptOp(0x85)
OP_XOR = CScriptOp(0x86)
OP_EQUAL = CScriptOp(0x87)
OP_EQUALVERIFY = CScriptOp(0x88)
OP_RESERVED1 = CScriptOp(0x89)
OP_RESERVED2 = CScriptOp(0x8a)
OP_1ADD = CScriptOp(0x8b)
OP_1SUB = CScriptOp(0x8c)
OP_2MUL = CScriptOp(0x8d)
OP_2DIV = CScriptOp(0x8e)
OP_NEGATE = CScriptOp(0x8f)
OP_ABS = CScriptOp(0x90)
OP_NOT = CScriptOp(0x91)
OP_0NOTEQUAL = CScriptOp(0x92)
OP_ADD = CScriptOp(0x93)
OP_SUB = CScriptOp(0x94)
OP_MUL = CScriptOp(0x95)
OP_DIV = CScriptOp(0x96)
OP_MOD = CScriptOp(0x97)
OP_LSHIFT = CScriptOp(0x98)
OP_RSHIFT = CScriptOp(0x99)
OP_BOOLAND = CScriptOp(0x9a)
OP_BOOLOR = CScriptOp(0x9b)
OP_NUMEQUAL = CScriptOp(0x9c)
OP_NUMEQUALVERIFY = CScriptOp(0x9d)
OP_NUMNOTEQUAL = CScriptOp(0x9e)
OP_LESSTHAN = CScriptOp(0x9f)
OP_GREATERTHAN = CScriptOp(0xa0)
OP_LESSTHANOREQUAL = CScriptOp(0xa1)
OP_GREATERTHANOREQUAL = CScriptOp(0xa2)
OP_MIN = CScriptOp(0xa3)
OP_MAX = CScriptOp(0xa4)
OP_WITHIN = CScriptOp(0xa5)
OP_RIPEMD160 = CScriptOp(0xa6)
OP_SHA1 = CScriptOp(0xa7)
OP_SHA256 = CScriptOp(0xa8)
OP_HASH160 = CScriptOp(0xa9)
OP_HASH256 = CScriptOp(0xaa)
OP_CODESEPARATOR = CScriptOp(0xab)
OP_CHECKSIG = CScriptOp(0xac)
OP_CHECKSIGVERIFY = CScriptOp(0xad)
OP_CHECKMULTISIG = CScriptOp(0xae)
OP_CHECKMULTISIGVERIFY = CScriptOp(0xaf)
OP_NOP1 = CScriptOp(0xb0)
OP_CHECKLOCKTIMEVERIFY = CScriptOp(0xb1)
OP_CHECKSEQUENCEVERIFY = CScriptOp(0xb2)
OP_NOP4 = CScriptOp(0xb3)
OP_NOP5 = CScriptOp(0xb4)
OP_NOP6 = CScriptOp(0xb5)
OP_NOP7 = CScriptOp(0xb6)
OP_NOP8 = CScriptOp(0xb7)
OP_NOP9 = CScriptOp(0xb8)
OP_NOP10 = CScriptOp(0xb9)
OP_CHECKSIGADD = CScriptOp(0xba)
OP_INVALIDOPCODE = CScriptOp(0xff)
OPCODE_NAMES.update({
OP_0: 'OP_0',
OP_PUSHDATA1: 'OP_PUSHDATA1',
OP_PUSHDATA2: 'OP_PUSHDATA2',
OP_PUSHDATA4: 'OP_PUSHDATA4',
OP_1NEGATE: 'OP_1NEGATE',
OP_RESERVED: 'OP_RESERVED',
OP_1: 'OP_1',
OP_2: 'OP_2',
OP_3: 'OP_3',
OP_4: 'OP_4',
OP_5: 'OP_5',
OP_6: 'OP_6',
OP_7: 'OP_7',
OP_8: 'OP_8',
OP_9: 'OP_9',
OP_10: 'OP_10',
OP_11: 'OP_11',
OP_12: 'OP_12',
OP_13: 'OP_13',
OP_14: 'OP_14',
OP_15: 'OP_15',
OP_16: 'OP_16',
OP_NOP: 'OP_NOP',
OP_VER: 'OP_VER',
OP_IF: 'OP_IF',
OP_NOTIF: 'OP_NOTIF',
OP_VERIF: 'OP_VERIF',
OP_VERNOTIF: 'OP_VERNOTIF',
OP_ELSE: 'OP_ELSE',
OP_ENDIF: 'OP_ENDIF',
OP_VERIFY: 'OP_VERIFY',
OP_RETURN: 'OP_RETURN',
OP_TOALTSTACK: 'OP_TOALTSTACK',
OP_FROMALTSTACK: 'OP_FROMALTSTACK',
OP_2DROP: 'OP_2DROP',
OP_2DUP: 'OP_2DUP',
OP_3DUP: 'OP_3DUP',
OP_2OVER: 'OP_2OVER',
OP_2ROT: 'OP_2ROT',
OP_2SWAP: 'OP_2SWAP',
OP_IFDUP: 'OP_IFDUP',
OP_DEPTH: 'OP_DEPTH',
OP_DROP: 'OP_DROP',
OP_DUP: 'OP_DUP',
OP_NIP: 'OP_NIP',
OP_OVER: 'OP_OVER',
OP_PICK: 'OP_PICK',
OP_ROLL: 'OP_ROLL',
OP_ROT: 'OP_ROT',
OP_SWAP: 'OP_SWAP',
OP_TUCK: 'OP_TUCK',
OP_CAT: 'OP_CAT',
OP_SUBSTR: 'OP_SUBSTR',
OP_LEFT: 'OP_LEFT',
OP_RIGHT: 'OP_RIGHT',
OP_SIZE: 'OP_SIZE',
OP_INVERT: 'OP_INVERT',
OP_AND: 'OP_AND',
OP_OR: 'OP_OR',
OP_XOR: 'OP_XOR',
OP_EQUAL: 'OP_EQUAL',
OP_EQUALVERIFY: 'OP_EQUALVERIFY',
OP_RESERVED1: 'OP_RESERVED1',
OP_RESERVED2: 'OP_RESERVED2',
OP_1ADD: 'OP_1ADD',
OP_1SUB: 'OP_1SUB',
OP_2MUL: 'OP_2MUL',
OP_2DIV: 'OP_2DIV',
OP_NEGATE: 'OP_NEGATE',
OP_ABS: 'OP_ABS',
OP_NOT: 'OP_NOT',
OP_0NOTEQUAL: 'OP_0NOTEQUAL',
OP_ADD: 'OP_ADD',
OP_SUB: 'OP_SUB',
OP_MUL: 'OP_MUL',
OP_DIV: 'OP_DIV',
OP_MOD: 'OP_MOD',
OP_LSHIFT: 'OP_LSHIFT',
OP_RSHIFT: 'OP_RSHIFT',
OP_BOOLAND: 'OP_BOOLAND',
OP_BOOLOR: 'OP_BOOLOR',
OP_NUMEQUAL: 'OP_NUMEQUAL',
OP_NUMEQUALVERIFY: 'OP_NUMEQUALVERIFY',
OP_NUMNOTEQUAL: 'OP_NUMNOTEQUAL',
OP_LESSTHAN: 'OP_LESSTHAN',
OP_GREATERTHAN: 'OP_GREATERTHAN',
OP_LESSTHANOREQUAL: 'OP_LESSTHANOREQUAL',
OP_GREATERTHANOREQUAL: 'OP_GREATERTHANOREQUAL',
OP_MIN: 'OP_MIN',
OP_MAX: 'OP_MAX',
OP_WITHIN: 'OP_WITHIN',
OP_RIPEMD160: 'OP_RIPEMD160',
OP_SHA1: 'OP_SHA1',
OP_SHA256: 'OP_SHA256',
OP_HASH160: 'OP_HASH160',
OP_HASH256: 'OP_HASH256',
OP_CODESEPARATOR: 'OP_CODESEPARATOR',
OP_CHECKSIG: 'OP_CHECKSIG',
OP_CHECKSIGVERIFY: 'OP_CHECKSIGVERIFY',
OP_CHECKMULTISIG: 'OP_CHECKMULTISIG',
OP_CHECKMULTISIGVERIFY: 'OP_CHECKMULTISIGVERIFY',
OP_NOP1: 'OP_NOP1',
OP_CHECKLOCKTIMEVERIFY: 'OP_CHECKLOCKTIMEVERIFY',
OP_CHECKSEQUENCEVERIFY: 'OP_CHECKSEQUENCEVERIFY',
OP_NOP4: 'OP_NOP4',
OP_NOP5: 'OP_NOP5',
OP_NOP6: 'OP_NOP6',
OP_NOP7: 'OP_NOP7',
OP_NOP8: 'OP_NOP8',
OP_NOP9: 'OP_NOP9',
OP_NOP10: 'OP_NOP10',
OP_CHECKSIGADD: 'OP_CHECKSIGADD',
OP_INVALIDOPCODE: 'OP_INVALIDOPCODE',
})
class CScriptInvalidError(Exception):
pass
class CScriptTruncatedPushDataError(CScriptInvalidError):
def __init__(self, msg, data):
self.data = data
super().__init__(msg)
class CScriptNum:
    """Integer wrapper for Script's minimally-encoded CScriptNum format.

    ``encode`` produces a one-byte length prefix followed by a little-endian
    sign-magnitude serialization; ``decode`` reverses it (the length byte is
    skipped, not validated).
    """
    __slots__ = ("value",)

    def __init__(self, d=0):
        self.value = d

    @staticmethod
    def encode(obj):
        """Serialize ``obj.value`` as a length-prefixed CScriptNum push."""
        if obj.value == 0:
            return b''
        negative = obj.value < 0
        magnitude = abs(obj.value)
        out = bytearray()
        while magnitude:
            out.append(magnitude & 0xff)
            magnitude >>= 8
        # If the high bit of the most-significant byte is already set, a
        # separate sign byte is appended; otherwise the sign is folded into
        # that bit.
        if out[-1] & 0x80:
            out.append(0x80 if negative else 0x00)
        elif negative:
            out[-1] |= 0x80
        return bytes([len(out)]) + bytes(out)

    @staticmethod
    def decode(vch):
        """Decode a length-prefixed CScriptNum serialization back to an int."""
        payload = vch[1:]  # drop the single push-length byte
        if not payload:
            return 0
        result = int.from_bytes(bytes(payload), 'little')
        # The top bit of the final byte is the sign flag, not magnitude.
        if payload[-1] & 0x80:
            sign_mask = (1 << (len(payload) * 8 - 1)) - 1
            result = -(result & sign_mask)
        return result
class CScript(bytes):
    """Serialized Bitcoin script: a bytes subclass with script-aware helpers.

    Construction from an iterable coerces ints, CScriptOp, CScriptNum and
    bytes elements into their canonical opcode/push encodings; iteration
    decodes the serialization back into opcodes and push payloads.
    """
    __slots__ = ()

    @classmethod
    def __coerce_instance(cls, other):
        # Coerce one element of an iterable into its serialized script form.
        if isinstance(other, CScriptOp):
            other = bytes([other])
        elif isinstance(other, CScriptNum):
            if (other.value == 0):
                other = bytes([CScriptOp(OP_0)])
            else:
                other = CScriptNum.encode(other)
        elif isinstance(other, int):
            if 0 <= other <= 16:
                # 0..16 have dedicated opcodes (OP_0 .. OP_16).
                other = bytes([CScriptOp.encode_op_n(other)])
            elif other == -1:
                other = bytes([OP_1NEGATE])
            else:
                other = CScriptOp.encode_op_pushdata(bn2vch(other))
        elif isinstance(other, (bytes, bytearray)):
            other = CScriptOp.encode_op_pushdata(other)
        return other

    def __add__(self, other):
        # Disabled: bytes.__add__ would silently bypass push-data coercion.
        raise NotImplementedError

    def join(self, iterable):
        # Disabled for the same reason as __add__.
        raise NotImplementedError

    def __new__(cls, value=b''):
        if isinstance(value, bytes) or isinstance(value, bytearray):
            return super().__new__(cls, value)
        else:
            # Any other iterable: coerce each element, then join the chunks.
            def coerce_iterable(iterable):
                for instance in iterable:
                    yield cls.__coerce_instance(instance)
            return super().__new__(cls, b''.join(coerce_iterable(value)))

    def raw_iter(self):
        """Raw iteration yielding ``(opcode, data, sop_idx)`` tuples.

        *data* is the push payload (None for non-push opcodes) and *sop_idx*
        the byte offset of the opcode within the script.  Raises
        CScriptInvalidError / CScriptTruncatedPushDataError on malformed
        push data.
        """
        i = 0
        while i < len(self):
            sop_idx = i
            opcode = self[i]
            i += 1
            if opcode > OP_PUSHDATA4:
                yield (opcode, None, sop_idx)
            else:
                datasize = None
                pushdata_type = None
                if opcode < OP_PUSHDATA1:
                    # Direct push: the opcode byte itself is the length.
                    pushdata_type = 'PUSHDATA(%d)' % opcode
                    datasize = opcode
                elif opcode == OP_PUSHDATA1:
                    pushdata_type = 'PUSHDATA1'
                    if i >= len(self):
                        raise CScriptInvalidError('PUSHDATA1: missing data length')
                    datasize = self[i]
                    i += 1
                elif opcode == OP_PUSHDATA2:
                    pushdata_type = 'PUSHDATA2'
                    if i + 1 >= len(self):
                        raise CScriptInvalidError('PUSHDATA2: missing data length')
                    # Little-endian 16-bit length.
                    datasize = self[i] + (self[i + 1] << 8)
                    i += 2
                elif opcode == OP_PUSHDATA4:
                    pushdata_type = 'PUSHDATA4'
                    if i + 3 >= len(self):
                        raise CScriptInvalidError('PUSHDATA4: missing data length')
                    # Little-endian 32-bit length.
                    datasize = self[i] + (self[i + 1] << 8) + (self[i + 2] << 16) + (self[i + 3] << 24)
                    i += 4
                else:
                    assert False  # unreachable: all opcode values covered above
                data = bytes(self[i:i + datasize])
                # Check for truncation
                if len(data) < datasize:
                    raise CScriptTruncatedPushDataError('%s: truncated data' % pushdata_type, data)
                i += datasize
                yield (opcode, data, sop_idx)

    def __iter__(self):
        """Iterate, yielding ints for small-number opcodes, bytes for pushes,
        and CScriptOp instances for all other opcodes."""
        for (opcode, data, sop_idx) in self.raw_iter():
            if data is not None:
                yield data
            else:
                opcode = CScriptOp(opcode)
                if opcode.is_small_int():
                    yield opcode.decode_op_n()
                else:
                    yield CScriptOp(opcode)

    def __repr__(self):
        def _repr(o):
            if isinstance(o, bytes):
                return "x('%s')" % o.hex()
            else:
                return repr(o)
        ops = []
        i = iter(self)
        while True:
            op = None
            try:
                op = _repr(next(i))
            except CScriptTruncatedPushDataError as err:
                # Still display the partial payload of a truncated push.
                op = '%s...<ERROR: %s>' % (_repr(err.data), err)
                break
            except CScriptInvalidError as err:
                op = '<ERROR: %s>' % err
                break
            except StopIteration:
                break
            finally:
                if op is not None:
                    ops.append(op)
        return "CScript([%s])" % ', '.join(ops)

    def GetSigOpCount(self, fAccurate):
        """Return the legacy signature-operation count of the script.

        With *fAccurate*, a CHECKMULTISIG(VERIFY) preceded by OP_1..OP_16
        counts as that many sigops; otherwise it counts as 20.
        """
        n = 0
        lastOpcode = OP_INVALIDOPCODE
        for (opcode, data, sop_idx) in self.raw_iter():
            if opcode in (OP_CHECKSIG, OP_CHECKSIGVERIFY):
                n += 1
            elif opcode in (OP_CHECKMULTISIG, OP_CHECKMULTISIGVERIFY):
                if fAccurate and (OP_1 <= lastOpcode <= OP_16):
                    n += opcode.decode_op_n()
                else:
                    n += 20
            lastOpcode = opcode
        return n
# Signature hash type flags; the low 5 bits select the output commitment mode
# and SIGHASH_ANYONECANPAY modifies the input commitment.
SIGHASH_DEFAULT = 0  # Taproot-only default, semantics same as SIGHASH_ALL
SIGHASH_ALL = 1  # commit to all outputs
SIGHASH_NONE = 2  # commit to no outputs
SIGHASH_SINGLE = 3  # commit only to the output at the input's index
SIGHASH_ANYONECANPAY = 0x80  # flag: commit only to this input, not the others
def FindAndDelete(script, sig):
    """Remove all exact occurrences of *sig* from *script*.

    Mirrors Bitcoin Core's ``FindAndDelete()``: matching happens on opcode
    boundaries (via ``raw_iter``), never at arbitrary byte offsets.
    """
    r = b''
    last_sop_idx = sop_idx = 0
    skip = True  # True while the current run of bytes should be dropped
    for (opcode, data, sop_idx) in script.raw_iter():
        if not skip:
            # Flush the previous (non-matching) opcode's bytes.
            r += script[last_sop_idx:sop_idx]
        last_sop_idx = sop_idx
        # Skip this opcode (and its push data) iff it starts a match of sig.
        if script[sop_idx:sop_idx + len(sig)] == sig:
            skip = True
        else:
            skip = False
    if not skip:
        r += script[last_sop_idx:]
    return CScript(r)
def LegacySignatureHash(script, txTo, inIdx, hashtype):
    """Compute the legacy (pre-segwit) signature hash for input *inIdx*.

    Returns a ``(hash, err)`` tuple; *err* is None on success.  Out-of-range
    indices return the special "hash of one" value instead of raising,
    mirroring the historical behaviour of Bitcoin Core's SignatureHash.
    """
    # 32-byte little-endian representation of the integer 1.
    HASH_ONE = b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
    if inIdx >= len(txTo.vin):
        return (HASH_ONE, "inIdx %d out of range (%d)" % (inIdx, len(txTo.vin)))
    txtmp = CTransaction(txTo)  # work on a copy; the caller's tx is untouched
    for txin in txtmp.vin:
        txin.scriptSig = b''
    # The signed input's scriptSig is the scriptCode with OP_CODESEPARATORs removed.
    txtmp.vin[inIdx].scriptSig = FindAndDelete(script, CScript([OP_CODESEPARATOR]))
    if (hashtype & 0x1f) == SIGHASH_NONE:
        # Sign no outputs; other inputs' sequences are zeroed so they may change.
        txtmp.vout = []
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    elif (hashtype & 0x1f) == SIGHASH_SINGLE:
        # Sign only the output with the same index as this input.
        outIdx = inIdx
        if outIdx >= len(txtmp.vout):
            return (HASH_ONE, "outIdx %d out of range (%d)" % (outIdx, len(txtmp.vout)))
        tmp = txtmp.vout[outIdx]
        txtmp.vout = []
        for _ in range(outIdx):
            # Earlier outputs are replaced by "null" placeholders (value -1).
            txtmp.vout.append(CTxOut(-1))
        txtmp.vout.append(tmp)
        for i in range(len(txtmp.vin)):
            if i != inIdx:
                txtmp.vin[i].nSequence = 0
    if hashtype & SIGHASH_ANYONECANPAY:
        # Commit only to the signed input itself.
        tmp = txtmp.vin[inIdx]
        txtmp.vin = []
        txtmp.vin.append(tmp)
    s = txtmp.serialize_without_witness()
    s += struct.pack(b"<I", hashtype)
    hash = sha256(s)
    return (hash, None)
# TODO: Allow cached hashPrevouts/hashSequence/hashOutputs to be provided.
# Performance optimization probably not necessary for python tests, however.
# Note that this corresponds to sigversion == 1 in EvalScript, which is used
# for version 0 witnesses.
def SegwitV0SignatureHash(script, txTo, inIdx, hashtype, amount):
    """Compute the BIP143 (segwit v0) signature hash for input *inIdx*.

    *script* is the scriptCode and *amount* the value of the output being
    spent; both are committed to under BIP143.
    """
    hashPrevouts = 0
    hashSequence = 0
    hashOutputs = 0
    if not (hashtype & SIGHASH_ANYONECANPAY):
        # Commit to every input's prevout.
        serialize_prevouts = bytes()
        for i in txTo.vin:
            serialize_prevouts += i.prevout.serialize()
        hashPrevouts = uint256_from_str(sha256(serialize_prevouts))
    if (not (hashtype & SIGHASH_ANYONECANPAY) and (hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to every input's nSequence.
        serialize_sequence = bytes()
        for i in txTo.vin:
            serialize_sequence += struct.pack("<I", i.nSequence)
        hashSequence = uint256_from_str(sha256(serialize_sequence))
    if ((hashtype & 0x1f) != SIGHASH_SINGLE and (hashtype & 0x1f) != SIGHASH_NONE):
        # Commit to all outputs.
        serialize_outputs = bytes()
        for o in txTo.vout:
            serialize_outputs += o.serialize()
        hashOutputs = uint256_from_str(sha256(serialize_outputs))
    elif ((hashtype & 0x1f) == SIGHASH_SINGLE and inIdx < len(txTo.vout)):
        # SIGHASH_SINGLE commits only to the output at the same index.
        serialize_outputs = txTo.vout[inIdx].serialize()
        hashOutputs = uint256_from_str(sha256(serialize_outputs))
    # Assemble the BIP143 preimage.
    ss = bytes()
    ss += struct.pack("<i", txTo.nVersion)
    ss += ser_uint256(hashPrevouts)
    ss += ser_uint256(hashSequence)
    ss += txTo.vin[inIdx].prevout.serialize()
    ss += ser_string(script)
    ss += struct.pack("<q", amount)
    ss += struct.pack("<I", txTo.vin[inIdx].nSequence)
    ss += ser_uint256(hashOutputs)
    ss += struct.pack("<i", txTo.nLockTime)
    ss += struct.pack("<I", hashtype)
    return sha256(ss)
class TestFrameworkScript(unittest.TestCase):
    """Unit tests for the numeric encoding helpers in this module."""

    def test_bn2vch(self):
        """Spot-check bn2vch against known minimal sign-magnitude encodings."""
        self.assertEqual(bn2vch(0), bytes([]))
        self.assertEqual(bn2vch(1), bytes([0x01]))
        self.assertEqual(bn2vch(-1), bytes([0x81]))
        self.assertEqual(bn2vch(0x7F), bytes([0x7F]))
        self.assertEqual(bn2vch(-0x7F), bytes([0xFF]))
        self.assertEqual(bn2vch(0x80), bytes([0x80, 0x00]))
        self.assertEqual(bn2vch(-0x80), bytes([0x80, 0x80]))
        self.assertEqual(bn2vch(0xFF), bytes([0xFF, 0x00]))
        self.assertEqual(bn2vch(-0xFF), bytes([0xFF, 0x80]))
        self.assertEqual(bn2vch(0x100), bytes([0x00, 0x01]))
        self.assertEqual(bn2vch(-0x100), bytes([0x00, 0x81]))
        self.assertEqual(bn2vch(0x7FFF), bytes([0xFF, 0x7F]))
        self.assertEqual(bn2vch(-0x8000), bytes([0x00, 0x80, 0x80]))
        self.assertEqual(bn2vch(-0x7FFFFF), bytes([0xFF, 0xFF, 0xFF]))
        self.assertEqual(bn2vch(0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x00]))
        self.assertEqual(bn2vch(-0x80000000), bytes([0x00, 0x00, 0x00, 0x80, 0x80]))
        self.assertEqual(bn2vch(0xFFFFFFFF), bytes([0xFF, 0xFF, 0xFF, 0xFF, 0x00]))
        self.assertEqual(bn2vch(123456789), bytes([0x15, 0xCD, 0x5B, 0x07]))
        self.assertEqual(bn2vch(-54321), bytes([0x31, 0xD4, 0x80]))

    def test_cscriptnum_encoding(self):
        """Verify CScriptNum encode/decode round-trips for edge-case values."""
        # round-trip negative and multi-byte CScriptNums
        values = [0, 1, -1, -2, 127, 128, -255, 256, (1 << 15) - 1, -(1 << 16), (1 << 24) - 1, (1 << 31), 1 - (1 << 32), 1 << 40, 1500, -1500]
        for value in values:
            self.assertEqual(CScriptNum.decode(CScriptNum.encode(CScriptNum(value))), value)
def TaprootSignatureHash(txTo, spent_utxos, hash_type, input_index = 0, scriptpath = False, script = CScript(), codeseparator_pos = -1, annex = None, leaf_ver = LEAF_VERSION_TAPSCRIPT):
    """Compute the BIP341 (taproot) signature hash for *input_index* of *txTo*.

    *spent_utxos* must list the spent outputs of every input, in txTo.vin
    order (their nValue and scriptPubKey are committed to).  For tapscript
    spends set *scriptpath* along with *script*, *codeseparator_pos* and
    *leaf_ver*; *annex* is the optional annex bytes (without its 0x50 tag
    handled here — it is length-prefixed via ser_string).
    """
    assert (len(txTo.vin) == len(spent_utxos))
    assert (input_index < len(txTo.vin))
    out_type = SIGHASH_ALL if hash_type == 0 else hash_type & 3
    in_type = hash_type & SIGHASH_ANYONECANPAY
    spk = spent_utxos[input_index].scriptPubKey
    ss = bytes([0, hash_type])  # epoch, hash_type
    ss += struct.pack("<i", txTo.nVersion)
    ss += struct.pack("<I", txTo.nLockTime)
    if in_type != SIGHASH_ANYONECANPAY:
        # Commit to all prevouts, amounts, scriptPubKeys and sequences.
        ss += sha256(b"".join(i.prevout.serialize() for i in txTo.vin))
        ss += sha256(b"".join(struct.pack("<q", u.nValue) for u in spent_utxos))
        ss += sha256(b"".join(ser_string(u.scriptPubKey) for u in spent_utxos))
        ss += sha256(b"".join(struct.pack("<I", i.nSequence) for i in txTo.vin))
    if out_type == SIGHASH_ALL:
        ss += sha256(b"".join(o.serialize() for o in txTo.vout))
    # spend_type bit 0: annex present; bit 1: script-path spend.
    spend_type = 0
    if annex is not None:
        spend_type |= 1
    if (scriptpath):
        spend_type |= 2
    ss += bytes([spend_type])
    if in_type == SIGHASH_ANYONECANPAY:
        # Commit only to the data of the signed input.
        ss += txTo.vin[input_index].prevout.serialize()
        ss += struct.pack("<q", spent_utxos[input_index].nValue)
        ss += ser_string(spk)
        ss += struct.pack("<I", txTo.vin[input_index].nSequence)
    else:
        ss += struct.pack("<I", input_index)
    if (spend_type & 1):
        ss += sha256(ser_string(annex))
    if out_type == SIGHASH_SINGLE:
        if input_index < len(txTo.vout):
            ss += sha256(txTo.vout[input_index].serialize())
        else:
            # No matching output: commit to 32 zero bytes.
            ss += bytes(0 for _ in range(32))
    if (scriptpath):
        # Script-path extension: tapleaf hash, key version byte, codeseparator position.
        ss += TaggedHash("TapLeaf", bytes([leaf_ver]) + ser_string(script))
        ss += bytes([0])
        ss += struct.pack("<i", codeseparator_pos)
    # Sanity-check the preimage length against the BIP341 field sizes.
    assert len(ss) == 175 - (in_type == SIGHASH_ANYONECANPAY) * 49 - (out_type != SIGHASH_ALL and out_type != SIGHASH_SINGLE) * 32 + (annex is not None) * 32 + scriptpath * 37
    return TaggedHash("TapSighash", ss)
def taproot_tree_helper(scripts):
    """Recursively compute leaves and Merkle root for a taproot script tree.

    *scripts* is a nested list structure: a tuple is a leaf
    ``(name, script[, leaf_version])``, a nested list descends into a
    subtree, and a callable in the right position of a pair supplies a
    sibling hash directly (for subtrees known only by hash).  Returns
    ``(leaves, hash)`` where each leaf is a
    ``(name, version, script, merklebranch)`` tuple.
    """
    if len(scripts) == 0:
        return ([], bytes())
    if len(scripts) == 1:
        # One entry: treat as a leaf
        script = scripts[0]
        assert(not callable(script))
        if isinstance(script, list):
            return taproot_tree_helper(script)
        assert(isinstance(script, tuple))
        version = LEAF_VERSION_TAPSCRIPT
        name = script[0]
        code = script[1]
        if len(script) == 3:
            version = script[2]
        assert version & 1 == 0  # leaf versions must have the low bit clear
        assert isinstance(code, bytes)
        h = TaggedHash("TapLeaf", bytes([version]) + ser_string(code))
        if name is None:
            # Anonymous leaf: contributes its hash but no named entry.
            return ([], h)
        return ([(name, version, code, bytes())], h)
    elif len(scripts) == 2 and callable(scripts[1]):
        # Two entries, and the right one is a function
        left, left_h = taproot_tree_helper(scripts[0:1])
        right_h = scripts[1](left_h)
        left = [(name, version, script, control + right_h) for name, version, script, control in left]
        right = []
    else:
        # Two or more entries: descend into each side
        split_pos = len(scripts) // 2
        left, left_h = taproot_tree_helper(scripts[0:split_pos])
        right, right_h = taproot_tree_helper(scripts[split_pos:])
        # Extend each side's merkle branches with the sibling subtree's hash.
        left = [(name, version, script, control + right_h) for name, version, script, control in left]
        right = [(name, version, script, control + left_h) for name, version, script, control in right]
    if right_h < left_h:
        # Branch child hashes are committed in lexicographic order.
        right_h, left_h = left_h, right_h
    h = TaggedHash("TapBranch", left_h + right_h)
    return (left + right, h)
# A TaprootInfo object has the following fields:
# - scriptPubKey: the scriptPubKey (witness v1 CScript)
# - internal_pubkey: the internal pubkey (32 bytes)
# - negflag: whether the pubkey in the scriptPubKey was negated from internal_pubkey+tweak*G (bool).
# - tweak: the tweak (32 bytes)
# - leaves: a dict of name -> TaprootLeafInfo objects for all known leaves
# Instances are produced by taproot_construct() below.
TaprootInfo = namedtuple("TaprootInfo", "scriptPubKey,internal_pubkey,negflag,tweak,leaves")

# A TaprootLeafInfo object has the following fields:
# - script: the leaf script (CScript or bytes)
# - version: the leaf version (0xc0 for BIP342 tapscript)
# - merklebranch: the merkle branch to use for this leaf (32*N bytes)
TaprootLeafInfo = namedtuple("TaprootLeafInfo", "script,version,merklebranch")
def taproot_construct(pubkey, scripts=None):
    """Construct a TaprootInfo for internal *pubkey* and an optional script tree.

    *scripts* follows the nested structure accepted by
    taproot_tree_helper(); None or [] means key-path-only spending.
    """
    if scripts is None:
        scripts = []
    ret, h = taproot_tree_helper(scripts)
    tweak = TaggedHash("TapTweak", pubkey + h)
    tweaked, negated = tweak_add_pubkey(pubkey, tweak)
    leaves = dict((name, TaprootLeafInfo(script, version, merklebranch)) for name, version, script, merklebranch in ret)
    # 'negated + 0' normalizes the bool to an int for the negflag field.
    return TaprootInfo(CScript([OP_1, tweaked]), pubkey, negated + 0, tweak, leaves)
def is_op_success(o):
    """Return whether opcode byte *o* is one of the OP_SUCCESSx opcodes."""
    if o in (0x50, 0x62, 0x89, 0x8a, 0x8d, 0x8e):
        return True
    return any(lo <= o <= hi for lo, hi in ((0x7e, 0x81), (0x83, 0x86), (0x95, 0x99), (0xbb, 0xfe)))
| true
| true
|
1c4989fdbdd50273e32b2fa29a924ec8d6080b4c
| 1,729
|
py
|
Python
|
joss_paper/figures/gen_phold_space_time_plot.py
|
KarrLab/desim
|
6f189d8c8e850e092d816f6be3d6f87b4f983ac2
|
[
"MIT"
] | 16
|
2019-12-12T15:49:17.000Z
|
2022-03-31T20:34:36.000Z
|
joss_paper/figures/gen_phold_space_time_plot.py
|
KarrLab/desim
|
6f189d8c8e850e092d816f6be3d6f87b4f983ac2
|
[
"MIT"
] | 65
|
2019-08-15T14:50:38.000Z
|
2020-12-17T14:36:04.000Z
|
joss_paper/figures/gen_phold_space_time_plot.py
|
KarrLab/desim
|
6f189d8c8e850e092d816f6be3d6f87b4f983ac2
|
[
"MIT"
] | 5
|
2020-07-16T22:15:47.000Z
|
2021-08-16T02:16:17.000Z
|
""" Generate a space-time plot of PHOLD
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Date: 2020-06-22
:Copyright: 2020, Karr Lab
:License: MIT
"""
from argparse import Namespace
import os
import tempfile
from de_sim.examples.phold import RunPhold
from de_sim.testing.utilities_for_testing import unset_env_var
from de_sim.visualize import SpaceTime
from wc_utils.util.environ import EnvironUtils
import de_sim
def run_phold(max_time, num_phold_procs=3, frac_self_events=0.5):
    """ Run PHOLD, and generate a plot log

    Args:
        max_time (:obj:`float`): simulation duration
        num_phold_procs (:obj:`int`, optional): number of PHOLD processes to run
        frac_self_events (:obj:`float`, optional): fraction of events sent to self
    """
    # Bundle the CLI-style arguments RunPhold.main expects.
    args = Namespace(max_time=max_time, num_phold_procs=num_phold_procs,
                     frac_self_events=frac_self_events)
    RunPhold.main(args)
def create_phold_space_time_diagram():
    """ Run PHOLD, and use plot log to generate a space-time diagram """
    plot_log = os.path.expanduser('~/.wc/log/de_sim.plot.log')
    # Remove any stale plot log so the diagram reflects only this run.
    try:
        os.remove(plot_log)
    except FileNotFoundError:
        pass
    run_phold(8)  # simulate for 8 time units
    space_time = SpaceTime()
    space_time.get_data(plot_log)
    temp_dir = tempfile.TemporaryDirectory()
    space_time_plot = os.path.join(temp_dir.name, "phold_space_time_plot.pdf")
    # NOTE(review): unsetting DISPLAY presumably forces headless plotting —
    # confirm against de_sim.visualize.SpaceTime.plot_data.
    with unset_env_var('DISPLAY'):
        space_time.plot_data(space_time_plot)
    print('space-time diagram written to', space_time_plot)
# Temporarily enable event logging and set the plot-file log handler to debug
# level, then generate the space-time diagram under that configuration.
with EnvironUtils.temp_config_env(((['de_sim', 'log_events'], 'True'),
                                   (['debug_logs', 'handlers', 'plot.file', 'level'], 'debug'))):
    create_phold_space_time_diagram()
| 32.622642
| 97
| 0.70561
|
from argparse import Namespace
import os
import tempfile
from de_sim.examples.phold import RunPhold
from de_sim.testing.utilities_for_testing import unset_env_var
from de_sim.visualize import SpaceTime
from wc_utils.util.environ import EnvironUtils
import de_sim
def run_phold(max_time, num_phold_procs=3, frac_self_events=0.5):
args = Namespace(max_time=max_time, num_phold_procs=num_phold_procs,
frac_self_events=frac_self_events)
RunPhold.main(args)
def create_phold_space_time_diagram():
plot_log = os.path.expanduser('~/.wc/log/de_sim.plot.log')
try:
os.remove(plot_log)
except FileNotFoundError:
pass
run_phold(8)
space_time = SpaceTime()
space_time.get_data(plot_log)
temp_dir = tempfile.TemporaryDirectory()
space_time_plot = os.path.join(temp_dir.name, "phold_space_time_plot.pdf")
with unset_env_var('DISPLAY'):
space_time.plot_data(space_time_plot)
print('space-time diagram written to', space_time_plot)
with EnvironUtils.temp_config_env(((['de_sim', 'log_events'], 'True'),
(['debug_logs', 'handlers', 'plot.file', 'level'], 'debug'))):
create_phold_space_time_diagram()
| true
| true
|
1c4989ff4081d21eafb011936a27765d85b3e3f2
| 2,127
|
py
|
Python
|
webapp/libs/plugins/saplugin.py
|
crocodilered/TheObjectRating
|
2f44eb9cf7f39d3ab95cbc4ea720995a29344349
|
[
"MIT"
] | null | null | null |
webapp/libs/plugins/saplugin.py
|
crocodilered/TheObjectRating
|
2f44eb9cf7f39d3ab95cbc4ea720995a29344349
|
[
"MIT"
] | null | null | null |
webapp/libs/plugins/saplugin.py
|
crocodilered/TheObjectRating
|
2f44eb9cf7f39d3ab95cbc4ea720995a29344349
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import cherrypy
from cherrypy.process import wspbus, plugins
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
__all__ = ['SAEnginePlugin']
class SAEnginePlugin(plugins.SimplePlugin):
    """CherryPy bus plugin that owns a shared SQLAlchemy engine and a
    thread-scoped session, exposed via the 'bind-session' and
    'commit-session' bus channels."""

    def __init__(self, bus, connection_string=None):
        """
        The plugin is registered to the CherryPy engine and therefore
        is part of the bus (the engine *is* a bus) registery.

        We use this plugin to create the SA engine. At the same time,
        when the plugin starts we create the tables into the database
        using the mapped class of the global metadata.
        """
        plugins.SimplePlugin.__init__(self, bus)
        self.sa_engine = None
        self.connection_string = connection_string
        # scoped_session gives each thread its own session instance.
        self.session = scoped_session(sessionmaker(autoflush=True, autocommit=False))

    def start(self):
        # Bus start-up: create the engine and expose the session channels.
        self.bus.log('Starting up DB access')
        self.sa_engine = create_engine(self.connection_string, echo=False)
        self.bus.subscribe("bind-session", self.bind)
        self.bus.subscribe("commit-session", self.commit)

    def stop(self):
        # Bus shut-down: withdraw the channels and release pooled connections.
        self.bus.log('Stopping down DB access')
        self.bus.unsubscribe("bind-session", self.bind)
        self.bus.unsubscribe("commit-session", self.commit)
        if self.sa_engine:
            self.sa_engine.dispose()
            self.sa_engine = None

    def bind(self):
        """
        Whenever this plugin receives the 'bind-session' command, it applies
        this method and to bind the current session to the engine.
        It then returns the session to the caller.
        """
        self.session.configure(bind=self.sa_engine)
        return self.session

    def commit(self):
        """
        Commits the current transaction or rollbacks if an error occurs.
        In all cases, the current session is unbound and therefore
        not usable any longer.
        """
        try:
            self.session.commit()
        except:
            # Bare except is tolerable here: the exception is always
            # re-raised after the rollback, so nothing is swallowed.
            self.session.rollback()
            raise
        finally:
            self.session.remove()
| 33.761905
| 85
| 0.64504
|
import cherrypy
from cherrypy.process import wspbus, plugins
from sqlalchemy import create_engine
from sqlalchemy.orm import scoped_session, sessionmaker
__all__ = ['SAEnginePlugin']
class SAEnginePlugin(plugins.SimplePlugin):
def __init__(self, bus, connection_string=None):
plugins.SimplePlugin.__init__(self, bus)
self.sa_engine = None
self.connection_string = connection_string
self.session = scoped_session(sessionmaker(autoflush=True, autocommit=False))
def start(self):
self.bus.log('Starting up DB access')
self.sa_engine = create_engine(self.connection_string, echo=False)
self.bus.subscribe("bind-session", self.bind)
self.bus.subscribe("commit-session", self.commit)
def stop(self):
self.bus.log('Stopping down DB access')
self.bus.unsubscribe("bind-session", self.bind)
self.bus.unsubscribe("commit-session", self.commit)
if self.sa_engine:
self.sa_engine.dispose()
self.sa_engine = None
def bind(self):
self.session.configure(bind=self.sa_engine)
return self.session
def commit(self):
try:
self.session.commit()
except:
self.session.rollback()
raise
finally:
self.session.remove()
| true
| true
|
1c498a3ccb22414034d40442b3f457d23a2b3520
| 3,508
|
py
|
Python
|
portfolio/Python/scrapy/americanrv/streetsideauto.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/americanrv/streetsideauto.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | null | null | null |
portfolio/Python/scrapy/americanrv/streetsideauto.py
|
0--key/lib
|
ba7a85dda2b208adc290508ca617bdc55a5ded22
|
[
"Apache-2.0"
] | 5
|
2016-03-22T07:40:46.000Z
|
2021-05-30T16:12:21.000Z
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class StreetSideAutoSpider(BaseSpider):
    """Spider that searches streetsideauto.com for americanrv SKUs and scrapes
    matching product pages.

    NOTE(review): Python 2 era code (``csv_file.next()`` and the legacy
    scrapy ``BaseSpider``/``HtmlXPathSelector`` API).
    """
    name = 'streetsideauto.com'
    allowed_domains = ['www.streetsideauto.com']
    start_urls = ('http://www.streetsideauto.com/',)

    def __init__(self, *args, **kwargs):
        super(StreetSideAutoSpider, self).__init__(*args, **kwargs)
        csv_file = csv.reader(open(os.path.join(HERE, 'americanrv_products.csv')))
        csv_file.next()  # skip the header row (Python 2 iterator API)
        # Map SKU -> {'mfrgid': manufacturer group id, 'ids': search terms}.
        # Assumes column 0 is the SKU and column 2 the manufacturer id —
        # TODO confirm against americanrv_products.csv.
        self.product_ids = {}
        for row in csv_file:
            ids = set()
            ids.add(row[0])
            self.product_ids[row[0]] = {'mfrgid': row[2], 'ids': frozenset(ids)}

    def start_requests(self):
        """Issue one search request per (sku, search term) pair."""
        for sku, data in self.product_ids.items():
            for id in data['ids']:
                url = 'http://www.streetsideauto.com/search.asp?keywords=' + re.sub(' ','+', id)
                req = Request(url)
                req.meta['sku'] = sku
                req.meta['mfrgid'] = data['mfrgid']
                yield req

    def parse(self, response):
        """Parse a search-results page and follow each product link."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        # pagination
        # next_page = hxs.select(u'//dl[@class="pages"]/dd/a[contains(text(),"Next")]/@href').extract()
        # if next_page:
        #     next_page = urljoin_rfc(get_base_url(response), next_page[0])
        #     req = Request(next_page, meta={'sku': response.meta['sku']})
        #     yield req
        # products
        products = hxs.select(u'//div[@class="p-summary leaf"]/a[@class="part-title"]/@href').extract()
        for url in products:
            url = urljoin_rfc(get_base_url(response), url)
            req = Request(url, callback=self.parse_product)
            req.meta['sku'] = response.meta['sku']
            req.meta['mfrgid'] = response.meta['mfrgid']
            yield req

    def parse_product(self, response):
        """Parse a product page; yield an item only when the page's part
        number matches the expected manufacturer id and a price was found."""
        if not isinstance(response, HtmlResponse):
            return
        hxs = HtmlXPathSelector(response)
        product_loader = ProductLoader(item=Product(), response=response)
        product_loader.add_xpath('price', u'//div[@id="conv-box"]//dd[@class="amount"]/text()')
        if not product_loader.get_output_value('price'):
            # Fall back to an alternative price element on the page.
            product_loader.add_xpath('price', u'//dl[@class="ssa-price-dl"]/dd[@class="ssa-price"]/text()')
        product_loader.add_value('url', response.url)
        product_loader.add_value('sku', response.meta['sku'])
        product_loader.add_value('identifier', response.meta['sku'].lower())
        name = hxs.select(u'//div[@class="right-column-left"]/div[@class="title"]/h2/text()').extract()[0].strip()
        product_loader.add_value('name', name)
        # sku = response.meta['sku'].lower().split(' ')
        # name = product_loader.get_output_value('name').lower()
        # sku = filter(lambda x: x != '' and x in name, sku)
        part_number = hxs.select(u'//div[@class="title"]/h2/span/text()').re('Part No. (.*)')[0]
        mfrgid = response.meta['mfrgid']
        if part_number == mfrgid and product_loader.get_output_value('price'):
            yield product_loader.load_item()
| 38.977778
| 114
| 0.61488
|
import re
import os
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse
from scrapy.utils.response import get_base_url
from scrapy.utils.url import urljoin_rfc
import csv
from product_spiders.items import Product, ProductLoader
from scrapy import log
HERE = os.path.abspath(os.path.dirname(__file__))
class StreetSideAutoSpider(BaseSpider):
name = 'streetsideauto.com'
allowed_domains = ['www.streetsideauto.com']
start_urls = ('http://www.streetsideauto.com/',)
def __init__(self, *args, **kwargs):
super(StreetSideAutoSpider, self).__init__(*args, **kwargs)
csv_file = csv.reader(open(os.path.join(HERE, 'americanrv_products.csv')))
csv_file.next()
self.product_ids = {}
for row in csv_file:
ids = set()
ids.add(row[0])
self.product_ids[row[0]] = {'mfrgid': row[2], 'ids': frozenset(ids)}
def start_requests(self):
for sku, data in self.product_ids.items():
for id in data['ids']:
url = 'http://www.streetsideauto.com/search.asp?keywords=' + re.sub(' ','+', id)
req = Request(url)
req.meta['sku'] = sku
req.meta['mfrgid'] = data['mfrgid']
yield req
def parse(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
products = hxs.select(u'//div[@class="p-summary leaf"]/a[@class="part-title"]/@href').extract()
for url in products:
url = urljoin_rfc(get_base_url(response), url)
req = Request(url, callback=self.parse_product)
req.meta['sku'] = response.meta['sku']
req.meta['mfrgid'] = response.meta['mfrgid']
yield req
def parse_product(self, response):
if not isinstance(response, HtmlResponse):
return
hxs = HtmlXPathSelector(response)
product_loader = ProductLoader(item=Product(), response=response)
product_loader.add_xpath('price', u'//div[@id="conv-box"]//dd[@class="amount"]/text()')
if not product_loader.get_output_value('price'):
product_loader.add_xpath('price', u'//dl[@class="ssa-price-dl"]/dd[@class="ssa-price"]/text()')
product_loader.add_value('url', response.url)
product_loader.add_value('sku', response.meta['sku'])
product_loader.add_value('identifier', response.meta['sku'].lower())
name = hxs.select(u'//div[@class="right-column-left"]/div[@class="title"]/h2/text()').extract()[0].strip()
product_loader.add_value('name', name)
part_number = hxs.select(u'//div[@class="title"]/h2/span/text()').re('Part No. (.*)')[0]
mfrgid = response.meta['mfrgid']
if part_number == mfrgid and product_loader.get_output_value('price'):
yield product_loader.load_item()
| true
| true
|
1c498d0e059a2c2fff70bac574fa1aba4e9dd83e
| 59
|
py
|
Python
|
SPLIT.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
SPLIT.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
SPLIT.py
|
anayakoti/FirstSample
|
8ef05772991644e63a4fd6759458f449cd2b00c0
|
[
"bzip2-1.0.6"
] | null | null | null |
# Split a sentence into its words and print the resulting list.
WORD = "tHIS IS ANUDEEP"
# Bug fix: the original called WORD.spit(), which raises AttributeError —
# str has no method 'spit'; the intended method is split().
lister = WORD.split()
print(lister)
| 14.75
| 23
| 0.711864
|
WORD="tHIS IS ANUDEEP";
lister=WORD.spit();
print(lister);
| true
| true
|
1c498da49fdf2f6ac2d0d58c9f1b429a18e01773
| 9,475
|
py
|
Python
|
sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/hanaonazure/get_hana_instance.py
|
polivbr/pulumi-azure-native
|
09571f3bf6bdc4f3621aabefd1ba6c0d4ecfb0e7
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
__all__ = [
'GetHanaInstanceResult',
'AwaitableGetHanaInstanceResult',
'get_hana_instance',
]
@pulumi.output_type
class GetHanaInstanceResult:
    """
    HANA instance info on Azure (ARM properties and HANA properties)
    """
    # NOTE: generated by the Pulumi SDK Generator; keep edits to docs only.
    def __init__(__self__, hana_instance_id=None, hardware_profile=None, hw_revision=None, id=None, location=None, name=None, network_profile=None, os_profile=None, partner_node_id=None, power_state=None, provisioning_state=None, proximity_placement_group=None, storage_profile=None, tags=None, type=None):
        # Each raw engine output is type-checked before being stored; falsy
        # values (None, '', {}) skip the check by design of the generator.
        if hana_instance_id and not isinstance(hana_instance_id, str):
            raise TypeError("Expected argument 'hana_instance_id' to be a str")
        pulumi.set(__self__, "hana_instance_id", hana_instance_id)
        if hardware_profile and not isinstance(hardware_profile, dict):
            raise TypeError("Expected argument 'hardware_profile' to be a dict")
        pulumi.set(__self__, "hardware_profile", hardware_profile)
        if hw_revision and not isinstance(hw_revision, str):
            raise TypeError("Expected argument 'hw_revision' to be a str")
        pulumi.set(__self__, "hw_revision", hw_revision)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if location and not isinstance(location, str):
            raise TypeError("Expected argument 'location' to be a str")
        pulumi.set(__self__, "location", location)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if network_profile and not isinstance(network_profile, dict):
            raise TypeError("Expected argument 'network_profile' to be a dict")
        pulumi.set(__self__, "network_profile", network_profile)
        if os_profile and not isinstance(os_profile, dict):
            raise TypeError("Expected argument 'os_profile' to be a dict")
        pulumi.set(__self__, "os_profile", os_profile)
        if partner_node_id and not isinstance(partner_node_id, str):
            raise TypeError("Expected argument 'partner_node_id' to be a str")
        pulumi.set(__self__, "partner_node_id", partner_node_id)
        if power_state and not isinstance(power_state, str):
            raise TypeError("Expected argument 'power_state' to be a str")
        pulumi.set(__self__, "power_state", power_state)
        if provisioning_state and not isinstance(provisioning_state, str):
            raise TypeError("Expected argument 'provisioning_state' to be a str")
        pulumi.set(__self__, "provisioning_state", provisioning_state)
        if proximity_placement_group and not isinstance(proximity_placement_group, str):
            raise TypeError("Expected argument 'proximity_placement_group' to be a str")
        pulumi.set(__self__, "proximity_placement_group", proximity_placement_group)
        if storage_profile and not isinstance(storage_profile, dict):
            raise TypeError("Expected argument 'storage_profile' to be a dict")
        pulumi.set(__self__, "storage_profile", storage_profile)
        if tags and not isinstance(tags, dict):
            raise TypeError("Expected argument 'tags' to be a dict")
        pulumi.set(__self__, "tags", tags)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)

    @property
    @pulumi.getter(name="hanaInstanceId")
    def hana_instance_id(self) -> str:
        """
        Specifies the HANA instance unique ID.
        """
        return pulumi.get(self, "hana_instance_id")

    @property
    @pulumi.getter(name="hardwareProfile")
    def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']:
        """
        Specifies the hardware settings for the HANA instance.
        """
        return pulumi.get(self, "hardware_profile")

    @property
    @pulumi.getter(name="hwRevision")
    def hw_revision(self) -> str:
        """
        Hardware revision of a HANA instance
        """
        return pulumi.get(self, "hw_revision")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        Resource ID
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """
        Resource location
        """
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        Resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkProfile")
    def network_profile(self) -> Optional['outputs.NetworkProfileResponse']:
        """
        Specifies the network settings for the HANA instance.
        """
        return pulumi.get(self, "network_profile")

    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OSProfileResponse']:
        """
        Specifies the operating system settings for the HANA instance.
        """
        return pulumi.get(self, "os_profile")

    @property
    @pulumi.getter(name="partnerNodeId")
    def partner_node_id(self) -> Optional[str]:
        """
        ARM ID of another HanaInstance that will share a network with this HanaInstance
        """
        return pulumi.get(self, "partner_node_id")

    @property
    @pulumi.getter(name="powerState")
    def power_state(self) -> str:
        """
        Resource power state
        """
        return pulumi.get(self, "power_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        State of provisioning of the HanaInstance
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="proximityPlacementGroup")
    def proximity_placement_group(self) -> str:
        """
        Resource proximity placement group
        """
        return pulumi.get(self, "proximity_placement_group")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """
        Specifies the storage settings for the HANA instance disks.
        """
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """
        Resource tags
        """
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """
        Resource type
        """
        return pulumi.get(self, "type")
class AwaitableGetHanaInstanceResult(GetHanaInstanceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` makes __await__ a generator function, which
        # is what the await protocol requires; the generator's return value is
        # what `await` evaluates to.
        if False:
            yield self
        field_names = (
            'hana_instance_id',
            'hardware_profile',
            'hw_revision',
            'id',
            'location',
            'name',
            'network_profile',
            'os_profile',
            'partner_node_id',
            'power_state',
            'provisioning_state',
            'proximity_placement_group',
            'storage_profile',
            'tags',
            'type',
        )
        # Rebuild a plain (non-awaitable) result from this instance's fields.
        return GetHanaInstanceResult(**{f: getattr(self, f) for f in field_names})
def get_hana_instance(hana_instance_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHanaInstanceResult:
    """
    HANA instance info on Azure (ARM properties and HANA properties).
    API Version: 2017-11-03-preview.


    :param str hana_instance_name: Name of the SAP HANA on Azure instance.
    :param str resource_group_name: Name of the resource group.
    """
    __args__ = {
        'hanaInstanceName': hana_instance_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to the SDK version bundled with this package.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('azure-native:hanaonazure:getHanaInstance', __args__, opts=opts, typ=GetHanaInstanceResult).value
    _fields = ('hana_instance_id', 'hardware_profile', 'hw_revision', 'id',
               'location', 'name', 'network_profile', 'os_profile',
               'partner_node_id', 'power_state', 'provisioning_state',
               'proximity_placement_group', 'storage_profile', 'tags', 'type')
    return AwaitableGetHanaInstanceResult(**{f: getattr(__ret__, f) for f in _fields})
| 37.9
| 306
| 0.661214
|
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
# Names exported by this module; everything else is implementation detail.
__all__ = [
    'GetHanaInstanceResult',
    'AwaitableGetHanaInstanceResult',
    'get_hana_instance',
]
@pulumi.output_type
class GetHanaInstanceResult:
    """HANA instance info on Azure (ARM properties and HANA properties)."""
    def __init__(__self__, hana_instance_id=None, hardware_profile=None, hw_revision=None, id=None, location=None, name=None, network_profile=None, os_profile=None, partner_node_id=None, power_state=None, provisioning_state=None, proximity_placement_group=None, storage_profile=None, tags=None, type=None):
        # Each argument is validated against its expected wire type and then
        # registered with the pulumi runtime, in declaration order.
        arg_specs = (
            ('hana_instance_id', hana_instance_id, str),
            ('hardware_profile', hardware_profile, dict),
            ('hw_revision', hw_revision, str),
            ('id', id, str),
            ('location', location, str),
            ('name', name, str),
            ('network_profile', network_profile, dict),
            ('os_profile', os_profile, dict),
            ('partner_node_id', partner_node_id, str),
            ('power_state', power_state, str),
            ('provisioning_state', provisioning_state, str),
            ('proximity_placement_group', proximity_placement_group, str),
            ('storage_profile', storage_profile, dict),
            ('tags', tags, dict),
            ('type', type, str),
        )
        for arg_name, arg_value, expected in arg_specs:
            if arg_value and not isinstance(arg_value, expected):
                raise TypeError(f"Expected argument '{arg_name}' to be a {expected.__name__}")
            pulumi.set(__self__, arg_name, arg_value)

    @property
    @pulumi.getter(name="hanaInstanceId")
    def hana_instance_id(self) -> str:
        """Specifies the HANA instance unique ID."""
        return pulumi.get(self, "hana_instance_id")

    @property
    @pulumi.getter(name="hardwareProfile")
    def hardware_profile(self) -> Optional['outputs.HardwareProfileResponse']:
        """Specifies the hardware settings for the HANA instance."""
        return pulumi.get(self, "hardware_profile")

    @property
    @pulumi.getter(name="hwRevision")
    def hw_revision(self) -> str:
        """Hardware revision of a HANA instance."""
        return pulumi.get(self, "hw_revision")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Resource ID."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """Resource location."""
        return pulumi.get(self, "location")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Resource name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="networkProfile")
    def network_profile(self) -> Optional['outputs.NetworkProfileResponse']:
        """Specifies the network settings for the HANA instance."""
        return pulumi.get(self, "network_profile")

    @property
    @pulumi.getter(name="osProfile")
    def os_profile(self) -> Optional['outputs.OSProfileResponse']:
        """Specifies the operating system settings for the HANA instance."""
        return pulumi.get(self, "os_profile")

    @property
    @pulumi.getter(name="partnerNodeId")
    def partner_node_id(self) -> Optional[str]:
        """ARM ID of another HanaInstance that will share a network with this HanaInstance."""
        return pulumi.get(self, "partner_node_id")

    @property
    @pulumi.getter(name="powerState")
    def power_state(self) -> str:
        """Resource power state."""
        return pulumi.get(self, "power_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """State of provisioning of the HanaInstance."""
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="proximityPlacementGroup")
    def proximity_placement_group(self) -> str:
        """Resource proximity placement group."""
        return pulumi.get(self, "proximity_placement_group")

    @property
    @pulumi.getter(name="storageProfile")
    def storage_profile(self) -> Optional['outputs.StorageProfileResponse']:
        """Specifies the storage settings for the HANA instance disks."""
        return pulumi.get(self, "storage_profile")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """Resource tags."""
        return pulumi.get(self, "tags")

    @property
    @pulumi.getter
    def type(self) -> str:
        """Resource type."""
        return pulumi.get(self, "type")
class AwaitableGetHanaInstanceResult(GetHanaInstanceResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The dead `yield` below turns this method into a generator function
        # (required by the await protocol); awaiting it resolves immediately
        # to a plain GetHanaInstanceResult copy of this object's fields.
        if False:
            yield self
        copied = {
            f: getattr(self, f)
            for f in (
                'hana_instance_id', 'hardware_profile', 'hw_revision', 'id',
                'location', 'name', 'network_profile', 'os_profile',
                'partner_node_id', 'power_state', 'provisioning_state',
                'proximity_placement_group', 'storage_profile', 'tags', 'type',
            )
        }
        return GetHanaInstanceResult(**copied)
def get_hana_instance(hana_instance_name: Optional[str] = None,
                      resource_group_name: Optional[str] = None,
                      opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetHanaInstanceResult:
    """
    HANA instance info on Azure (ARM properties and HANA properties).
    API Version: 2017-11-03-preview.


    :param str hana_instance_name: Name of the SAP HANA on Azure instance.
    :param str resource_group_name: Name of the resource group.
    """
    invoke_args = {
        'hanaInstanceName': hana_instance_name,
        'resourceGroupName': resource_group_name,
    }
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Fall back to the SDK version shipped with this package.
        opts.version = _utilities.get_version()
    raw = pulumi.runtime.invoke('azure-native:hanaonazure:getHanaInstance', invoke_args, opts=opts, typ=GetHanaInstanceResult).value
    return AwaitableGetHanaInstanceResult(
        hana_instance_id=raw.hana_instance_id,
        hardware_profile=raw.hardware_profile,
        hw_revision=raw.hw_revision,
        id=raw.id,
        location=raw.location,
        name=raw.name,
        network_profile=raw.network_profile,
        os_profile=raw.os_profile,
        partner_node_id=raw.partner_node_id,
        power_state=raw.power_state,
        provisioning_state=raw.provisioning_state,
        proximity_placement_group=raw.proximity_placement_group,
        storage_profile=raw.storage_profile,
        tags=raw.tags,
        type=raw.type)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.