| id | content |
|---|---|
11526343
|
import os
settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static/"),
"cookie_secret": "hotpoorinchina",
"debug": True,
}
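# How a dict like this is typically consumed (a sketch; MainHandler is a
# hypothetical handler class, not part of this snippet):
#
#   import tornado.web
#   import tornado.ioloop
#   app = tornado.web.Application([(r"/", MainHandler)], **settings)
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()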
|
11526359
|
from keras.models import Model
from keras.layers import Dense, Dropout
from keras.applications.nasnet import NASNetLarge
from config import IMAGE_SIZE
class NimaModel(object):
def __init__(self):
self.base_model = NASNetLarge(input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
include_top=False, weights=None, pooling='avg')
x = Dropout(0.75)(self.base_model.output)
x = Dense(10, activation='softmax', name='toplayer')(x)
self.model = Model(self.base_model.input, x)
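# A minimal usage sketch (assumes categorical_crossentropy as a stand-in for
# NIMA's EMD loss; the zero image below is purely illustrative):
#
#   import numpy as np
#   nima = NimaModel()
#   nima.model.compile(optimizer='adam', loss='categorical_crossentropy')
#   dist = nima.model.predict(np.zeros((1, IMAGE_SIZE, IMAGE_SIZE, 3)))
#   mean_score = (dist * np.arange(1, 11)).sum(axis=-1)  # expected score over the 10 bins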
|
11526405
|
from import_export import resources, fields
from .models import ChoiceLibrary
class ChoiceResource(resources.ModelResource):
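    # The column_name values are the Chinese spreadsheet headers this resource
    # maps from: question text, options A-D, correct answer, and score.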
choice_question = fields.Field(attribute='choice_question', column_name='问题描述')
choice_a = fields.Field(attribute='choice_a', column_name='选项A')
choice_b = fields.Field(attribute='choice_b', column_name='选项B')
choice_c = fields.Field(attribute='choice_c', column_name='选项C')
choice_d = fields.Field(attribute='choice_d', column_name='选项D')
choice_answer = fields.Field(attribute='choice_answer', column_name='正确答案')
choice_score = fields.Field(attribute='choice_score', column_name='分数')
class Meta:
model = ChoiceLibrary
fields = ( 'choice_question', 'choice_a', 'choice_b', 'choice_c', 'choice_d', 'choice_answer', 'choice_score')
        exclude = ('id',)
import_id_fields = ['choice_question']
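# A hypothetical import call using this resource (tablib and the file name are
# assumptions; dry_run=True validates without writing rows):
#
#   import tablib
#   dataset = tablib.Dataset().load(open('choices.xlsx', 'rb').read(), format='xlsx')
#   result = ChoiceResource().import_data(dataset, dry_run=True)
#   print(result.has_errors())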
|
11526417
|
from collections import defaultdict
from datetime import datetime, timedelta, timezone
from unittest.mock import MagicMock
import pytest
CERT1_ZO_NE = {'CertificateArn': 'arn:aws:acm:eu-west-1:cert1',
'CreatedAt': datetime(2016, 4, 1, 12, 13, 14, tzinfo=timezone.utc),
'DomainName': '*.zo.ne',
'DomainValidationOptions': [
{'DomainName': '*.zo.ne',
'ValidationDomain': 'zo.ne',
'ValidationEmails': [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>']}, ],
'InUseBy': ['arn:aws:elasticloadbalancing:eu-west-1:lb'],
'IssuedAt': datetime(2016, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'Issuer': 'SenzaTest',
'KeyAlgorithm': 'RSA-2048',
'NotAfter': datetime(2020, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'NotBefore': datetime(2016, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'Serial': '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00',
'SignatureAlgorithm': 'SHA256WITHRSA',
'Status': 'ISSUED',
'Subject': 'CN=*.zo.ne',
'SubjectAlternativeNames': []}
CERT1_ZO_NE_REVOKED = {'CertificateArn': 'arn:aws:acm:eu-west-1:cert1',
'CreatedAt': datetime(2016, 4, 1, 12, 13, 14, tzinfo=timezone.utc),
'DomainName': '*.zo.ne',
'DomainValidationOptions': [
{'DomainName': '*.zo.ne',
'ValidationDomain': 'zo.ne',
'ValidationEmails': [
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>',
'<EMAIL>']}, ],
'InUseBy': [
'arn:aws:elasticloadbalancing:eu-west-1:lb'],
'IssuedAt': datetime(2016, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'Issuer': 'SenzaTest',
'KeyAlgorithm': 'RSA-2048',
'NotAfter': datetime(2020, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'NotBefore': datetime(2016, 4, 1, 12, 14, 14, tzinfo=timezone.utc),
'Serial': '00:00:00:00:00:00:00:00:00:00:00:00:00:00:00:00',
'SignatureAlgorithm': 'SHA256WITHRSA',
'Status': 'REVOKED',
'Subject': 'CN=*.zo.ne',
'SubjectAlternativeNames': []}
HOSTED_ZONE_EXAMPLE_NET = {'Config': {'PrivateZone': False},
'CallerReference': '0000',
'ResourceRecordSetCount': 42,
'Id': '/hostedzone/123',
'Name': 'example.net.'}
HOSTED_ZONE_EXAMPLE_ORG = {'Config': {'PrivateZone': False},
'CallerReference': '0000',
'ResourceRecordSetCount': 42,
'Id': '/hostedzone/123',
'Name': 'example.org.'}
HOSTED_ZONE_ZO_NE = {'Config': {'PrivateZone': False},
'CallerReference': '0000',
'ResourceRecordSetCount': 23,
'Id': '/hostedzone/123456',
'Name': 'zo.ne.'}
HOSTED_ZONE_ZO_NE_COM = {'Config': {'PrivateZone': False},
'CallerReference': '0000',
'ResourceRecordSetCount': 23,
'Id': '/hostedzone/123456',
'Name': 'zo.ne.com.'}
HOSTED_ZONE_ZO_NE_DEV = {'Config': {'PrivateZone': False},
'CallerReference': '0000',
'ResourceRecordSetCount': 23,
'Id': '/hostedzone/123456',
'Name': 'zo.ne.dev.'}
SERVER_CERT_ZO_NE = MagicMock(name='zo-ne')
SERVER_CERT_ZO_NE.server_certificate_metadata = {'Arn': 'arn:aws:123',
'ServerCertificateName': 'zo-ne',
'Expiration': datetime(2025, 4, 1, 12, 14, 14,
tzinfo=timezone(timedelta(hours=2))),
'Path': '/',
'ServerCertificateId': '000',
'UploadDate': datetime(2017, 3, 1, 12, 14, 14,
tzinfo=timezone.utc)}
@pytest.fixture
def boto_client(monkeypatch):
mocks = defaultdict(lambda: MagicMock())
mocks['cloudformation'] = MagicMock()
mocks['cloudformation'].list_stacks.return_value = {'StackSummaries': [
{'StackName': 'test-1',
'StackId': 'arn:aws:cf:eu-1:test',
'CreationTime': '2016-06-14'}]
}
summary = [{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 3, 7,
108000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppLoadBalancer',
'PhysicalResourceId': 'myapp1-1',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::ElasticLoadBalancing::LoadBalancer'},
{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 3,
45, 70000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppLoadBalancerMainDomain',
'PhysicalResourceId': 'myapp1.example.com',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::Route53::RecordSet'},
{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 3,
43, 871000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppLoadBalancerVersionDomain',
'PhysicalResourceId': 'myapp1-1.example.com',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::Route53::RecordSet'},
{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 7,
38, 495000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppServer',
'PhysicalResourceId': 'myapp1-1-AppServer-00000',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::AutoScaling::AutoScalingGroup'},
{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 5,
10, 48000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppServerConfig',
'PhysicalResourceId': 'myapp1-1-AppServerConfig-00000',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::AutoScaling::LaunchConfiguration'},
{'LastUpdatedTimestamp': datetime(2016, 7, 20, 7, 5, 6,
745000,
tzinfo=timezone.utc),
'LogicalResourceId': 'AppServerInstanceProfile',
'PhysicalResourceId': 'myapp1-1-AppServerInstanceProfile-000',
'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::IAM::InstanceProfile'}]
response = {'ResponseMetadata': {'HTTPStatusCode': 200,
'RequestId': '0000'},
'StackResourceSummaries': summary}
mocks['cloudformation'].list_stack_resources.return_value = response
mocks['route53'] = MagicMock()
mocks['route53'].list_hosted_zones.return_value = {
'HostedZones': [HOSTED_ZONE_ZO_NE],
'IsTruncated': False,
'MaxItems': '100'}
mocks['route53'].list_resource_record_sets.return_value = {
'IsTruncated': False,
'MaxItems': '100',
'ResourceRecordSets': [
{'Name': 'example.org.',
'ResourceRecords': [{'Value': 'ns.awsdns.com.'},
{'Value': 'ns.awsdns.org.'}],
'TTL': 172800,
'Type': 'NS'},
{'Name': 'test-1.example.org.',
'ResourceRecords': [
{'Value': 'test-1-123.myregion.elb.amazonaws.com'}],
'TTL': 20,
'Type': 'CNAME'},
{'Name': 'mydomain.example.org.',
'ResourceRecords': [{'Value': 'test-1.example.org'}],
'SetIdentifier': 'test-1',
'TTL': 20,
'Type': 'CNAME',
'Weight': 20},
{'Name': 'test-2.example.org.',
'AliasTarget': {'DNSName': 'test-2-123.myregion.elb.amazonaws.com'},
'TTL': 20,
'Type': 'A'},
]}
def my_client(rtype, *args, **kwargs):
if rtype == 'acm':
acm = mocks['acm']
summary_list = {'CertificateSummaryList': [
{'CertificateArn': 'arn:aws:acm:eu-west-1:cert1'},
{'CertificateArn': 'arn:aws:acm:eu-west-1:cert2'}]}
mocks['acm'].list_certificates.return_value = summary_list
acm.describe_certificate.side_effect = [
{'Certificate': CERT1_ZO_NE},
{'Certificate': ''}]
return acm
elif rtype == 'cloudformation':
cf = mocks['cloudformation']
resource = {
'StackResourceDetail': {'ResourceStatus': 'CREATE_COMPLETE',
'ResourceType': 'AWS::IAM::Role',
'PhysicalResourceId': 'my-referenced-role'}}
cf.describe_stack_resource.return_value = resource
cf.describe_stacks.return_value = {
'Stacks': [{
'Parameters': [],
'Tags': [],
'StackName': 'test-1',
'CreationTime': datetime(2016, 8, 31, 6, 16, 37, 917000,
tzinfo=timezone.utc),
'DisableRollback': False,
'Description': 'Test1',
'StackStatus': 'CREATE_COMPLETE',
'NotificationARNs': [],
'StackId': 'arn:aws:cloudformation:eu-central-1:test'}
],
'ResponseMetadata': {},
'RequestId': 'test'
}
return cf
return mocks[rtype]
monkeypatch.setattr('boto3.client', my_client)
return mocks
@pytest.fixture
def boto_resource(monkeypatch):
def my_resource(rtype, *args):
if rtype == 'cloudformation':
res = MagicMock()
res.resource_type = 'AWS::Route53::RecordSet'
res.physical_resource_id = 'test-1.example.org'
res.logical_id = 'VersionDomain'
res.last_updated_timestamp = datetime.now()
res2 = MagicMock()
res2.resource_type = 'AWS::Route53::RecordSet'
res2.physical_resource_id = 'mydomain.example.org'
res2.logical_id = 'MainDomain'
res2.last_updated_timestamp = datetime.now()
res3 = MagicMock()
res3.resource_type = 'AWS::Route53::RecordSet'
res3.physical_resource_id = 'test-2.example.org'
res3.logical_id = 'VersionDomain'
res3.last_updated_timestamp = datetime.now()
stack = MagicMock()
stack.resource_summaries.all.return_value = [res, res2, res3]
cf = MagicMock()
cf.Stack.return_value = stack
return cf
if rtype == 'ec2':
ec2 = MagicMock()
ec2.security_groups.filter.return_value = [
MagicMock(name='app-sg', id='sg-007')]
ec2.vpcs.all.return_value = [MagicMock(vpc_id='vpc-123')]
ec2.images.filter.return_value = [
MagicMock(name='Taupage-AMI-123', id='ami-123')]
ec2.subnets.filter.return_value = [MagicMock(tags=[{'Key': 'Name', 'Value': 'internal-myregion-1a'}],
id='subnet-abc123',
availability_zone='myregion-1a'),
MagicMock(tags=[{'Key': 'Name',
'Value': 'internal-myregion-1b'}],
id='subnet-def456',
availability_zone='myregion-1b'),
MagicMock(tags=[{'Key': 'Name',
'Value': 'dmz-myregion-1a'}],
id='subnet-ghi789',
availability_zone='myregion-1a')]
return ec2
elif rtype == 'iam':
iam = MagicMock()
iam.server_certificates.all.return_value = [SERVER_CERT_ZO_NE]
return iam
elif rtype == 'sns':
sns = MagicMock()
topic = MagicMock(arn='arn:123:mytopic')
sns.topics.all.return_value = [topic]
return sns
return MagicMock()
monkeypatch.setattr('boto3.resource', my_resource)
@pytest.fixture
def disable_version_check(monkeypatch):
m = MagicMock()
monkeypatch.setattr('senza.subcommands.root.check_senza_version', m)
@pytest.fixture(autouse=True)
def valid_regions(monkeypatch):
m_session = MagicMock()
m_session.return_value = m_session
m_session.get_available_regions.return_value = ['aa-fakeregion-1']
monkeypatch.setattr('boto3.session.Session', m_session)
return m_session
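# A hypothetical test consuming the boto_client fixture above: boto3.client is
# monkeypatched, so boto3.client('acm') returns the prepared 'acm' mock.
#
#   def test_list_certificates(boto_client):
#       import boto3
#       arns = [c['CertificateArn'] for c in
#               boto3.client('acm').list_certificates()['CertificateSummaryList']]
#       assert 'arn:aws:acm:eu-west-1:cert1' in arns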
|
11526434
|
expected_output = {
'index': {
1: {
'descr': 'Cisco Systems Cisco 7600 6-slot Chassis System',
'name': 'CISCO7606',
'pid': 'CISCO7606',
'sn': 'FOX11140RN8',
},
2: {
'descr': 'OSR-7600 Clock FRU 1',
'name': 'CLK-7600 1',
'pid': 'CLK-7600',
'sn': 'NWG1112014W',
},
3: {
'descr': 'OSR-7600 Clock FRU 2',
'name': 'CLK-7600 2',
'pid': 'CLK-7600',
'sn': 'NWG1112014W',
},
4: {
'descr': 'WS-X6748-GE-TX CEF720 48 port 10/100/1000mb Ethernet Rev. 2.7',
'name': 'module 1',
'pid': 'WS-X6748-GE-TX',
'sn': 'SAL1209HMW3',
'vid': 'V02',
},
5: {
'descr': 'WS-F6700-CFC Centralized Forwarding Card Rev. 4.0',
'name': 'switching engine sub-module of 1',
'pid': 'WS-F6700-CFC',
'sn': 'SAL1207G5V1',
'vid': 'V05',
},
6: {
'descr': '2 port adapter Enhanced FlexWAN Rev. 2.1',
'name': 'module 2',
'pid': 'WS-X6582-2PA',
'sn': 'JAE0939LYNQ',
'vid': 'V06',
},
7: {
'descr': 'Serial Port Adapter',
'name': 'module 2/1',
'pid': 'PA-4T+',
'sn': '32861325',
},
8: {
'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 4.1',
'name': 'module 5',
'pid': 'WS-SUP720-3BXL',
'sn': 'SAD09020BF8',
'vid': 'V11',
},
9: {
'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 2.2',
'name': 'msfc sub-module of 5',
'pid': 'WS-SUP720',
'sn': 'SAD090105M6',
},
10: {
'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.4',
'name': 'switching engine sub-module of 5',
'pid': 'WS-F6K-PFC3BXL',
'sn': 'SAD090301K6',
},
11: {
'descr': 'WS-SUP720-3BXL 2 ports Supervisor Engine 720 Rev. 5.12',
'name': 'module 6',
'pid': 'WS-SUP720-3BXL',
'sn': 'SAL15129MRC',
'vid': 'V11',
},
12: {
'descr': 'WS-SUP720 MSFC3 Daughterboard Rev. 5.1',
'name': 'msfc sub-module of 6',
'pid': 'WS-SUP720',
'sn': 'SAL15045PYS',
},
13: {
'descr': 'WS-F6K-PFC3BXL Policy Feature Card 3 Rev. 1.11',
'name': 'switching engine sub-module of 6',
'pid': 'WS-F6K-PFC3BXL',
'sn': 'SAL15129KW4',
'vid': 'V02',
},
14: {
'descr': 'AC_6 power supply, 1900 watt 1',
'name': 'PS 1 PWR-1900-AC/6',
'pid': 'PWR-1900-AC/6',
'sn': 'DCA1104401B',
'vid': 'V02',
},
15: {
'descr': 'AC_6 power supply, 1900 watt 2',
'name': 'PS 2 PWR-1900-AC/6',
'pid': 'PWR-1900-AC/6',
'sn': 'DCA11044011',
'vid': 'V02',
},
},
}
|
11526441
|
import os
from functools import partial
import torch.nn as nn
import torch.optim as optim
import torch.optim.lr_scheduler as lr_scheduler
from fsgan.datasets.opencv_video_seq_dataset import VideoSeqPairDataset
from fsgan.datasets.img_landmarks_transforms import RandomHorizontalFlip, Pyramids, ToTensor
from fsgan.criterions.vgg_loss import VGGLoss
from fsgan.criterions.gan_loss import GANLoss
from fsgan.models.res_unet import MultiScaleResUNet
from fsgan.models.discriminators_pix2pix import MultiscaleDiscriminator
from fsgan.train_blending import main
if __name__ == '__main__':
exp_name = os.path.splitext(os.path.basename(__file__))[0]
exp_dir = os.path.join('../results/swapping', exp_name)
train_dataset = partial(VideoSeqPairDataset, '/data/datasets/ijb-c/ijbc_cropped/ijbc_cropped_r256_cs1.2',
'train_list.txt', frame_window=1, ignore_landmarks=True, same_prob=0.0)
val_dataset = partial(VideoSeqPairDataset, '/data/datasets/ijb-c/ijbc_cropped/ijbc_cropped_r256_cs1.2',
'val_list.txt', frame_window=1, ignore_landmarks=True, same_prob=0.0)
numpy_transforms = [RandomHorizontalFlip(), Pyramids(2)]
tensor_transforms = [ToTensor()]
resolutions = [128, 256]
lr_gen = [1e-4, 4e-5]
lr_dis = [1e-5, 4e-6]
epochs = [24, 50]
iterations = ['20k']
batch_size = [32, 16]
workers = 32
pretrained = False
criterion_id = VGGLoss('../../weights/vggface2_vgg19_256_1_2_id.pth')
criterion_attr = VGGLoss('../../weights/celeba_vgg19_256_2_0_28_attr.pth')
criterion_gan = GANLoss(use_lsgan=True)
generator = MultiScaleResUNet(in_nc=7, out_nc=3, flat_layers=(2, 2, 2, 2), ngf=128)
discriminator = MultiscaleDiscriminator(use_sigmoid=True, num_D=2)
optimizer = partial(optim.Adam, betas=(0.5, 0.999))
scheduler = partial(lr_scheduler.StepLR, step_size=10, gamma=0.5)
reenactment_model = '../results/reenactment/ijbc_msrunet_reenactment_attr_no_seg/G_latest.pth'
seg_model = '../../weights/lfw_figaro_unet_256_segmentation.pth'
lms_model = '../../weights/hr18_wflw_landmarks.pth'
rec_weight = 1.0
gan_weight = 0.1
background_value = -1.0
if not os.path.exists(exp_dir):
os.makedirs(exp_dir)
main(exp_dir, train_dataset=train_dataset, val_dataset=val_dataset,
numpy_transforms=numpy_transforms, tensor_transforms=tensor_transforms, resolutions=resolutions,
lr_gen=lr_gen, lr_dis=lr_dis, epochs=epochs, iterations=iterations, batch_size=batch_size, workers=workers,
optimizer=optimizer, scheduler=scheduler, pretrained=pretrained,
criterion_id=criterion_id, criterion_attr=criterion_attr, criterion_gan=criterion_gan,
generator=generator, discriminator=discriminator, reenactment_model=reenactment_model, seg_model=seg_model,
lms_model=lms_model, rec_weight=rec_weight, gan_weight=gan_weight, background_value=background_value)
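    # Note: the next line powers the machine off once training finishes;
    # remove it if that is not desired.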
os.system('sudo shutdown')
|
11526446
|
from sklearn import svm
from sklearn import tree
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.linear_model import SGDClassifier
from util import readDatabase
# Read the training / testing dataset and labels
xTrain, yTrain, xTest, yTest, yLabels = readDatabase(reshape=False, categoricalValues=False)
computeNearestNeighbors = True
computeSVM = True
computeSGD = True
computeNaiveBayes = True
computeDecisionTrees = True
computeAdaboost = True
computeGradientBoosting = True
computeRandomForest = True
computeExtremelyRandomForest = True
# =================================================================================================#
# Nearest neighbor
# Train the model
if computeNearestNeighbors:
noNeighbors = 3
descriptorName = 'Nearest neighbors ({})'.format(noNeighbors)
print("Train {}".format(descriptorName))
    clfKNN = KNeighborsClassifier(n_neighbors=noNeighbors)
    clfKNN.fit(xTrain, yTrain)
    # Compute the accuracy of the model
    valuePredicted = clfKNN.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# =================================================================================================#
# SGD
# Train the model
if computeSGD:
descriptorName = 'SGD'
print("Train {}".format(descriptorName))
clf = SGDClassifier(loss="hinge", penalty="l2")
clf.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clf.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# Naive Bayes
if computeNaiveBayes:
descriptorName = 'Naive Bayes'
print("Train {}".format(descriptorName))
clf = GaussianNB()
clf.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clf.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# Decision trees
if computeDecisionTrees:
descriptorName = 'Decision Tree Classifier '
print("Train {}".format(descriptorName))
clf = tree.DecisionTreeClassifier()
clf.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clf.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# AdaBoost model
if computeAdaboost:
descriptorName = 'Adaboost Classifier '
print("Train {}".format(descriptorName))
clf = AdaBoostClassifier(n_estimators=100)
clf.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clf.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# Gradient Boosting Classifier
if computeGradientBoosting:
descriptorName = 'Gradient Boosting Classifier'
print("Train {}".format(descriptorName))
clf = GradientBoostingClassifier(n_estimators=200, learning_rate=1.0, max_depth=1, random_state=0)
clf.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clf.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# =================================================================================================#
# Random Forest Classifier
if computeRandomForest:
descriptorName = 'Random Forest Classifier'
print("Train {}".format(descriptorName))
# Train the model
clfRF = RandomForestClassifier(n_estimators=200, criterion="gini")
clfRF.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clfRF.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# Extremely Randomized Trees (ExtraTrees) Classifier
if computeExtremelyRandomForest:
    descriptorName = 'Extremely Trees Classifier'
print("Train {}".format(descriptorName))
# Train the model
clfRF = ExtraTreesClassifier(n_estimators=200, criterion="gini")
clfRF.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clfRF.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
# Support vector machines
descriptorName = 'SVM Linear'
cValues = [0.01, 0.1, 1, 10]
if computeSVM:
for cValue in cValues:
descriptorName = 'Linear SVM with C={} '.format(cValue)
print("Train {}".format(descriptorName))
clfSVM = svm.SVC(C=cValue, kernel='linear', verbose=False)
clfSVM.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clfSVM.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
descriptorName = 'SVM RBF'
cValues = [0.01, 0.1, 1, 10]
if computeSVM:
for cValue in cValues:
        descriptorName = 'RBF SVM with C={} '.format(cValue)
print("Train {}".format(descriptorName))
clfSVM = svm.SVC(C=cValue, class_weight=None,
gamma='auto', kernel='rbf',
verbose=False)
clfSVM.fit(xTrain, yTrain)
# Compute the accuracy of the model
valuePredicted = clfSVM.predict(xTest)
accuracy = accuracy_score(y_true=yTest, y_pred=valuePredicted)
confusionMatrix = confusion_matrix(y_true=yTest, y_pred=valuePredicted)
print('{}: {}'.format(descriptorName, accuracy))
'''
# Obtained results
Nearest neighbors (3): 0.9705
SGD: 0.8985
Train Naive Bayes
Naive Bayes: 0.5558
Train Decision Tree Classifier
Decision Tree Classifier : 0.879
Train Adaboost Classifier
Adaboost Classifier : 0.7296
Train Gradient Boosting Classifier
Gradient Boosting Classifier: 0.6615
Train Random Forest Classifier
Random Forest Classifier: 0.9704
Train Extremely Trees Classifier
Extremely Trees Classifier: 0.9735
Linear SVM with C=0.01 : 0.9443
Linear SVM with C=0.1 : 0.9472
Linear SVM with C=1 : 0.9404
Linear SVM with C=10 : 0.931
RBF SVM with C=0.01 : 0.835
RBF SVM with C=0.1 : 0.9166
RBF SVM with C=1 : 0.9446
RBF SVM with C=10 : 0.9614
'''
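# The train/evaluate boilerplate above repeats for every model; a compact
# equivalent (a sketch, reusing the same xTrain/yTrain/xTest/yTest arrays):
def train_and_report(name, clf):
    print("Train {}".format(name))
    clf.fit(xTrain, yTrain)
    predicted = clf.predict(xTest)
    print('{}: {}'.format(name, accuracy_score(y_true=yTest, y_pred=predicted)))
    return confusion_matrix(y_true=yTest, y_pred=predicted)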
|
11526455
|
import json
import os
import unittest
from aptos.parser import SchemaParser
from aptos.schema.visitor import AvroSchemaVisitor
BASE_DIR = os.path.dirname(__file__)
class AvroSchemaTestCase(unittest.TestCase):
def runTest(self):
with open(os.path.join(BASE_DIR, 'schema', 'product')) as fp:
schema = json.load(fp)
component = SchemaParser.parse(schema)
schema = component.accept(AvroSchemaVisitor())
self.assertEqual(len(schema['fields']), 6)
with open(os.path.join(BASE_DIR, 'schema', 'inventory')) as fp:
schema = json.load(fp)
component = SchemaParser.parse(schema)
schema = component.accept(AvroSchemaVisitor())
self.assertEqual(len(schema['fields']), 5)
|
11526465
|
import torch
from torch.nn.utils.rnn import pad_sequence
def collate_input_sequences(samples):
"""Returns a batch of data given a list of samples.
Args:
samples: List of (x, y) where:
`x`: A tuple:
- `torch.Tensor`: an input sequence to the network with size
`(len(torch.Tensor), n_features)`.
- `int`: the length of the corresponding output sequence
produced by the network given the `torch.Tensor` as
input.
`y`: A `torch.Tensor` containing the target output sequence.
Returns:
A tuple of `((batch_x, batch_out_lens), batch_y)` where:
batch_x: The concatenation of all `torch.Tensor`'s in `x` along a
new dim in descending order by `torch.Tensor` length.
This results in a `torch.Tensor` of size (L, N, D) where L is
the maximum `torch.Tensor` length, N is the number of samples,
and D is n_features.
`torch.Tensor`'s shorter than L are extended by zero padding.
batch_out_lens: A `torch.IntTensor` containing the `int` values
from `x` in an order that corresponds to the samples in
`batch_x`.
batch_y: A list of `torch.Tensor` containing the `y` `torch.Tensor`
sequences in an order that corresponds to the samples in
`batch_x`.
Example:
>>> x = [# input seq, len 5, 2 features. output seq, len 2
... (torch.full((5, 2), 1.0), 2),
... # input seq, len 4, 2 features. output seq, len 3
... (torch.full((4, 2), 2.0), 3)]
>>> y = [torch.full((4,), 1.0), # target seq, len 4
... torch.full((3,), 2.0)] # target seq, len 3
>>> smps = list(zip(x, y))
>>> (batch_x, batch_out_lens), batch_y = collate_input_sequences(smps)
>>> print('%r' % batch_x)
tensor([[[ 1., 1.],
[ 2., 2.]],
[[ 1., 1.],
[ 2., 2.]],
[[ 1., 1.],
[ 2., 2.]],
[[ 1., 1.],
[ 2., 2.]],
[[ 1., 1.],
[ 0., 0.]]])
>>> print('%r' % batch_out_lens)
tensor([ 2, 3], dtype=torch.int32)
>>> print('%r' % batch_y)
[tensor([ 1., 1., 1., 1.]), tensor([ 2., 2., 2.])]
"""
samples = [(*x, y) for x, y in samples]
sorted_samples = sorted(samples, key=lambda s: len(s[0]), reverse=True)
seqs, seq_lens, labels = zip(*sorted_samples)
x = (pad_sequence(seqs), torch.IntTensor(seq_lens))
y = list(labels)
return x, y
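# Typical wiring (a sketch; `my_dataset` is a hypothetical Dataset yielding
# ((sequence_tensor, output_len), target_tensor) samples):
#
#   from torch.utils.data import DataLoader
#   loader = DataLoader(my_dataset, batch_size=4,
#                       collate_fn=collate_input_sequences)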
|
11526474
|
from pysys.basetest import BaseTest
"""
Validate end-to-end behaviour for a failing installation
When we install a package that cannot be installed with the apt package manager
Then we receive a failure from the apt plugin
"""
from environment_sm_management import SoftwareManagement
class PySysTest(SoftwareManagement):
    def setup(self):
        super().setup()
def execute(self):
self.trigger_action("does_not_exist", "5446165", "::apt", "notanurl", "install")
self.wait_until_fail()
def validate(self):
self.assertThat(
"False == value", value=self.check_is_installed("does_not_exist")
)
|
11526476
|
from rpython.flowspace.model import Constant
from rpython.annotator.model import SomeNone
from rpython.rtyper.rmodel import Repr, TyperError, inputconst
from rpython.rtyper.lltypesystem.lltype import Void, Bool, Ptr, Char
from rpython.rtyper.lltypesystem.llmemory import Address
from rpython.rtyper.lltypesystem.rpbc import SmallFunctionSetPBCRepr
from rpython.rtyper.annlowlevel import llstr
from rpython.tool.pairtype import pairtype
class NoneRepr(Repr):
lowleveltype = Void
def rtype_bool(self, hop):
return Constant(False, Bool)
def none_call(self, hop):
raise TyperError("attempt to call constant None")
def ll_str(self, none):
return llstr("None")
def get_ll_eq_function(self):
return None
def get_ll_hash_function(self):
return ll_none_hash
rtype_simple_call = none_call
rtype_call_args = none_call
none_repr = NoneRepr()
class __extend__(SomeNone):
def rtyper_makerepr(self, rtyper):
return none_repr
def rtyper_makekey(self):
return self.__class__,
def ll_none_hash(_):
return 0
class __extend__(pairtype(Repr, NoneRepr)):
def convert_from_to((r_from, _), v, llops):
return inputconst(Void, None)
def rtype_is_((robj1, rnone2), hop):
if hop.s_result.is_constant():
return hop.inputconst(Bool, hop.s_result.const)
return rtype_is_None(robj1, rnone2, hop)
class __extend__(pairtype(NoneRepr, Repr)):
def convert_from_to((_, r_to), v, llops):
return inputconst(r_to, None)
def rtype_is_((rnone1, robj2), hop):
if hop.s_result.is_constant():
return hop.inputconst(Bool, hop.s_result.const)
return rtype_is_None(robj2, rnone1, hop, pos=1)
def rtype_is_None(robj1, rnone2, hop, pos=0):
if isinstance(robj1.lowleveltype, Ptr):
v1 = hop.inputarg(robj1, pos)
return hop.genop('ptr_iszero', [v1], resulttype=Bool)
elif robj1.lowleveltype == Address:
v1 = hop.inputarg(robj1, pos)
cnull = hop.inputconst(Address, robj1.null_instance())
return hop.genop('adr_eq', [v1, cnull], resulttype=Bool)
elif robj1 == none_repr:
return hop.inputconst(Bool, True)
elif isinstance(robj1, SmallFunctionSetPBCRepr):
if robj1.s_pbc.can_be_None:
v1 = hop.inputarg(robj1, pos)
return hop.genop('char_eq', [v1, inputconst(Char, '\000')],
resulttype=Bool)
else:
return inputconst(Bool, False)
else:
        raise TyperError('rtype_is_None of %r' % (robj1,))
|
11526477
|
import os
import sys
import argparse
import platform
import pathlib
from signal import signal, SIGINT
import debugpy
### This block is only needed when debugging from the samples/simple_demo directory.
### You don't need it when the azdebugrelay module is installed.
import pkg_resources
_AZDEBUGRELAY_NAME = "azdebugrelay"
_required_azdebugrelay = {_AZDEBUGRELAY_NAME}
_installed_azdebugrelay = {pkg.key for pkg in pkg_resources.working_set}
_missing_azdebugrelay = _required_azdebugrelay - _installed_azdebugrelay
if _missing_azdebugrelay:
_workspace_dir = pathlib.Path(__file__).parent.parent.parent.absolute()
_azdebugrelay_dir = os.path.dirname(
os.path.join(_workspace_dir, "azdebugrelay"))
sys.path.insert(0, _azdebugrelay_dir)
###############
from azdebugrelay import DebugRelay, DebugMode, debugpy_connect_with_timeout
g_debug_relay = None
def do_work():
"""Just a demo function. We debug it.
"""
print("Hello world!")
plat = platform.platform()
debugpy.breakpoint() # you can put a real VSCode breakpoint
    print(plat) # the debugger will stop here because of the debugpy.breakpoint() call above
def _signal_handler(signal_received, frame):
global g_debug_relay
if g_debug_relay is not None:
g_debug_relay.close()
g_debug_relay = None
exit(0)
def _check_for_debugging(args) -> DebugRelay:
"""An over-engineered debugger initialization function.
    Parses command-line arguments looking for the `--debug` option.
    If found, the option's value defines the debugging behaviour:
* `attach` - connects to a remote debugger (your VS Code in `listen` mode)
* `listen` - starts listening for a remote debugger to connect
* `none` (default) - do not start a DebugRelay
Args:
args: Command line arguments
Returns:
DebugRelay: running DebugRelay object
"""
debug_relay = None
parser = argparse.ArgumentParser()
parser.add_argument('--debug', action='store',
default="none", choices=['attach', 'listen', 'none'], required=False)
options, _ = parser.parse_known_args(args=args)
if options.debug != "none":
print(f"Starting DebugRelay in `{options.debug}` mode.")
config_file = "./.azrelay.json"
mode = DebugMode.Connect if options.debug == "attach" else DebugMode.WaitForConnection
if os.path.exists(config_file):
debug_relay = DebugRelay.from_config(config_file, debug_mode=mode)
else:
debug_relay = DebugRelay.from_environment(debug_mode=mode)
# you can also create DebugRelay directly by providing connection string and the rest of its configuration:
# debug_relay = DebugRelay(access_key_or_connection_string, relay_connection_name, debug_mode, hybrid_connection_url, host, ports)
if debug_relay is None:
print("Cannot create Debugging Relay due to missing configuration.")
return None
DebugRelay.kill_relays()
debug_relay.open()
if debug_relay.is_running():
print("Connecting to the remote host...")
if options.debug == "attach":
debugpy_connect_with_timeout("127.0.0.1", 5678, 15)
else:
debugpy.listen(("127.0.0.1", 5678))
debugpy.wait_for_client()
print("Connected!!!")
return debug_relay
def _main(args):
"""CLI entry point
Args:
args: Command Line arguments
"""
global g_debug_relay
g_debug_relay = _check_for_debugging(args)
signal(SIGINT, _signal_handler)
do_work()
if g_debug_relay is not None:
g_debug_relay.close()
if __name__ == '__main__':
_main(sys.argv[1:])
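# Hypothetical invocations (the script name is an assumption):
#
#   python simple_demo.py --debug listen    # wait for VS Code to attach
#   python simple_demo.py --debug attach    # connect to a listening VS Code
#   python simple_demo.py                   # run without the debug relay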
|
11526487
|
from django.conf import settings
from django.conf.urls import patterns, include, url
urlpatterns = patterns('editor.views',
url(r'^login/$', 'login_view'),
url(r'^logout/(.*)$', 'logout_view'),
url(r'^generate-api-key$', 'gen_api_key'),
url(r'^ekey$', 'ekey'),
url(r'^get-ekey$', 'get_ekey'),
url(r'^error$', 'report_error'),
url(r'^credits$', 'credits'),
)
|
11526492
|
from keras.layers import Input, Dense, Reshape, Add, Activation, Lambda, Conv1D
from keras import layers
from keras.models import Model
from keras.optimizers import RMSprop
import keras.backend as K
import numpy as np
def stop_grad(ys):
return K.stop_gradient(ys[0] - ys[1]) + ys[1]
class WGAN(object):
def __init__(self, input_shape, latent_dim, tau=0.75, gumbel=False, hard_gumbel=False):
self.channels = 1
self.inputShape_ = input_shape
self.latentDim_ = latent_dim
self.gumbel_ = gumbel
self.hardGumbel_ = hard_gumbel
        # The following parameters and optimizer are set as recommended in the paper
self.n_critic = 5
self.clip_value = 0.01
self.tau_ = tau
optimizer = RMSprop()
# Build and compile the critic
self.critic = self.build_critic()
self.critic.compile(loss=self.wasserstein_loss,
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
        # The generator takes noise as input and generates imgs
gen_ins = self.generator.inputs
img = self.generator(gen_ins)
# For the combined model we will only train the generator
self.critic.trainable = False
# The critic takes generated images as input and determines validity
valid = self.critic(img)
# The combined model (stacked generator and critic)
self.combined = Model(gen_ins, valid)
self.combined.compile(loss=self.wasserstein_loss,
optimizer=optimizer,
metrics=['accuracy'])
def wasserstein_loss(self, y_true, y_pred):
return K.mean(y_true * y_pred)
def build_generator(self):
L = self.inputShape_[0]
z = Input(shape=(self.latentDim_,))
x = Dense(100 * self.inputShape_[0])(z)
x = Reshape((L, 100,))(x)
# res block 1:
res_in = x
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = Lambda(lambda z: z * 0.3)(x)
x = Add()([res_in, x])
# res block 2:
res_in = x
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = Lambda(lambda z: z * 0.3)(x)
x = Add()([res_in, x])
# res block 3:
res_in = x
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = Lambda(lambda z: z * 0.3)(x)
x = Add()([res_in, x])
# res block 4:
res_in = x
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = Lambda(lambda z: z * 0.3)(x)
x = Add()([res_in, x])
# res block 5:
res_in = x
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = layers.Activation('relu')(x)
x = Conv1D(100, 5, padding='same')(x)
x = Lambda(lambda z: z * 0.3)(x)
x = Add()([res_in, x])
x = Conv1D(self.inputShape_[-1], 1, padding='same')(x)
logits = x
if self.gumbel_:
# U = Input(tensor=K.random_uniform(K.shape(logits), 0, 1))
eps = 1e-20
g = Lambda(
lambda y: 1. / (self.tau_) * (y - K.log(-K.log(K.random_uniform(K.shape(logits), 0, 1) + eps) + eps)))(
logits)
out = layers.Activation('softmax')(g)
if self.hardGumbel_:
k = K.shape(logits)[-1]
out_hard = Lambda(lambda y: K.tf.cast(K.tf.equal(y, K.tf.reduce_max(y, 1, keepdims=True)), y.dtype))(
out)
out = Lambda(stop_grad)([out_hard, out])
model = Model(inputs=z, outputs=out)
else:
out = Activation('softmax')(logits)
model = Model(inputs=z, outputs=out)
return model
def build_critic(self):
L = self.inputShape_[0]
x = Input(shape=self.inputShape_)
y = Conv1D(100, 1, padding='same')(x)
# res block 1:
res_in = y
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = Lambda(lambda z: z * 0.3)(y)
y = Add()([res_in, y])
# res block 2:
res_in = y
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = Lambda(lambda z: z * 0.3)(y)
y = Add()([res_in, y])
# res block 3:
res_in = y
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = Lambda(lambda z: z * 0.3)(y)
y = Add()([res_in, y])
# res block 4:
res_in = y
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = Lambda(lambda z: z * 0.3)(y)
y = Add()([res_in, y])
# res block 5:
res_in = y
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = layers.Activation('relu')(y)
y = Conv1D(100, 5, padding='same')(y)
y = Lambda(lambda z: z * 0.3)(y)
y = Add()([res_in, y])
y = Reshape((L * 100,))(y)
out = Dense(1)(y)
model = Model(inputs=x, outputs=out)
return model
def train(self, X_train, epochs, batch_size=128, verbose=0):
# Adversarial ground truths
valid = -np.ones((batch_size, 1))
fake = np.ones((batch_size, 1))
for epoch in range(epochs):
for _ in range(self.n_critic):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
# Sample noise as generator input
noise = np.random.normal(0, 1, (batch_size, self.latentDim_))
# Generate a batch of new images
gen_imgs = self.generator.predict(noise)
# Train the critic
d_loss_real = self.critic.train_on_batch(imgs, valid)
d_loss_fake = self.critic.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_fake, d_loss_real)
# Clip critic weights
for l in self.critic.layers:
weights = l.get_weights()
weights = [np.clip(w, -self.clip_value, self.clip_value) for w in weights]
l.set_weights(weights)
# ---------------------
# Train Generator
# ---------------------
noise = np.random.normal(0, 1, (batch_size, self.latentDim_))
g_loss = self.combined.train_on_batch(noise, valid)
# Plot the progress
if verbose:
print("%d [D loss: %f] [G loss: %f]" % (epoch, 1 - d_loss[0], 1 - g_loss[0]))
def sample(self, noise):
sampled_x = self.generator.predict(noise)
return sampled_x
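# Minimal smoke test (a sketch; the one-hot data below is random and purely
# illustrative of sequences of length 12 over a 4-symbol alphabet):
#
#   X = np.eye(4)[np.random.randint(0, 4, size=(256, 12))]  # shape (256, 12, 4)
#   gan = WGAN(input_shape=(12, 4), latent_dim=32, gumbel=True)
#   gan.train(X, epochs=10, batch_size=64, verbose=1)
#   samples = gan.sample(np.random.normal(0, 1, (5, 32)))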
|
11526503
|
from decimal import Decimal
import unittest.mock
import hummingbot.strategy.cross_exchange_market_making.start as strategy_start
from hummingbot.connector.exchange_base import ExchangeBase
from hummingbot.strategy.cross_exchange_market_making.cross_exchange_market_making_config_map import (
cross_exchange_market_making_config_map as strategy_cmap
)
from hummingbot.client.config.global_config_map import global_config_map
from test.hummingbot.strategy import assign_config_default
class XEMMStartTest(unittest.TestCase):
def setUp(self) -> None:
super().setUp()
self.strategy = None
self.markets = {"binance": ExchangeBase(), "kucoin": ExchangeBase()}
self.notifications = []
self.log_errors = []
assign_config_default(strategy_cmap)
strategy_cmap.get("maker_market").value = "binance"
strategy_cmap.get("taker_market").value = "kucoin"
strategy_cmap.get("maker_market_trading_pair").value = "ETH-USDT"
strategy_cmap.get("taker_market_trading_pair").value = "ETH-USDT"
strategy_cmap.get("order_amount").value = Decimal("1")
strategy_cmap.get("min_profitability").value = Decimal("2")
global_config_map.get("strategy_report_interval").value = 60.
strategy_cmap.get("use_oracle_conversion_rate").value = False
def _initialize_market_assets(self, market, trading_pairs):
return [("ETH", "USDT")]
def _initialize_markets(self, market_names):
pass
def _notify(self, message):
self.notifications.append(message)
def logger(self):
return self
def error(self, message, exc_info):
self.log_errors.append(message)
def test_strategy_creation(self):
strategy_start.start(self)
self.assertEqual(self.strategy.order_amount, Decimal("1"))
self.assertEqual(self.strategy.min_profitability, Decimal("0.02"))
|
11526517
|
import sys

# Merge columns that share the same sample name (the part of the header before
# '#'), summing their integer counts per row.
# Usage: python merge_columns.py <input.tsv> <output.tsv>
fi = open(sys.argv[1])
fo = open(sys.argv[2], 'w')
header = fi.readline().rstrip().split('\t')
h = []
for one in header:
    h.append(one.split('#')[0])
header = h
# Group column indices by deduplicated name, skipping column 0 (the row id)
G = {}
i = 1
while i < len(header):
    if header[i] in G:
        G[header[i]].append(i)
    else:
        G[header[i]] = [i]
    i = i + 1
new_header = []
for one in G:
    new_header.append(one)
# Keep the row-id column name so the header lines up with the data rows
fo.write(header[0] + '\t' + '\t'.join(new_header) + '\n')
for line in fi:
    seq = line.rstrip().split('\t')
    fo.write(seq[0])
    for one in new_header:
        tmp = []
        for exp in G[one]:
            tmp.append(int(seq[exp]))
        fo.write('\t' + str(sum(tmp)))
    fo.write('\n')
fi.close()
fo.close()
|
11526548
|
from typing import Any
from pytest import Session
def _patch_plotly_show() -> None:
"""Monkey patch ``plotly.io.show`` as to not perform any rendering and,
instead, simply call ``plotly.io._utils.validate_coerce_fig_to_dict``"""
    from typing import Optional, Union
import plotly.io
from plotly.graph_objs import Figure
from plotly.io._utils import validate_coerce_fig_to_dict # noqa
    def wrapped(
        fig: Union[Figure, dict], renderer: Optional[str] = None, validate: bool = True, **kwargs: Any
    ) -> None:
validate_coerce_fig_to_dict(fig, validate)
plotly.io.show = wrapped
def pytest_sessionstart(session: Session) -> None:
"""Called after the ``Session`` object has been created and before
performing collection and entering the run test loop.
:param pytest.Session session: The pytest session object.
References
----------
* https://docs.pytest.org/en/6.2.x/reference.html#initialization-hooks
"""
_patch_plotly_show()
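# With this module acting as a conftest.py-style plugin, a test can call
# ``fig.show()`` and only figure validation happens (no renderer is invoked):
#
#   def test_scatter():
#       import plotly.express as px
#       px.scatter(x=[1, 2], y=[3, 4]).show()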
|
11526562
|
import os
import sys
import logging
import subprocess
from setuptools import setup, find_packages
# setup logging
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
log = logging.getLogger()
# package description and keywords
description = 'Python Tools for reading and writing data from the ESA CryoSat-2 mission'
keywords = 'CryoSat-2 radar altimetry, SIRAL, surface elevation and change'
# get long_description from README.rst
with open("README.rst", "r") as fh:
long_description = fh.read()
long_description_content_type = "text/x-rst"
# get version
with open('version.txt') as fh:
version = fh.read()
# list of all scripts to be included with package
scripts=[os.path.join('scripts',f) for f in os.listdir('scripts') if f.endswith('.py')]
# run cmd from the command line
def check_output(cmd):
return subprocess.check_output(cmd).decode('utf')
# install requirements and dependencies
on_rtd = os.environ.get('READTHEDOCS') == 'True'
if on_rtd:
install_requires = []
else:
# get install requirements
with open('requirements.txt') as fh:
install_requires = fh.read().splitlines()
# check if GDAL is installed
gdal_output = [None] * 4
try:
for i, flag in enumerate(("--cflags", "--libs", "--datadir", "--version")):
gdal_output[i] = check_output(['gdal-config', flag]).strip()
except Exception:
log.warning('Failed to get options via gdal-config')
else:
log.info("GDAL version from via gdal-config: {0}".format(gdal_output[3]))
# if setting GDAL version from via gdal-config
if gdal_output[3]:
# add version information to gdal in install_requires
gdal_index = install_requires.index('gdal')
install_requires[gdal_index] = 'gdal=={0}'.format(gdal_output[3])
setup(
name='read-cryosat-2',
version=version,
description=description,
long_description=long_description,
long_description_content_type=long_description_content_type,
url='https://github.com/tsutterley/read-cryosat-2',
author='<NAME>',
author_email='<EMAIL>',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
],
keywords=keywords,
packages=find_packages(),
install_requires=install_requires,
scripts=scripts,
include_package_data=True,
)
|
11526585
|
import numpy as np
def softmax(X, theta=1.0, axis=None):
"""
Compute the softmax of each element along an axis of X.
Source: https://nolanbconaway.github.io/blog/2017/softmax-numpy
Parameters
----------
X: ND-Array. Probably should be floats.
theta (optional): float parameter, used as a multiplier
prior to exponentiation. Default = 1.0
axis (optional): axis to compute values along. Default is the
first non-singleton axis.
Returns an array the same size as X. The result will sum to 1
along the specified axis.
"""
# make X at least 2d
y = np.atleast_2d(X)
# find axis
if axis is None:
axis = next(j[0] for j in enumerate(y.shape) if j[1] > 1)
# multiply y against the theta parameter,
y = y * float(theta)
# subtract the max for numerical stability
y = y - np.expand_dims(np.max(y, axis=axis), axis)
# exponentiate y
y = np.exp(y)
# take the sum along the specified axis
ax_sum = np.expand_dims(np.sum(y, axis=axis), axis)
# finally: divide elementwise
p = y / ax_sum
# flatten if X was 1D
if len(X.shape) == 1: p = p.flatten()
return p
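# Quick check (illustrative): rows of a 2-D input sum to 1 along the given axis.
if __name__ == '__main__':
    X = np.array([[1.0, 2.0, 3.0],
                  [1.0, 1.0, 1.0]])
    P = softmax(X, theta=0.5, axis=1)
    print(P.sum(axis=1))  # -> [1. 1.]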
|
11526601
|
from collections import defaultdict
from typing import List
from sqlalchemy import select
from app.models import async_session, DatabaseHelper
from app.models.schema.testcase_data import PityTestcaseDataForm
from app.models.testcase_data import PityTestcaseData
from app.utils.logger import Log
class PityTestcaseDataDao(object):
log = Log("PityTestcaseDataDao")
@staticmethod
async def insert_testcase_data(form: PityTestcaseDataForm, user: int):
try:
async with async_session() as session:
async with session.begin():
sql = select(PityTestcaseData).where(PityTestcaseData.case_id == form.case_id,
PityTestcaseData.env == form.env,
PityTestcaseData.name == form.name,
PityTestcaseData.deleted_at == 0)
result = await session.execute(sql)
query = result.scalars().first()
if query is not None:
raise Exception("该数据已存在, 请重新编辑")
data = PityTestcaseData(**form.dict(), user=user)
session.add(data)
await session.flush()
await session.refresh(data)
session.expunge(data)
return data
except Exception as e:
PityTestcaseDataDao.log.error(f"新增测试数据失败, error: {str(e)}")
raise Exception(f"新增测试数据失败, {str(e)}")
@staticmethod
async def update_testcase_data(form: PityTestcaseDataForm, user: int):
try:
async with async_session() as session:
async with session.begin():
sql = select(PityTestcaseData).where(PityTestcaseData.id == form.id,
PityTestcaseData.deleted_at == 0)
result = await session.execute(sql)
query = result.scalars().first()
if query is None:
raise Exception("测试数据不存在")
DatabaseHelper.update_model(query, form, user)
await session.flush()
session.expunge(query)
return query
except Exception as e:
PityTestcaseDataDao.log.error(f"编辑测试数据失败, error: {str(e)}")
raise Exception(f"编辑测试数据失败, {str(e)}")
@staticmethod
async def delete_testcase_data(id: int, user: int):
try:
async with async_session() as session:
async with session.begin():
sql = select(PityTestcaseData).where(PityTestcaseData.id == id,
PityTestcaseData.deleted_at == 0)
result = await session.execute(sql)
query = result.scalars().first()
if query is None:
raise Exception("测试数据不存在")
DatabaseHelper.delete_model(query, user)
except Exception as e:
PityTestcaseDataDao.log.error(f"删除测试数据失败, error: {str(e)}")
raise Exception(f"删除测试数据失败, {str(e)}")
@staticmethod
async def list_testcase_data(case_id: int):
ans = defaultdict(list)
try:
async with async_session() as session:
sql = select(PityTestcaseData).where(PityTestcaseData.case_id == case_id,
PityTestcaseData.deleted_at == 0)
result = await session.execute(sql)
query = result.scalars().all()
for q in query:
ans[q.env].append(q)
return ans
except Exception as e:
PityTestcaseDataDao.log.error(f"查询测试数据失败, error: {str(e)}")
raise Exception(f"查询测试数据失败, {str(e)}")
@staticmethod
async def list_testcase_data_by_env(env: int, case_id: int) -> List[PityTestcaseData]:
try:
async with async_session() as session:
sql = select(PityTestcaseData).where(PityTestcaseData.case_id == case_id,
PityTestcaseData.env == env,
PityTestcaseData.deleted_at == 0)
result = await session.execute(sql)
return result.scalars().all()
except Exception as e:
PityTestcaseDataDao.log.error(f"查询测试数据失败, error: {str(e)}")
raise Exception(f"查询测试数据失败, {str(e)}")
|
11526675
|
import itertools
import math
from functools import lru_cache
from typing import Tuple, Iterator
import cv2
import numpy as np
import numpy.ma as ma
from tqdm import tqdm
from vidgear.gears import WriteGear
from .pose import Pose
class PoseVisualizer:
def __init__(self, pose: Pose, thickness=None):
self.pose = pose
self.thickness = thickness
def _draw_frame(self, frame: ma.MaskedArray, frame_confidence: np.ndarray, img) -> np.ndarray:
background_color = img[0][0] # Estimation of background color for opacity. `mean` is slow
thickness = self.thickness if self.thickness is not None else round(math.sqrt(img.shape[0] * img.shape[1]) / 150)
radius = round(thickness / 2)
for person, person_confidence in zip(frame, frame_confidence):
c = person_confidence.tolist()
idx = 0
for component in self.pose.header.components:
colors = [np.array(c[::-1]) for c in component.colors]
@lru_cache(maxsize=None)
def _point_color(p_i: int):
opacity = c[p_i + idx]
np_color = colors[p_i % len(component.colors)] * opacity + (1 - opacity) * background_color
return tuple([int(c) for c in np_color])
# Draw Points
for i, point_name in enumerate(component.points):
if c[i + idx] > 0:
cv2.circle(img=img, center=tuple(person[i + idx][:2]), radius=radius,
color=_point_color(i), thickness=-1, lineType=16)
if self.pose.header.is_bbox:
point1 = tuple(person[0 + idx].tolist())
point2 = tuple(person[1 + idx].tolist())
color = tuple(np.mean([_point_color(0), _point_color(1)], axis=0))
cv2.rectangle(img=img, pt1=point1, pt2=point2, color=color, thickness=thickness)
else:
int_person = person.astype(np.int32)
# Draw Limbs
for (p1, p2) in component.limbs:
if c[p1 + idx] > 0 and c[p2 + idx] > 0:
point1 = tuple(int_person[p1 + idx].tolist()[:2])
point2 = tuple(int_person[p2 + idx].tolist()[:2])
length = ((point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2) ** 0.5
color = tuple(np.mean([_point_color(p1), _point_color(p2)], axis=0))
cv2.line(img, point1, point2, color, thickness, lineType=cv2.LINE_AA)
# deg = math.degrees(math.atan2(point1[1] - point2[1], point1[0] - point2[0]))
# polygon = cv2.ellipse2Poly(
# (int((point1[0] + point2[0]) / 2), int((point1[1] + point2[1]) / 2)),
# (int(length / 2), thickness),
# int(deg),
# 0, 360, 1)
# cv2.fillConvexPoly(img=img, points=polygon, color=color)
idx += len(component.points)
return img
def draw(self, background_color: Tuple[int, int, int] = (255, 255, 255), max_frames: int = None):
int_data = np.array(np.around(self.pose.body.data.data), dtype="int32")
background = np.full((self.pose.header.dimensions.height, self.pose.header.dimensions.width, 3),
fill_value=background_color, dtype="uint8")
for frame, confidence in itertools.islice(zip(int_data, self.pose.body.confidence), max_frames):
yield self._draw_frame(frame, confidence, img=background.copy())
def draw_on_video(self, background_video, max_frames: int = None, blur=False):
int_data = np.array(np.around(self.pose.body.data.data), dtype="int32")
if max_frames is None:
max_frames = len(int_data)
def get_frames(video_path):
cap = cv2.VideoCapture(video_path)
while True:
ret, vf = cap.read()
if not ret:
break
yield vf
cap.release()
if isinstance(background_video, str):
background_video = iter(get_frames(background_video))
for frame, confidence, background in itertools.islice(zip(int_data, self.pose.body.confidence, background_video),
max_frames):
background = cv2.resize(background, (self.pose.header.dimensions.width, self.pose.header.dimensions.height))
if blur:
background = cv2.blur(background, (20, 20))
yield self._draw_frame(frame, confidence, background)
def save_frame(self, f_name: str, frame: np.ndarray):
cv2.imwrite(f_name, frame)
def save_video(self, f_name: str, frames: Iterator, custom_ffmpeg=None):
# image_size = (self.pose.header.dimensions.width, self.pose.header.dimensions.height)
output_params = {
"-vcodec": "libx264",
"-crf": 0,
"-preset": "fast",
"-input_framerate": self.pose.body.fps
}
# Define writer with defined parameters and suitable output filename for e.g. `Output.mp4`
out = WriteGear(output_filename=f_name, logging=False, custom_ffmpeg=custom_ffmpeg, **output_params)
# out = cv2.VideoWriter(f_name, cv2.VideoWriter_fourcc(*'MP4V'), self.pose.body.fps, image_size)
for frame in tqdm(frames):
out.write(frame)
out.close()
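# Example usage (a sketch; 'example.pose' and Pose.read are assumptions about
# the surrounding package's reader API):
#
#   with open('example.pose', 'rb') as f:
#       pose = Pose.read(f.read())
#   viz = PoseVisualizer(pose)
#   viz.save_video('output.mp4', viz.draw())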
|
11526795
|
import pytest
from pyle38.errors import Tile38IdNotFoundError, Tile38KeyNotFoundError
key = "fleet"
id = "truck1"
@pytest.mark.asyncio
async def test_command_pdel(tile38):
response = await tile38.set(key, id).point(1, 1).exec()
assert response.ok
response = await tile38.get(key, id).asObject()
assert response.ok
assert response.object["type"] == "Point"
response = await tile38.pdel(key, "tr*")
assert response.ok
with pytest.raises(Tile38KeyNotFoundError):
await tile38.get(key, id).asObject()
await tile38.set(key, "truck1").point(1, 1).exec()
await tile38.set(key, "bus1").point(1, 2).exec()
await tile38.pdel(key, "t*")
with pytest.raises(Tile38IdNotFoundError):
await tile38.get(key, "truck1").asObject()
|
11526806
|
import torch
import torch.nn as nn
from collections import OrderedDict
from models.resnet import _weights_init
from utils.kfac_utils import fetch_mat_weights
from utils.common_utils import (tensor_to_list, PresetLRScheduler)
from utils.prune_utils import (filter_indices,
filter_indices_ni,
get_threshold,
update_indices,
normalize_factors,
prune_model_ni)
from utils.network_utils import stablize_bn
from tqdm import tqdm
from .hessian_fact import get_trace_hut
from .pyhessian import hessian
from .pyhessian import group_product, group_add, normalization, get_params_grad, hessian_vector_product, orthnormal, cpu2gpu, gpu2cpu
import numpy as np
import time
import scipy.linalg
import os.path
from os import path
class HessianPruner:
def __init__(self,
model,
builder,
config,
writer,
logger,
prune_ratio_limit,
network,
batch_averaged=True,
use_patch=False,
fix_layers=0,
hessian_mode='Trace',
use_decompose=False):
print('Using patch is %s' % use_patch)
self.iter = 0
self.logger = logger
self.writer = writer
self.config = config
self.prune_ratio_limit = prune_ratio_limit
self.network = network
self.batch_averaged = batch_averaged
self.use_decompose = use_decompose
self.known_modules = {'Linear', 'Conv2d'}
if self.use_decompose:
self.known_modules = {'Conv2d'}
self.modules = []
self.model = model
self.builder = builder
self.fix_layers = fix_layers
self.steps = 0
self.use_patch = False # use_patch
self.W_pruned = {}
self.S_l = None
self.hessian_mode = hessian_mode
self.importances = {}
self._inversed = False
self._cfgs = {}
self._indices = {}
def make_pruned_model(self, dataloader, criterion, device, fisher_type, prune_ratio, is_loader=False, normalize=True, re_init=False, n_v=300):
self.prune_ratio = prune_ratio # use for some special case, particularly slq_full, slq_layer
self._prepare_model()
self.init_step()
if self.config.dataset == 'imagenet':
is_loader = True
self._compute_hessian_importance(dataloader, criterion, device, is_loader, n_v=n_v)
if self.use_decompose:
            self._do_prune_ni(prune_ratio, self.config.ni_ratio, re_init)
self._build_pruned_model_ni(re_init)
else:
self._do_prune(prune_ratio, re_init)
self._build_pruned_model(re_init)
self._rm_hooks()
self._clear_buffer()
return str(self.model)
def _prepare_model(self):
count = 0
for module in self.model.modules():
classname = module.__class__.__name__
if classname in self.known_modules:
self.modules.append(module)
count += 1
self.modules = self.modules[self.fix_layers:]
def _compute_hessian_importance(self, dataloader, criterion, device, is_loader, n_v=300):
###############
# Here, we use the fact that Conv does not have bias term
###############
if self.hessian_mode == 'trace':
for m in self.model.parameters():
# set requires_grad for convolution layers only
shape_list = [2, 4]
if self.use_decompose:
shape_list = [4]
if len(m.shape) in shape_list:
m.requires_grad = True
else:
m.requires_grad = False
trace_dir = f"../HAPresults/{self.config.dataset}_result/{self.config.network}{self.config.depth}/tract.npy"
print(trace_dir)
if os.path.exists(trace_dir):
print(f"Loading trace from {trace_dir}")
results = np.load(trace_dir, allow_pickle=True)
else:
results = get_trace_hut(self.model, dataloader, criterion, n_v=n_v, loader=is_loader, channelwise=True, layerwise=False)
np.save(trace_dir, results)
for m in self.model.parameters():
m.requires_grad = True
channel_trace, weighted_trace = [], []
for k, layer in enumerate(results):
channel_trace.append(torch.zeros(len(layer)))
weighted_trace.append(torch.zeros(len(layer)))
for cnt, channel in enumerate(layer):
channel_trace[k][cnt] = sum(channel) / len(channel)
for k, m in enumerate(self.modules):
tmp = []
for cnt, channel in enumerate(m.weight.data):
                    tmp.append((channel_trace[k][cnt] * channel.detach().norm()**2 / channel.numel()).cpu().item())
self.importances[m] = (tmp, len(tmp))
self.W_pruned[m] = fetch_mat_weights(m, False)
elif self.hessian_mode == 'random':
# get uniform baseline
for k, m in enumerate(self.modules):
tmp = []
for cnt, channel in enumerate(m.weight.data):
tmp.append( np.random.randn() )
self.importances[m] = (tmp, len(tmp))
self.W_pruned[m] = fetch_mat_weights(m, False)
def _do_prune(self, prune_ratio, re_init):
# get threshold
all_importances = []
for m in self.modules:
imp_m = self.importances[m]
imps = imp_m[0]
all_importances += imps
all_importances = sorted(all_importances)
idx = int(prune_ratio * len(all_importances))
threshold = all_importances[idx]
threshold_recompute = get_threshold(all_importances, prune_ratio)
idx_recomputed = len(filter_indices(all_importances, threshold))
print('=> The threshold is: %.5f (%d), computed by function is: %.5f (%d).' %
(threshold, idx, threshold_recompute, idx_recomputed))
# do pruning
print('=> Conducting network pruning. Max: %.5f, Min: %.5f, Threshold: %.5f' %
(max(all_importances), min(all_importances), threshold))
self.logger.info("[Weight Importances] Max: %.5f, Min: %.5f, Threshold: %.5f." %
(max(all_importances), min(all_importances), threshold))
for idx, m in enumerate(self.modules):
imp_m = self.importances[m]
n_r = imp_m[1]
row_imps = imp_m[0]
row_indices = filter_indices(row_imps, threshold)
r_ratio = 1 - len(row_indices) / n_r
# compute row indices (out neurons)
if r_ratio > self.prune_ratio_limit:
r_threshold = get_threshold(row_imps, self.prune_ratio_limit)
row_indices = filter_indices(row_imps, r_threshold)
                print('* layer prune ratio exceeded the limit; re-thresholded at %.5f' % r_threshold)
if isinstance(m, nn.Linear) and idx == len(self.modules) - 1:
row_indices = list(range(self.W_pruned[m].size(0)))
m.out_indices = row_indices
m.in_indices = None
update_indices(self.model, self.network)
def _build_pruned_model(self, re_init):
for m_name, m in self.model.named_modules():
if isinstance(m, nn.BatchNorm2d):
idxs = m.in_indices
m.num_features = len(idxs)
m.weight.data = m.weight.data[idxs]
m.bias.data = m.bias.data[idxs].clone()
m.running_mean = m.running_mean[idxs].clone()
m.running_var = m.running_var[idxs].clone()
m.weight.grad = None
m.bias.grad = None
elif isinstance(m, nn.Conv2d):
in_indices = m.in_indices
if m.in_indices is None:
in_indices = list(range(m.weight.size(1)))
m.weight.data = m.weight.data[m.out_indices, :, :, :][:, in_indices, :, :].clone()
if m.bias is not None:
m.bias.data = m.bias.data[m.out_indices]
m.bias.grad = None
m.in_channels = len(in_indices)
m.out_channels = len(m.out_indices)
m.weight.grad = None
elif isinstance(m, nn.Linear):
in_indices = m.in_indices
if m.in_indices is None:
in_indices = list(range(m.weight.size(1)))
m.weight.data = m.weight.data[m.out_indices, :][:, in_indices].clone()
if m.bias is not None:
m.bias.data = m.bias.data[m.out_indices].clone()
m.bias.grad = None
m.in_features = len(in_indices)
m.out_features = len(m.out_indices)
m.weight.grad = None
if re_init:
self.model.apply(_weights_init)
def _do_prune_ni(self, prune_ratio, ni_ratio, re_init):
# get threshold
all_importances = []
for m in self.modules:
imp_m = self.importances[m]
imps = imp_m[0]
all_importances += imps
all_importances = sorted(all_importances)
idx = int(prune_ratio * len(all_importances))
ni_idx = int( (1-ni_ratio) *prune_ratio * len(all_importances))
threshold = all_importances[idx]
ni_threshold = all_importances[ni_idx]
# do pruning
print('=> Conducting network pruning. Max: %.5f, Min: %.5f, Threshold: %.5f' %
(max(all_importances), min(all_importances), threshold))
self.logger.info("[Weight Importances] Max: %.5f, Min: %.5f, Threshold: %.5f." %
(max(all_importances), min(all_importances), threshold))
for idx, m in enumerate(self.modules):
imp_m = self.importances[m]
n_r = imp_m[1]
row_imps = imp_m[0]
remained_indices, ni_indices, pruned_indices = filter_indices_ni(row_imps, threshold, ni_threshold)
r_ratio = (len(remained_indices) + len(ni_indices)) / n_r
# compute row indices (out neurons)
            if r_ratio > self.prune_ratio_limit:
                row_imps = sorted(row_imps)
                limit_idx = int(self.prune_ratio_limit * len(row_imps))
                ni_idx = int((1 - ni_ratio) * prune_ratio * len(row_imps))
                tmp_threshold = row_imps[limit_idx]
                tmp_ni_threshold = row_imps[ni_idx]
                remained_indices, ni_indices, pruned_indices = filter_indices_ni(row_imps, tmp_threshold, tmp_ni_threshold)
                print('* layer prune ratio exceeded the limit; re-thresholded')
            if isinstance(m, nn.Linear) and idx == len(self.modules) - 1:
                # keep every output neuron of the final classifier layer
                remained_indices = list(range(self.W_pruned[m].size(0)))
                ni_indices = []
                pruned_indices = []
m.remained_indices = remained_indices
m.ni_indices = ni_indices
m.pruned_indices = pruned_indices
m.out_indices = sorted(m.remained_indices + m.ni_indices)
m.in_indices = None
update_indices(self.model, self.network)
def _build_pruned_model_ni(self, re_init):
for m in self.model.modules():
if isinstance(m, nn.BatchNorm2d):
idxs = m.in_indices
# print(len(idxs))
m.num_features = len(idxs)
m.weight.data = m.weight.data[idxs]
m.bias.data = m.bias.data[idxs].clone()
m.running_mean = m.running_mean[idxs].clone()
m.running_var = m.running_var[idxs].clone()
m.weight.grad = None
m.bias.grad = None
elif isinstance(m, nn.Linear):
in_indices = m.in_indices
if m.in_indices is None:
in_indices = list(range(m.weight.size(1)))
m.weight.data = m.weight.data[:, in_indices].clone()
if m.bias is not None:
m.bias.data = m.bias.data.clone()
m.bias.grad = None
m.in_features = len(in_indices)
m.weight.grad = None
self.model = prune_model_ni(self.model.module)
if re_init:
self.model.apply(_weights_init)
def init_step(self):
self.steps = 0
def step(self):
self.steps += 1
def _rm_hooks(self):
for m in self.model.modules():
classname = m.__class__.__name__
if classname in self.known_modules:
m._backward_hooks = OrderedDict()
m._forward_pre_hooks = OrderedDict()
def _clear_buffer(self):
self.m_aa = {}
self.m_gg = {}
self.d_a = {}
self.d_g = {}
self.Q_a = {}
self.Q_g = {}
self.modules = []
if self.S_l is not None:
self.S_l = {}
def fine_tune_model(self, trainloader, testloader, criterion, optim, learning_rate, weight_decay, nepochs=10,
device='cuda'):
self.model = self.model.train()
self.model = self.model.cpu()
self.model = self.model.to(device)
optimizer = optim.SGD(self.model.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay)
# optimizer = optim.Adam(self.model.parameters(), weight_decay=5e-4)
if self.config.dataset == "cifar10":
lr_schedule = {0: learning_rate,
int(nepochs * 0.5): learning_rate * 0.1,
int(nepochs * 0.75): learning_rate * 0.01}
elif self.config.dataset == "imagenet":
lr_schedule = {0 : learning_rate,
30: learning_rate * 0.1,
60: learning_rate * 0.01}
lr_scheduler = PresetLRScheduler(lr_schedule)
best_test_acc, best_test_loss = 0, 100
iterations = 0
for epoch in range(nepochs):
self.model = self.model.train()
correct = 0
total = 0
all_loss = 0
lr_scheduler(optimizer, epoch)
desc = ('[LR: %.5f] Loss: %.3f | Acc: %.3f%% (%d/%d)' % (
lr_scheduler.get_lr(optimizer), 0, 0, correct, total))
prog_bar = tqdm(enumerate(trainloader), total=len(trainloader), desc=desc, leave=True)
for batch_idx, (inputs, targets) in prog_bar:
optimizer.zero_grad()
inputs, targets = inputs.to(device), targets.to(device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
self.writer.add_scalar('train_%d/loss' % self.iter, loss.item(), iterations)
iterations += 1
all_loss += loss.item()
loss.backward()
optimizer.step()
_, predicted = outputs.max(1)
total += targets.size(0)
correct += predicted.eq(targets).sum().item()
desc = ('[%d][LR: %.5f, WD: %.5f] Loss: %.3f | Acc: %.3f%% (%d/%d)' %
(epoch, lr_scheduler.get_lr(optimizer), weight_decay, all_loss / (batch_idx + 1),
100. * correct / total, correct, total))
prog_bar.set_description(desc, refresh=True)
test_loss, test_acc, top5_acc = self.test_model(testloader, criterion, device)
            self.logger.info('%d Test Loss: %.3f, Test Top1 %.2f%%(test), Test Top5 %.2f%%(test).' % (epoch, test_loss, test_acc, top5_acc))
if test_acc > best_test_acc:
best_test_loss = test_loss
best_test_acc = test_acc
network = self.config.network
depth = self.config.depth
dataset = self.config.dataset
path = os.path.join(self.config.checkpoint, '%s_%s%s.pth.tar' % (dataset, network, depth))
save = {
'args': self.config,
'net': self.model,
'acc': test_acc,
'loss': test_loss,
'epoch': epoch
}
torch.save(save, path)
print('** Finetuning finished. Stabilizing batch norm and test again!')
stablize_bn(self.model, trainloader)
test_loss, test_acc, top5_acc = self.test_model(testloader, criterion, device)
best_test_loss = best_test_loss if best_test_acc > test_acc else test_loss
best_test_acc = max(test_acc, best_test_acc)
return best_test_loss, best_test_acc
def test_model(self, dataloader, criterion, device='cuda'):
self.model = self.model.eval()
self.model = self.model.cpu()
self.model = self.model.to(device)
correct = 0
top_1_correct = 0
top_5_correct = 0
total = 0
all_loss = 0
desc = ('Loss: %.3f | Acc: %.3f%% (%d/%d)' % (0, 0, correct, total))
prog_bar = tqdm(enumerate(dataloader), total=len(dataloader), desc=desc, leave=True)
for batch_idx, (inputs, targets) in prog_bar:
inputs, targets = inputs.to(device), targets.to(device)
outputs = self.model(inputs)
loss = criterion(outputs, targets)
all_loss += loss.item()
total += targets.size(0)
_, pred = outputs.topk(5, 1, True, True)
pred = pred.t()
correct = pred.eq(targets.view(1, -1).expand_as(pred))
top_1_correct += correct[:1].contiguous().view(-1).float().sum(0)
top_5_correct += correct[:5].contiguous().view(-1).float().sum(0)
desc = ('Loss: %.3f | Top1: %.3f%% | Top5: %.3f%% ' %
(all_loss / (batch_idx + 1), 100. * top_1_correct / total, 100. * top_5_correct / total))
prog_bar.set_description(desc, refresh=True)
return all_loss / (batch_idx + 1), 100. * float(top_1_correct / total), 100. * float(top_5_correct / total)
def speed_model(self, dataloader, criterion, device='cuda'):
self.model = self.model.eval()
self.model = self.model.cpu()
self.model = self.model.to(device)
# warm-up
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = self.model(inputs)
if batch_idx == 999:
break
        # time measure
start = time.time()
with torch.no_grad():
for batch_idx, (inputs, targets) in enumerate(dataloader):
inputs, targets = inputs.to(device), targets.to(device)
outputs = self.model(inputs)
if batch_idx == 999:
break
end = time.time()
return end - start
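# Hypothetical usage sketch (model/config/loader names are placeholders, not
# part of this module; shapes and hyperparameters are illustrative only):
#   pruner = HessianPruner(model, builder, config, writer, logger,
#                          prune_ratio_limit=0.9, network='resnet')
#   pruner.make_pruned_model(trainloader, criterion, 'cuda', None,
#                            prune_ratio=0.5, is_loader=True)
#   loss, acc = pruner.fine_tune_model(trainloader, testloader, criterion,
#                                      torch.optim, learning_rate=0.01,
#                                      weight_decay=5e-4)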
|
11526870
|
from genmod.annotate_models.models import check_X_dominant
from genmod.vcf_tools import Genotype
from ped_parser import FamilyParser
FAMILY_FILE = "tests/fixtures/recessive_trio.ped"
def get_family(family_file = None, family_lines = None):
"""Return a family object
"""
family = None
if family_file:
family = FamilyParser(open(family_file, 'r'))
elif family_lines:
family = FamilyParser(family_lines)
return family
################# Test affected ###############
def test_x_affected_recessive_male():
"""Test a sick male
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t2\n"
]
family = get_family(family_lines=family_lines)
recessive_variant = {'genotypes': {}}
recessive_variant['genotypes']['proband'] = Genotype(**{'GT':'0/1'})
assert check_X_dominant(
variant = recessive_variant,
family = family
) == True
def test_x_affected_recessive_female():
"""Test a sick heterozygote female
Females needs to bo hom alt to follow pattern
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t2\n"
]
family = get_family(family_lines=family_lines)
recessive_variant = {'genotypes': {}}
recessive_variant['genotypes']['proband'] = Genotype(**{'GT':'0/1'})
assert check_X_dominant(
variant = recessive_variant,
family = family
) == True
def test_x_affected_homozygote_male():
"""Test an affected homozygote male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t2\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'1/1'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == True
def test_x_affected_homozygote_female():
"""Test an affected homozygote male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t2\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'1/1'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == True
def test_x_affected_male_ref_call():
"""Test an affected ref call male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t2\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'0/0'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == False
def test_x_affected_female_ref_call():
"""Test an affected ref call male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t2\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'0/0'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == False
def test_x_affected_no_call_male():
"""Test a sick male with no gt call
This should be true since there is no information that contradicts the model
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t2\n"
]
family = get_family(family_lines=family_lines)
no_call_variant = {'genotypes': {}}
no_call_variant['genotypes']['proband'] = Genotype(**{'GT':'./.'})
assert check_X_dominant(
variant = no_call_variant,
family = family
) == True
def test_x_affected_no_call_male_strict():
"""Test a sick male with no gt call
    This should not be true since we always need 'proof'
    for an inheritance pattern in strict mode.
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t2\n"
]
family = get_family(family_lines=family_lines)
no_call_variant = {'genotypes': {}}
no_call_variant['genotypes']['proband'] = Genotype(**{'GT':'./.'})
assert check_X_dominant(
variant = no_call_variant,
family = family,
strict = True
) == False
############### Test healthy ##############
def test_x_healthy_recessive_male():
"""Test a healthy recessive male
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t1\n"
]
family = get_family(family_lines=family_lines)
recessive_variant = {'genotypes': {}}
recessive_variant['genotypes']['proband'] = Genotype(**{'GT':'0/1'})
assert check_X_dominant(
variant = recessive_variant,
family = family
) == False
def test_x_healthy_recessive_female():
"""Test a healthy heterozygote female
Females needs to bo hom alt to follow pattern
"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t1\n"
]
family = get_family(family_lines=family_lines)
recessive_variant = {'genotypes': {}}
recessive_variant['genotypes']['proband'] = Genotype(**{'GT':'0/1'})
assert check_X_dominant(
variant = recessive_variant,
family = family
) == True
def test_x_healthy_homozygote_male():
"""Test an healthy homozygote male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t1\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'1/1'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == False
def test_x_healthy_homozygote_female():
"""Test an healthy homozygote female"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t1\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'1/1'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == False
def test_x_healthy_male_ref_call():
"""Test an healthy ref call male"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t1\t1\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'0/0'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == True
def test_x_healthy_female_ref_call():
"""Test an healthy female ref call"""
family_lines = [
"#FamilyID\tSampleID\tFather\tMother\tSex\tPhenotype\n",
"1\tproband\t0\t0\t2\t1\n"
]
family = get_family(family_lines=family_lines)
homozygote_variant = {'genotypes': {}}
homozygote_variant['genotypes']['proband'] = Genotype(**{'GT':'0/0'})
assert check_X_dominant(
variant = homozygote_variant,
family = family
) == True
|
11526889
|
from matplotlib import pyplot as plt
import matplotlib as mpl
import numpy as np
%config InlineBackend.figure_format='retina'
%matplotlib inline
"""
Log-scale discrete colormaps with matplotlib that you can happily copy-paste into a Jupyter notebook
(inspired by http://stackoverflow.com/questions/14777066/matplotlib-discrete-colorbar)
"""
x = [ 6.23343507e-03, 2.81348181e-02, 4.68303411e-03, 3.42539566e-01, 3.45920197e-03, 2.90532859e-04, 3.08496503e-05, 2.63339694e-03, 2.63879760e-07]
y = [ 0.80633933, 0.40211565, 0.68903025, 0.4414968, 0.45019223, 0.35095171, 0.37928863, 0.2779556, 0.27843539]
z = [ 4.25201185, 4.11153144, 4.23549925, 4.0512849, 4.05836118, 4.01689744, 4.02224028, 3.96961677, 3.96906273]
nbins = 8
log_x = np.log10(x)
new_range = (log_x - np.min(log_x))*(nbins/(np.max(log_x)-np.min(log_x)))
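# map log10(x) linearly onto [0, nbins]: each color bin then covers an equal
# interval in log space, so color encodes order of magnitude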
# setup the plot
fig, ax = plt.subplots(1,1, figsize=(6,6))
# define the colormap
cmap = plt.cm.YlGnBu
# extract all colors from the .YlGnBu map
cmaplist = [cmap(i) for i in range(cmap.N)]
# create the new map
cmap = cmap.from_list('Custom cmap', cmaplist, cmap.N)
# define the bins and normalize
bounds = np.linspace(0,nbins,nbins+1)
myticks = [10**-i for i in range(nbins+1)][::-1]
norm = mpl.colors.BoundaryNorm(bounds, cmap.N)
# make the scatter
scat = ax.scatter(y,z,c=new_range,s=100,cmap=cmap, norm=norm)
# create a second axes for the colorbar
ax2 = fig.add_axes([0.95, 0.1, 0.03, 0.8])
cbar = mpl.colorbar.ColorbarBase(ax2, cmap=cmap, norm=norm, spacing='proportional', ticks=bounds, boundaries=bounds, format='%1i')
cbar.ax.set_yticklabels(myticks)
ax.set_title('Well defined discrete colors')
ax2.set_ylabel('Very custom cbar [-]', size=12)
|
11526905
|
import pytest
import sklearn.metrics
from isic_challenge_scoring import metrics
def test_binary_accuracy_reference(
real_cm, real_truth_binary_values, real_prediction_binary_values
):
value = metrics.binary_accuracy(real_cm)
reference_value = sklearn.metrics.accuracy_score(
real_truth_binary_values, real_prediction_binary_values
)
assert value == pytest.approx(reference_value)
def test_binary_sensitivity_reference(
real_cm, real_truth_binary_values, real_prediction_binary_values
):
value = metrics.binary_sensitivity(real_cm)
reference_value = sklearn.metrics.recall_score(
real_truth_binary_values, real_prediction_binary_values
)
assert value == pytest.approx(reference_value)
def test_binary_jaccard_reference(real_cm, real_truth_binary_values, real_prediction_binary_values):
value = metrics.binary_jaccard(real_cm)
reference_value = sklearn.metrics.jaccard_score(
real_truth_binary_values, real_prediction_binary_values
)
assert value == pytest.approx(reference_value)
def test_binary_dice_reference(real_cm, real_truth_binary_values, real_prediction_binary_values):
value = metrics.binary_dice(real_cm)
reference_value = sklearn.metrics.f1_score(
real_truth_binary_values, real_prediction_binary_values
)
assert value == pytest.approx(reference_value)
def test_binary_ppv_reference(real_cm, real_truth_binary_values, real_prediction_binary_values):
value = metrics.binary_ppv(real_cm)
reference_value = sklearn.metrics.precision_score(
real_truth_binary_values, real_prediction_binary_values
)
assert value == pytest.approx(reference_value)
def test_jaccard_dice_equality(real_cm):
# Some mathematical equalities which will always hold
jaccard = metrics.binary_jaccard(real_cm)
dice = metrics.binary_dice(real_cm)
assert dice == (2 * jaccard) / (1.0 + jaccard)
assert jaccard == dice / (2.0 - dice)
|
11526916
|
import io
import logging
import pytest
from abcvoting.output import Output, VERBOSITY_TO_NAME, DETAILS, INFO
@pytest.mark.parametrize("verbosity", VERBOSITY_TO_NAME.keys())
def test_verbosity(capfd, verbosity):
output = Output(verbosity=verbosity)
output.debug2("debug2")
output.debug("debug")
output.details("details")
output.info("info")
output.warning("warning")
output.error("error")
output.critical("critical")
stdout = capfd.readouterr().out
for verbosity_value, verbosity_name in VERBOSITY_TO_NAME.items():
if verbosity_value >= verbosity:
assert verbosity_name.lower() in stdout
else:
assert verbosity_name.lower() not in stdout
def test_verbosity2(capfd):
output = Output(verbosity=INFO)
output.details("details")
output.info("info")
stdout = capfd.readouterr().out
assert "info\n" in stdout
assert "details\n" not in stdout
output.set_verbosity(DETAILS)
output.details("details")
output.info("info")
stdout = capfd.readouterr().out
assert "info\n" in stdout
assert "details\n" in stdout
@pytest.mark.parametrize("verbosity", [INFO, DETAILS])
def test_logger(capfd, verbosity):
logger = logging.getLogger("testoutput")
logger.setLevel(logging.DEBUG)
logger_output = io.StringIO("test")
handler = logging.StreamHandler(stream=logger_output)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
output = Output(verbosity=verbosity, logger=logger)
output.info("info")
output.debug2("debug2")
output.debug("debug")
output.details("details")
handler.flush()
stdout = capfd.readouterr().out
logger_output_str = logger_output.getvalue()
assert "info\n" in stdout
assert "debug2\n" not in stdout
if verbosity <= DETAILS:
assert "details\n" in stdout
# always printed, independent of verbosity, determined by logger's level
assert "info\n" in logger_output_str
assert "details\n" in logger_output_str
assert "debug2\n" in logger_output_str
|
11526922
|
from django.urls import path
from django.views.generic import TemplateView
from . import views
urlpatterns = [
path('pdf', views.pdf),
# path('docx', views.docx),
path('preview', TemplateView.as_view(template_name='dashboard/stattalon_preview.html')),
path('extra-nofication', views.extra_nofication),
path('covid-result', views.covid_result),
path('json-nofication', views.json_nofication),
]
|
11526935
|
import z3
MIN_BASE = 0x10000
def bvs(name: str, size: int):
return z3.BitVec(name, size)
def bvv(val: int, size: int):
return z3.BitVecVal(val, size)
def split_bv_in_list(bv: z3.BitVecRef, size: int) -> list:
assert size % 8 == 0
res = []
    for i in range(0, bv.size(), size):
b = z3.simplify(z3.Extract(i+size-1, i, bv))
res.append(b)
return res
def bvv_from_bytes(val: bytes): # DONT USE IT TO CREATE LONG BV!!
res = None
for c in val:
v = z3.BitVecVal(c, 8)
res = z3.Concat(res, v) if res is not None else v
return res
def split_bv(bv: z3.BitVecRef, split_index: int):
    return (
        # most significant
        z3.simplify(z3.Extract(bv.size() - 1, split_index, bv)),
        # least significant
        z3.simplify(z3.Extract(split_index - 1, 0, bv)),
    )
def symbolic(val: z3.BitVecRef) -> bool:
return z3.simplify(val).decl().kind() != z3.Z3_OP_BNUM
def bvv_to_long(val: z3.BitVecRef):
    assert not symbolic(val)
    return z3.simplify(val).as_long()
def heuristic_find_base(val: z3.BitVecRef):
    fringe = val.children()
    while fringe:
        # simplify first so concrete subexpressions become numerals with as_long()
        el = z3.simplify(fringe.pop())
        if not symbolic(el) and el.as_long() > MIN_BASE:
            return el.as_long()
        fringe.extend(el.children())
    return -1
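# Illustrative usage (requires the z3-solver package): split a concrete 32-bit
# value into its 16-bit halves and read them back as Python ints.
if __name__ == "__main__":
    hi, lo = split_bv(bvv(0xAABBCCDD, 32), 16)
    assert bvv_to_long(hi) == 0xAABB and bvv_to_long(lo) == 0xCCDD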
|
11526945
|
from blobrl.explorations import AdaptativeEpsilonGreedy
def test_adaptative_epsilon_greedy_step_min():
exploration = AdaptativeEpsilonGreedy(0.8, 0.1, 0.99)
exploration.be_greedy(0)
def test_adaptative_epsilon_greedy_step():
exploration = AdaptativeEpsilonGreedy(0.8, 0.1, 0.99)
exploration.be_greedy(5)
def test__str__():
explo = AdaptativeEpsilonGreedy(0.8, 0.1, 0.99)
    assert str(explo) == f'AdaptativeEpsilonGreedy-{explo.epsilon_max}-{explo.epsilon_min}-{explo.gamma}'
|
11527007
|
def headerprint(label):
    print("\n\033[94m{0}\033[0m".format(label))
    print("------------------------------------------------------------------\n")
def cprint(label, val):
    print("\033[92m{0}:\033[0m {1}".format(label, val))
|
11527036
|
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="Implementation of Semantic Image Synthesis with Spatially-Adaptive Normalization")
# Dataloader
parser.add_argument('--path', required=True, help='path to the image folder')
parser.add_argument('--img-size', dest='img_size', nargs='+', default=[256,256], type=int,
help='The size of images for training and validation')
parser.add_argument('--batch-size', dest='batch_size', type=int, default=16,
help='Batch size for the dataloaders for train and val set')
parser.add_argument('--num-workers', dest='num_workers', type=int, default=4,
help='Number of CPU cores you want to use for data loading')
# SPADE normalization layer
parser.add_argument('--spade-filter', dest='spade_filter', default=128, type=int,
help='The filter size to use in SPADE block')
    parser.add_argument('--spade-kernel', dest='spade_kernel', default=3, type=int,
help='The kernel size to use in SPADE block')
# SPADE ResBlk
    # You can add flags here depending on whether you want to do addition or concatenation of the outputs
parser.add_argument('--spade-resblk-kernel', dest='spade_resblk_kernel', default=3, type=int,
help='The kernel size to be used for the conv layers in SPADE ResBlk')
# SPADE Generator
parser.add_argument('--gen-input-size', dest='gen_input_size', default=256, type=int,
help='The noise size to be given to generator')
parser.add_argument('--gen-hidden-size', dest='gen_hidden_size', default=16384, type=int,
help='Hidden size for the first layer of generator')
# Training arguments
parser.add_argument('--epochs', dest='epochs', type=int, default=100,
help='Number of epochs to run of training')
    parser.add_argument('--lr_gen', dest='lr_gen', type=float, default=0.0001,
                        help='Learning rate of generator')
    parser.add_argument('--lr_dis', dest='lr_dis', type=float, default=0.0004,
                        help='Learning rate of discriminator')
return parser
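# Illustrative usage ('./data' is a placeholder path, not a real dataset):
if __name__ == '__main__':
    args = get_parser().parse_args(['--path', './data', '--batch-size', '8'])
    print(args.img_size, args.spade_filter, args.lr_gen)  # [256, 256] 128 0.0001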
|
11527049
|
import sys
import numpy as np
from .. import face
from ..utils import utils
from ..utils.recognize import distance
class Face_test:
def __init__(self, model):
self.model = model
self.labels = np.array(model['labels'])
self.encodes = np.array(model['encodes'])
self.label_map = model['label_map']
def translate_label(self,flag_array):
return [self.label_map[l] for l in flag_array]
def predict_with_encode_faces(self, encode_faces, tolerance=0.6):
if not encode_faces:
return []
        dis = distance(self.encodes, encode_faces)
        maybe_result = [np.where(d < tolerance) for d in dis]
        return [{'label': self.translate_label(self.labels[res]), 'posibility': d[res]}
                for res, d in zip(maybe_result, dis)]
def predict_with_image(self, image, tolerance=0.6):
# find all faces and encode
detect_result = face.detect_face_and_encode(image)
encoded_faces = detect_result['encoded_faces']
recognize_result = []
        if encoded_faces:
            recognize_result = self.predict_with_encode_faces(encoded_faces, tolerance)
return {'recognize_result':recognize_result,
'detect_result':detect_result}
|
11527052
|
import asyncio
import gc
import pytest
from async_class import AsyncObject, TaskStore, link, task
class GlobalInitializedClass(AsyncObject):
pass
global_initialized_instance = GlobalInitializedClass()
async def test_global_initialized_instance(loop):
await global_initialized_instance
assert not global_initialized_instance.is_closed
async def test_simple():
await AsyncObject()
async def test_simple_class():
class Simple(AsyncObject):
event = asyncio.Event()
async def __ainit__(self):
self.loop.call_soon(self.event.set)
await self.event.wait()
instance = await Simple()
assert instance.__class__ == Simple
assert Simple.event.is_set()
async def test_simple_inheritance():
class Simple(AsyncObject):
event = asyncio.Event()
async def __ainit__(self):
self.loop.call_soon(self.event.set)
await self.event.wait()
def __del__(self):
return super().__del__()
class MySimple(Simple):
pass
instance = await MySimple()
assert instance.__class__ == MySimple
assert instance.__class__ != Simple
assert Simple.event.is_set()
assert MySimple.event.is_set()
async def test_simple_with_init():
class Simple(AsyncObject):
event = asyncio.Event()
def __init__(self):
super().__init__()
self.value = 3
async def __ainit__(self):
self.loop.call_soon(self.event.set)
await self.event.wait()
instance = await Simple()
assert instance.__class__ == Simple
assert Simple.event.is_set()
assert instance.value == 3
async def test_simple_with_init_inheritance():
class Simple(AsyncObject):
event = asyncio.Event()
def __init__(self):
super().__init__()
self.value = 3
async def __ainit__(self):
self.loop.call_soon(self.event.set)
await self.event.wait()
class MySimple(Simple):
pass
instance = await MySimple()
assert instance.__class__ == MySimple
assert Simple.event.is_set()
assert MySimple.event.is_set()
assert instance.value == 3
async def test_non_corotine_ainit():
with pytest.raises(TypeError):
class _(AsyncObject):
def __ainit__(self):
pass
async def test_async_class_task_store():
class Sample(AsyncObject):
async def __ainit__(self):
self.future = self.create_future()
self.task = self.create_task(asyncio.sleep(3600))
obj = await Sample()
assert obj.__tasks__
assert isinstance(obj.__tasks__, TaskStore)
assert not obj.future.done()
assert not obj.task.done()
await obj.close()
assert obj.future.done()
assert obj.task.done()
assert obj.is_closed
await obj.close()
del obj
async def test_async_class_inherit_from():
class Parent(AsyncObject):
pass
class Child(Parent):
async def __ainit__(self, parent: Parent):
link(self, parent)
parent = await Parent()
child = await Child(parent)
assert not child.is_closed
await parent.close(asyncio.CancelledError)
assert parent.is_closed
assert parent.__tasks__.is_closed
assert child.__tasks__.is_closed
assert child.is_closed
async def test_await_redeclaration():
with pytest.raises(TypeError):
class _(AsyncObject):
def __await__(self):
pass
async def test_close_uninitialized(loop):
future = asyncio.Future()
class Sample(AsyncObject):
async def __ainit__(self, *args, **kwargs):
await future
instance = Sample()
task: asyncio.Task = loop.create_task(instance.__await__())
await asyncio.sleep(0.1)
await instance.close()
assert task.done()
with pytest.raises(asyncio.CancelledError):
await task
assert future.done()
with pytest.raises(asyncio.CancelledError):
await future
def callback_regular():
pass
def callback_with_raise():
return 1 / 0
@pytest.mark.parametrize("callback", [callback_regular, callback_with_raise])
async def test_close_callbacks(callback):
class Sample(AsyncObject):
pass
instance = await Sample()
event = asyncio.Event()
instance.__tasks__.add_close_callback(event.set)
instance.__tasks__.add_close_callback(callback)
await instance.close()
assert event.is_set()
async def test_del():
class Sample(AsyncObject):
pass
instance = await Sample()
event = asyncio.Event()
instance.__tasks__.add_close_callback(event.set)
del instance
await event.wait()
async def test_del_child():
class Parent(AsyncObject):
pass
class Child(Parent):
async def __ainit__(self, parent: Parent):
link(self, parent)
parent = await Parent()
parent_event = asyncio.Event()
parent.__tasks__.add_close_callback(parent_event.set)
child = await Child(parent)
child_event = asyncio.Event()
child.__tasks__.add_close_callback(child_event.set)
del child
for generation in range(3):
gc.collect(generation)
await child_event.wait()
assert not parent_event.is_set()
async def test_link_init():
class Parent(AsyncObject):
pass
class Child(Parent):
def __init__(self, parent: Parent):
super().__init__()
link(self, parent)
parent = await Parent()
parent_event = asyncio.Event()
parent.__tasks__.add_close_callback(parent_event.set)
child = await Child(parent)
child_event = asyncio.Event()
child.__tasks__.add_close_callback(child_event.set)
del child
for generation in range(3):
gc.collect(generation)
await child_event.wait()
assert not parent_event.is_set()
async def test_close_non_initialized():
class Sample(AsyncObject):
pass
sample = Sample()
await sample.close()
async def test_task_decorator():
class Sample(AsyncObject):
@task
async def sleep(self, *args):
return await asyncio.sleep(*args)
sample = await Sample()
result = sample.sleep(0)
assert isinstance(result, asyncio.Task)
result.cancel()
with pytest.raises(asyncio.CancelledError):
await result
await sample.sleep(0)
|
11527061
|
from typing import Dict, List
def parse_version_requirements(packages: List[str]) -> Dict[str, Dict[str, str]]:
    """
    Parses a list of requirements in the format <package_name><PEP_OPERATOR><SEM_VER>
    into a dictionary with the format:
{
<package_name>: {
"operator": <PEP_OPERATOR>,
"version": <SEM_VER>
}
}
:param packages: List of packages with operators and semver
:return: dict
"""
def _parse_by_operator(operator: str, package: str) -> List[str]:
return package.split(operator)
parsed_results = {}
potential_operators = ["!=", "==", ">=", "<=", ">", "<", "="]
    for package in packages:
        found = False
        for oper in potential_operators:
            if oper in package:
                parsed = _parse_by_operator(oper, package)
                parsed_results[parsed[0]] = {"operator": oper, "version": parsed[1]}
                found = True
                break
if not found:
parsed_results[package] = {}
return parsed_results
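# Illustrative sanity check of the parser above (package names and pins are
# arbitrary examples):
if __name__ == "__main__":
    example = parse_version_requirements(["requests>=2.25.1", "flask"])
    assert example == {
        "requests": {"operator": ">=", "version": "2.25.1"},
        "flask": {},
    }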
|
11527064
|
from setuptools import find_packages, setup
with open('requirements.txt') as f:
required = f.read().splitlines()
with open("README.rst", "r", encoding="utf-8") as f:
README = f.read()
setup(
name='scrapy-x',
packages=find_packages(),
install_requires=required,
version='3.0',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License V2.0',
description='a scrapy subcommand for easily enqueuing crawling jobs in a scalable and high performance way',
url='https://github.com/alash3al/scrapyx',
python_requires='>=3.6.9',
entry_points={
'scrapy.commands': [
'x=scrapyx.x:Command',
],
},
long_description=README,
)
|
11527074
|
import os
from easyreg.reg_data_utils import write_list_into_txt, loading_img_list_from_files,generate_pair_name
from glob import glob
def generate_atlas_set(original_txt_path,atlas_path,l_atlas_path, output_path,phase='train',test_phase_path_list=None, test_phase_l_path_list=None):
if phase!="test":
source_path_list,target_path_list,l_source_path_list, l_target_path_list=loading_img_list_from_files(original_txt_path)
else:
source_path_list =test_phase_path_list
l_source_path_list = test_phase_l_path_list
target_path_list = []
l_target_path_list = []
    source_path_list = source_path_list + target_path_list
    file_num = len(source_path_list)
    l_source_path_list = l_source_path_list + l_target_path_list
target_path_list = [atlas_path for _ in range(file_num)]
l_target_path_list = [l_atlas_path for _ in range(file_num)]
if l_source_path_list is not None and l_target_path_list is not None:
assert len(source_path_list) == len(l_source_path_list)
file_list = [[source_path_list[i], target_path_list[i],l_source_path_list[i],l_target_path_list[i]] for i in range(file_num)]
else:
file_list = [[source_path_list[i], target_path_list[i]] for i in range(file_num)]
output_phase_path = os.path.join(output_path,phase)
os.makedirs(output_phase_path,exist_ok=True)
pair_txt_path = os.path.join(output_phase_path,'pair_path_list.txt')
fn_txt_path = os.path.join(output_phase_path,'pair_name_list.txt')
fname_list = [generate_pair_name([file_list[i][0],file_list[i][1]]) for i in range(file_num)]
write_list_into_txt(pair_txt_path,file_list)
write_list_into_txt(fn_txt_path,fname_list)
output_path = '/playpen-raid/zyshen/oai_data/reg_test_for_atlas'
# original_txt_path_dict={'train':'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter/train/pair_path_list.txt',
# 'val':'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter/val/pair_path_list.txt',
# 'debug':'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter/debug/pair_path_list.txt'}
# atlas_path = '/playpen-raid/zyshen/oai_data/croped_atlas/atlas.nii.gz'
# l_atlas_path = '/playpen-raid/zyshen/oai_data/croped_atlas/atlas_label.nii.gz'
original_txt_path_dict={phase:'/playpen-raid/zyshen/data/croped_for_reg_debug_3000_pair_oai_reg_inter/{}/pair_path_list.txt'.format(phase) for phase in ['train','val','debug']}
atlas_path = '/playpen-raid/zyshen/oai_data/atlas/atlas.nii.gz'
l_atlas_path = '/playpen-raid/zyshen/oai_data/atlas/atlas_label.nii.gz'
test_phase_folder = "/playpen-raid/zyshen/oai_data/Nifti_rescaled"
test_phase_path_list = glob(os.path.join(test_phase_folder, "*_image.nii.gz"))
test_phase_l_path_list = glob(os.path.join(test_phase_folder, "*_label_all.nii.gz"))
resize_atlas = False
if resize_atlas:
from tools.image_rescale import resize_input_img_and_save_it_as_tmp
atlas_path=resize_input_img_and_save_it_as_tmp(atlas_path,is_label=False,saving_path='/playpen-raid/zyshen/oai_data/croped_atlas',fname='atlas.nii.gz')
l_atlas_path = resize_input_img_and_save_it_as_tmp(l_atlas_path,is_label=True,saving_path='/playpen-raid/zyshen/oai_data/croped_atlas',fname='atlas_label.nii.gz')
for phase,txt_path in original_txt_path_dict.items():
generate_atlas_set(txt_path,atlas_path,l_atlas_path,output_path,phase)
generate_atlas_set(None,atlas_path,l_atlas_path,output_path,"test",test_phase_path_list,test_phase_l_path_list)
|
11527087
|
from persian_tools import digits
def test_convert_to_fa():
assert digits.convert_to_fa('123٤٥٦') == '۱۲۳۴۵۶'
assert digits.convert_to_fa(123456) == '۱۲۳۴۵۶'
def test_convert_to_ar():
assert digits.convert_to_ar('123۴۵۶') == '١٢٣٤٥٦'
assert digits.convert_to_ar(123.456) == '١٢٣.٤٥٦'
def test_convert_to_en():
assert digits.convert_to_en('۱۲۳٤٥٦') == '123456'
def test_conversion_function():
assert digits._conversion('123', 'de') is None
def test_convert_to_word():
assert digits.convert_to_word(500443) == 'پانصد هزار و چهارصد و چهل و سه'
assert len(digits.convert_to_word(500)) == 5
assert digits.convert_to_word(30000000000) == 'سی میلیارد'
assert digits.convert_to_word(30000000000000) == 'سی بیلیون'
assert digits.convert_to_word(30000000000000000) == 'سی بیلیارد'
assert digits.convert_to_word(30000000000000000000) is None
assert digits.convert_to_word(0) == 'صفر'
assert digits.convert_to_word(500443, ordinal=True) == 'پانصد هزار و چهارصد و چهل و سوم'
assert digits.convert_to_word(-30, ordinal=True) == 'منفی سی اُم'
assert digits.convert_to_word(33, ordinal=True) == 'سی و سوم'
assert digits.convert_to_word(45, ordinal=True) == 'چهل و پنجم'
def test_convert_from_word():
assert digits.convert_from_word('') is None
assert digits.convert_from_word(None) is None
assert digits.convert_from_word('متن بدون عدد') == 0
assert digits.convert_from_word('صفر') == 0
assert digits.convert_from_word('منفی سه هزار') == -3000
assert digits.convert_from_word('سه هزار دویست و دوازده') == 3212
assert digits.convert_from_word('دوازده هزار بیست دو') == 12022
assert digits.convert_from_word('دوازده هزار بیست دو', separator=True) == '12,022'
assert digits.convert_from_word('دوازده هزار و بیست و دو', separator=True) == '12,022'
assert digits.convert_from_word('شیش صد و بیست و هفت') == 627
assert digits.convert_from_word('حقوق شیش صد و ۲۷ میلیون تومان سالانه') == 627 * 1000 * 1000
def test_convert_from_word_to_ar():
assert digits.convert_from_word("منفی سه هزار", digits="ar") == "-٣٠٠٠"
assert digits.convert_from_word("سه هزار دویست و دوازده", digits="ar") == "٣٢١٢"
assert digits.convert_from_word("دوازده هزار بیست دو", digits="ar") == "١٢٠٢٢"
assert digits.convert_from_word("دوازده هزار بیست دو", digits="ar", separator=True) == "١٢,٠٢٢"
assert digits.convert_from_word("دوازده هزار و بیست و دو", digits="ar", separator=True) == "١٢,٠٢٢"
assert digits.convert_from_word("چهارصد پنجاه هزار", digits="ar", separator=True) == "٤٥٠,٠٠٠"
assert digits.convert_from_word("چهارصد پنجاه هزار", digits="ar") == "٤٥٠٠٠٠"
def test_convert_from_word_with_ordinal():
assert digits.convert_from_word("منفی ۳ هزار", digits="fa", separator=True) == "-۳,۰۰۰"
assert digits.convert_from_word("منفی 3 هزار و 200", digits="fa", separator=True) == "-۳,۲۰۰"
assert digits.convert_from_word("منفی سه هزارمین", digits="fa", separator=True) == "-۳,۰۰۰"
assert digits.convert_from_word("منفی سه هزارمین", digits="fa") == "-۳۰۰۰"
assert digits.convert_from_word("منفی سه هزارمین") == -3000
assert digits.convert_from_word("منفی سه هزارم") == -3000
assert digits.convert_from_word("منفی سه هزارمین") != "-3000"
assert len(str(digits.convert_from_word("منفی سه هزارمین"))) == 5
assert digits.convert_from_word("منفی سی اُم") == -30
assert digits.convert_from_word("سی و سوم") == 33
|
11527117
|
from multiprocessing import *
# Single-process stand-in for a multiprocessing-style spawn API.
# TODO: actually start nprocs worker processes and return ProcessContext if join is False
def spawn(fn, args=(), nprocs=1, join=True, daemon=False, start_method="spawn"):
    fn(0, *args)
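# Illustrative call (worker is a hypothetical function): this stub simply runs
# it once in the current process as rank 0, so the call below prints "0 hello".
#   def worker(rank, msg):
#       print(rank, msg)
#   spawn(worker, args=("hello",), nprocs=4)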
|
11527122
|
import torch
import torch.nn as nn
import torchvision
import numpy as np
# 2D CNN
# Batch Normalization: Accelerating Deep Network Training by Reducing Internal Covariate Shift
# https://arxiv.org/abs/1502.03167
class Cnn(nn.Module):
def __init__(self, input_size, num_classes=2):
super().__init__()
print("Initialize R2D model...")
# Set the first dimension of the input size to be 4, to reduce the amount of computation
input_size[0] = 4
# Input has shape (batch_size, 3, 36, 224, 224)
# (batch_size, channel, time, height, width)
a = torch.tensor(np.zeros(input_size), dtype=torch.float32)
print("Input size:")
print("\t", a.size())
        # 2D CNN (we use the Inception-v1/GoogLeNet 2D CNN)
b = a.transpose(1, 2) # (batch_size, time, channel, height, width)
bs = b.size()
b = b.reshape(bs[0]*bs[1], bs[2], bs[3], bs[4]) # (batch_size X time, channel, height, width)
self.cnn = torchvision.models.googlenet(pretrained=True, progress=True)
num_features = self.cnn.fc.in_features
self.cnn.fc = nn.Linear(num_features, num_classes)
b = self.cnn(b) # (batch_size X time, num_classes)
b = b.reshape(bs[0], bs[1], -1) # (batch_size, time, num_classes)
b = b.transpose(1, 2) # (batch_size, num_classes, time)
print("CNN model output size:")
print("\t", b.size())
def forward(self, x):
# x has shape (batch_size, channel, time, height, width)
x = x.transpose(1, 2) # (batch_size, time, channel, height, width)
xs = x.size()
x = x.reshape(xs[0]*xs[1], xs[2], xs[3], xs[4]) # (batch_size X time, channel, height, width)
x = self.cnn(x) # (batch_size X time, num_classes)
x = x.reshape(xs[0], xs[1], -1) # (batch_size, time, num_classes)
x = x.transpose(1, 2) # (batch_size, num_classes, time)
return x
|
11527127
|
import numpy as np
from ..graph_io import TensorProtoIO, OpsProtoIO
from ..operations import OpsParam
def shape_2_ak_shape(shape):
    """
    onnx shape to anakin shape: drop None/non-positive dims and left-pad
    with 1s to 4-D, e.g. [None, 3, 224, 224] -> [1, 3, 224, 224]
    :param shape:
    :return:
    """
    mini_shape = [i for i in shape if (i is not None and i > 0)]
    return list(map(int, [1] * (4 - len(mini_shape)) + list(mini_shape)))
def np_2_ak_tensor(np_tensor):
"""
onnx np array to tensor
:param np_tensor:
:return:
"""
data_type_map2 ={
np.dtype('float32'): 'float',
np.dtype('int32'): 'int',
np.dtype('bool'): 'bool'
}
data_type_map = {
'float32': 'float',
'int32': 'int',
'bool': 'bool'
}
# print 'np_tensor: ', np_tensor['dtype']
#exit()
type_str = data_type_map.get(np_tensor['dtype'])
#assert type_str != None
ak_tensor = TensorProtoIO()
ak_tensor.set_shape(shape_2_ak_shape(np_tensor['shape']))
# ak_tensor.set_data(np_tensor['data'], type_str)
# print('type: ', type(np_tensor['data']), np_tensor['shape'], np_tensor['dtype'], type_str)
if (len(np_tensor['shape']) == 1):
ak_tensor.set_data(np_tensor['data'], type_str)
else:
ak_tensor.set_data(np_tensor['data'].flatten(), type_str)
return ak_tensor
class MedTransAK:
"""
    tools to convert a med graph to an anakin graph
"""
def __init__(self):
self.input_count=0
def Convolution(self, med_attr, param):
"""
get Conv param
:param med_attr:
:param param:
:return:
"""
np_filters = med_attr['weights']
param.weight_1 = np_2_ak_tensor(np_filters)
param.filter_num = np_filters['shape'][0] #?
param.kernel_size = med_attr['kernel']
param.strides = med_attr['strides']
param.padding = med_attr['padding'] #T L B R
param.dilation_rate = med_attr['dilations']
# print('-------conv group----')
# print('filter_num: ', param.filter_num)
# print('group: ', med_attr['group'])
param.group = med_attr['group']
param.axis = 1
if med_attr.get('bias') is not None:
param.bias_term = True
bias_tensor = med_attr['bias']
bias_tensor['shape'] = [1, 1, 1, bias_tensor['shape'][-1]]
param.weight_2 = np_2_ak_tensor(bias_tensor)
else:
param.bias_term = False
def Normalize(self, med_attr, param):
"""
get Normalize param
:param med_attr:
:param param:
:return:
"""
np_filters = med_attr['weights']
param.weight_1 = np_2_ak_tensor(np_filters)
param.begin_norm_axis = med_attr['begin_norm_axis']
param.is_across_spatial = med_attr['is_across_spatial']
param.is_shared_channel = med_attr['is_shared_channel'] #T L B R
param.eps = med_attr['eps']
param.p = med_attr['p']
def Dense(self, med_attr, param):
"""
get dense param
:param med_attr:
:param param:
:return:
"""
param.axis = 1
param.out_dim = 0
if med_attr['Gemm'] == 1:
param.weight_1 = np_2_ak_tensor(med_attr['weights'])
# if med_attr.get('trans') is not None:
# param.out_dim = med_attr['weights']['shape'][1]
# print'trans out_dim', param.out_dim, type(param.out_dim)
# else:
# param.out_dim = med_attr['weights']['shape'][0]
# print'out_dim', param.out_dim
else:
param.weight_1 = TensorProtoIO()
if med_attr.get('bias') is not None:
param.bias_term = True
param.weight_2 = np_2_ak_tensor(med_attr['bias'])
param.out_dim = len(med_attr['bias']['data'].flatten())
else:
param.bias_term = False
#print 'shape: ', med_attr['weights']['shape']
def ReLU(self, med_attr, param):
"""
get relu param
:param med_attr:
:param param:
:return:
"""
        if med_attr.get('alpha') is None:
            param.alpha = 0.0
        else:
            param.alpha = med_attr['alpha']
def PReLU(self, med_attr, param):
"""
get relu param
:param med_attr:
:param param:
:return:
"""
if med_attr.get('channel_shared') is None:
param.channel_shared = False
else:
param.channel_shared = med_attr['channel_shared']
def Concat(self, med_attr, param):
"""
get concat param
:param med_attr:
:param param:
:return:
"""
if med_attr.get('axis') is None:
param.axis = 0.0
else:
param.axis = med_attr['axis']
def Activation(self, med_attr, param):
"""
        get activation param
:param med_attr:
:param param:
:return:
"""
param.type = med_attr['type']
if med_attr['type'] == 'PReLU':
if med_attr.get('channel_shared') is None:
param.channel_shared = False
else:
param.channel_shared = med_attr['channel_shared']
param.weight_1 = np_2_ak_tensor(med_attr['weights'])
def Reshape(self, med_attr, param):
"""
get reshape param
:param med_attr:
:param param:
:return:
"""
shape = med_attr['shape']
        if isinstance(shape, np.ndarray):
shape = [int(i) for i in shape]
# print('***Reshape:*** ', shape)
param.dims = shape_2_ak_shape(shape)
# print(param.dims)
pass
def Permute(self, med_attr, param):
"""
get Permute param
:param med_attr:
:param param:
:return:
"""
shape = med_attr['shape']
param.dims = shape
def Pooling(self, med_attr, param):
"""
get pooling param
:param med_attr:
:param param:
:return:
"""
param.method = med_attr['type']
param.pool_size = med_attr['window']
param.strides = med_attr['strides']
param.padding = med_attr['padding'] # T L B R
if med_attr.get('global_pooling') is None:
param.global_pooling = False
else:
param.global_pooling = med_attr['global_pooling']
# if med_attr['padding'][0] == 0:
# param.cmp_out_shape_floor_as_conv = False
# else:
# param.cmp_out_shape_floor_as_conv = True
param.cmp_out_shape_floor_as_conv = True
pass
def Input(self, med_attr, param):
"""
get input param
:param med_attr:
:param param:
:return:
"""
param.input_shape = shape_2_ak_shape(med_attr['shape'])
param.alias = 'input_' + str(self.input_count)
self.input_count += 1
def Dropout(self, med_attr, param):
"""
        get dropout param
:param med_attr:
:param param:
:return:
"""
param.ratio = med_attr['ratio']
def Split(self, med_attr, param):
"""
get split param
:param med_attr:
:param param:
:return:
"""
param.split_num = med_attr['split_num']
def Eltwise(self, med_attr, param):
"""
get eltwise param
:param med_attr:
:param param:
:return:
"""
assert med_attr['type'] == 'Add'
param.type = med_attr['type']
param.coeff = [1.0, 1.0]
def Scale(self, med_attr, param):
"""
get scale param
:param med_attr:
:param param:
:return:
"""
# print 'weights'
param.weight_1 = np_2_ak_tensor(med_attr['weights'])
# print 'bias'
if med_attr.get('bias') is not None:
param.weight_2 = np_2_ak_tensor(med_attr['bias'])
param.bias_term = True
else:
param.bias_term = False
param.axis = 1
param.num_axes = 1
def Flatten(self, med_attr, param):
"""
get flatten param
:param med_attr:
:param param:
:return:
"""
param.start_axis = med_attr['start_axis']
param.end_axis = med_attr['end_axis']
def LRN(self, med_attr, param):
"""
get lrn param
:param med_attr:
:param param:
:return:
"""
param.local_size = med_attr['local_size']
param.alpha = med_attr['alpha']
param.beta = med_attr['beta']
param.k = med_attr['k']
param.norm_region = "ACROSS_CHANNELS"
def Softmax(self, med_attr, param):
"""
get softmax param
:param med_attr:
:param param:
:return:
"""
if med_attr.get('axis') is None:
param.axis = 3
else:
param.axis = med_attr['axis']
pass
def PixelShuffle(self, med_attr, param):
if med_attr.get('rw') is None:
param.rw = 2
else:
param.rw = med_attr['rw']
if med_attr.get('rh') is None:
param.rh = 2
else:
param.rh = med_attr['rh']
if med_attr.get('channel_first') is None:
param.channel_first = True
else:
param.channel_first = med_attr['channel_first']
# if med_attr.get('scale_factor') is None:
# param.scale_factor = 2
# else:
# param.scale_factor = med_attr['scale_factor']
def map_med_2_ak(self, ak_node, med_node):
"""
med graph convert to anakin graph
:param ak_node:
:param med_node:
:return:
"""
type_name = med_node['ak_type']
func = getattr(self, type_name, None)
param = OpsParam()
ak_op = OpsProtoIO()
med_attr = med_node['ak_attr']
#print type_name
# print med_node['name'], med_node['type'], med_node['ak_type']
func(med_attr, param)
# print 'func success'
param.feed_node_attr(ak_node)
ak_op.set_name(med_node['ak_type'])
ak_node.set_op(ak_op())
# print 'name', med_node['name']
# print 'type', type(med_node['input']), med_node['input']
# print 'type', type(med_node['output']), med_node['output']
[ak_node.add_in(i) for i in med_node['input']]
[ak_node.add_out(i) for i in med_node['output']]
|
11527141
|
from Chapter_03 import DecisionTree_CART_RF as CART
import pprint
filename = 'bcancer.csv'
dataset = CART.load_csv(filename)
# convert string attributes to integers
for i in range(0, len(dataset[0])):
CART.str_column_to_float(dataset, i)
#Now remove index column from the data set
dataset_new = []
for row in dataset:
dataset_new.append([row[i] for i in range(1,len(row))])
#Get training and testing data split
training,testing = CART.getTrainTestData(dataset_new, 0.7)
tree = CART.build_tree(training,11,5)
pprint.pprint(tree)
pre = []
act = []
for row in training:
    prediction = CART.predict(tree, row)
    pre.append(prediction)
    act.append(row[-1])
# print('Expected=%d, Got=%d' % (row[-1], prediction))
# print_tree(tree)
acc = CART.accuracy_metric(act, pre)
print('training accuracy: %.2f'%acc)
# reset so the testing accuracy is computed on the test rows only
pre = []
act = []
for row in testing:
    prediction = CART.predict(tree, row)
    pre.append(prediction)
    act.append(row[-1])
acc = CART.accuracy_metric(act, pre)
# pprint.pprint(tree)
print('testing accuracy: %.2f'%acc)
|
11527149
|
import sys
val = 0
sys.path.append("../")
from appJar import gui
def press(btn):
print(btn)
print(app.getEntry("a"))
app.setButton("Name", "b")
def num(btn):
global val
app.setEntry("ne1", "hiya"+str(val))
val += 1
app=gui()
app.addEntry("a")
app.setEntryDefault("a", "This is the default")
app.setEntryMaxLength("a", 7)
app.setEntryUpperCase("a")
app.addNamedButton("Check", "Name", press)
app.addNamedButton("Check", "Name2", press)
app.addNamedButton("Check", "Name3", press)
app.addNumericEntry("ne1")
app.addButton("set", num)
app.addLink("Click Me", press)
app.addWebLink("link1", "http://www.google.com")
app.go()
|
11527171
|
from __future__ import annotations
from typing import List, Union, Optional, Any, TYPE_CHECKING
import numpy as np
from pyNastran.bdf.cards.aero.utils import (
points_elements_from_quad_points)
from pyNastran.converters.avl.avl_helper import integer_types, get_spacing, save_wing_elements
if TYPE_CHECKING: # pragma: no cover
from cpylog import SimpleLogger
from pyNastran.converters.avl.body import Body
class Surface:
def __init__(self,
name: str,
sections: List[Any],
nchord: int, chord_spacing: float,
nspan: int, span_spacing: float,
component: Optional[int]=None,
yduplicate: Optional[float]=None,
angle: Optional[float]=None,
nowake: Optional[bool]=False,
noload: Optional[bool]=False):
self.name = name
self.component = component
self.yduplicate = yduplicate
self.angle = angle
self.sections = sections
self.nchord = nchord
self.chord_spacing = chord_spacing
self.nspan = nspan
        self.span_spacing = span_spacing
self.nowake = nowake
self.noload = noload
self.scale = np.ones(3)
self.translate = np.zeros(3)
def __contains__(self, variable_name: str) -> bool:
if variable_name == 'name':
return True
if variable_name == 'component':
return self.component is not None
elif variable_name == 'yduplicate':
return self.yduplicate is not None
elif variable_name == 'angle':
return self.angle is not None
elif variable_name == 'scale':
return self.has_scale()
elif variable_name == 'translate':
return self.has_translate()
elif variable_name == 'nowake':
return self.nowake is True
elif variable_name == 'noload':
return self.noload is True
raise NotImplementedError(variable_name)
def has_scale(self) -> bool:
"""does the surface have a SCALE field"""
return not np.array_equal(self.scale, np.ones(3))
def has_translate(self) -> bool:
"""does the surface have a TRANSLATE field"""
return not np.array_equal(self.translate, np.zeros(3))
def write(self) -> str:
"""writes the surface"""
surface_msg = (
'#--------------------------------------------------\n'
'SURFACE\n'
f'{self.name}\n'
)
surface_msg += self._write_chord_span()
if self.nowake:
surface_msg += 'NOWAKE\n'
if self.noload:
surface_msg += 'NOLOAD\n'
if self.yduplicate is not None:
yduplicate = self.yduplicate
surface_msg += (
'YDUPLICATE\n'
f' {yduplicate}\n'
)
if self.component is not None:
surface_msg += (
'\n'
'COMPONENT\n'
f' {self.component}\n'
)
if self.has_scale():
xscale, yscale, zscale = self.scale
surface_msg += (
'\n'
'SCALE\n'
f' {xscale} {yscale} {zscale}\n'
)
#if 'body_file' in surface:
#body_filename = surface['body_file']
#surface_msg += (
#'\n'
#'BFIL\n'
#f'{body_filename}\n'
#)
#del surface['body_file']
if self.angle is not None:
surface_msg += (
'\n'
'ANGLE\n'
f' {self.angle}\n'
)
if self.has_translate():
dx, dy, dz = self.translate
surface_msg += (
'\n'
'TRANSLATE\n'
f' {dx} {dy} {dz}\n'
)
surface_msg += self._write_sections()
return surface_msg
def _write_chord_span(self) -> str:
"""writes the size of the surface"""
nchordwise = self.nchord
c_space = self.chord_spacing
nspanwise = self.nspan
s_space = self.span_spacing
if isinstance(nchordwise, integer_types) and isinstance(nspanwise, integer_types):
surface_msg = ('!Nchordwise Cspace Nspanwise Sspace\n'
f'{nchordwise} {c_space} {nspanwise} {s_space}\n')
elif isinstance(nchordwise, integer_types):
surface_msg = ('!Nchordwise Cspace Nspanwise Sspace\n'
f'{nchordwise} {c_space}\n')
else:
raise NotImplementedError(self)
return surface_msg
def _write_sections(self) -> str:
"""writes a section"""
surface_msg = ''
for isection, section in enumerate(self.sections):
section_msg = 'SECTION\n'
xle, yle, zle = section['xyz_LE']
chord, ainc, nspan, span_spacing = section['section']
if nspan is None or span_spacing is None:
section_msg += (
'#Xle Yle Zle Chord Ainc\n'
f'{xle} {yle} {zle} {chord} {ainc}\n'
)
else:
section_msg += (
'#Xle Yle Zle Chord Ainc Nspanwise Sspace\n'
f'{xle} {yle} {zle} {chord} {ainc} {nspan} {span_spacing}\n'
)
for control in section['control']:
section_msg += control.write()
if 'is_afile' in section and section['is_afile']:
afile = section['afile']
section_msg += (
'AFIL\n'
f'{afile}\n'
)
#!AFILE
#!a1.dat
#!CONTROL
#!flap 1.0 0.81 0. 0. 0. +1
surface_msg += section_msg + '#---------------------------\n'
return surface_msg
def get_nodes_elements(self,
isurface: int,
surfaces: List[Union[Surface, Body]],
dirname: str,
nodes: List[np.ndarray],
ipoint: int,
line_elements: List[np.ndarray],
quad_elements: List[np.ndarray],
is_cs_list: List[np.ndarray],
log: SimpleLogger) -> int:
"""builds the surface mesh"""
xyz_scale = self.scale
dxyz = self.translate
assert isinstance(xyz_scale, np.ndarray)
assert isinstance(dxyz, np.ndarray)
yduplicate = self.yduplicate
name = self.name
log.debug("name=%r ipoint=%s" % (name, ipoint))
#if 'chord' not in surface:
#log.debug('no chord for %s...' % name)
#return ipoint
ipoint = self._get_wing(
isurface, xyz_scale, dxyz, ipoint, nodes,
quad_elements, surfaces, is_cs_list, yduplicate, log)
return ipoint
def _get_wing(self, isurface: int,
xyz_scale: np.ndarray,
dxyz: np.ndarray,
ipoint: int,
nodes: List[np.ndarray],
quad_elements: List[np.ndarray],
surfaces: List[Union[Surface, Body]],
is_cs_list: List[np.ndarray],
yduplicate: float,
log: SimpleLogger) -> int:
log.debug('get_wing')
name = self.name
nchord = self.nchord
chord_spacing = self.chord_spacing
nspan = self.nspan
span_spacing = self.span_spacing
sections = self.sections
span_stations, airfoil_sections, spanwise_distances, nspans = get_airfoils_from_sections(sections, log)
nspan_total = nspan
if nspan_total is None:
nspan_total = sum(nspans)
log.debug('span_stations %s' % span_stations)
#for iairfoil, is_afile in enumerate(surface['is_afile']):
#pass
#surface['naca']
#print('naca =', naca)
#loft_sections = []
#for naca in airfoils:
#get_lofted_sections(None)
assert nchord > 0, nchord
#assert nspan > 0, nspan
nsections = len(sections)
if len(spanwise_distances) == 1:
nspanwise_panels = [nspan_total]
else:
dy = spanwise_distances.sum()
nspanwise_panels = (spanwise_distances / dy * nspan_total).astype('int32')
izero = np.where(nspanwise_panels == 0)[0]
nspanwise_panels[izero] = 1
assert isinstance(nchord, int), f'name={name!r} nchord={nchord}'
assert isinstance(nspan_total, int), f'name={name!r} nspan_total={nspan_total}'
x = get_spacing(nchord, chord_spacing)
#del surface['sections']
#print('wing surface:', str(str))
#print(surface.keys())
#print('name =', surface['name'])
for i in range(nsections-1):
            end = (i == nsections - 2)  # last spanwise segment (i runs to nsections-2)
section0 = sections[i]
nspan_global = nspanwise_panels[i]
section_data = section0['section']
nspani, span_spacingi = section_data[2:]
#if 'afile' in section0:
#del section0['afile']
#if 'control' in section0:
#del section0['control']
section1 = sections[i+1]
#if 'afile' in section1:
#del section1['afile']
#if 'control' in section1:
#del section1['control']
if nspan is not None:
                assert nspan_global >= 1, nspan_global
y = get_spacing(nspan_global, span_spacing)
else:
# nspan / Sspace for last section are ignored
assert isinstance(nspani, int), nspani
assert nspani >= 1, nspani
y = get_spacing(nspani, span_spacingi)
ipoint = _section_get_nodes_elements(
isurface, i,
section0, section1, airfoil_sections,
x, y, nspan, end, yduplicate,
xyz_scale, dxyz,
ipoint, nodes, quad_elements, surfaces, is_cs_list)
nodes_temp = np.vstack(nodes)
assert nodes_temp.shape[0] == ipoint, 'nodes_temp.shape=%s ipoint=%s' % (nodes_temp.shape, ipoint)
return ipoint
def __repr__(self) -> str:
msg = (
f'Surface(name={self.name})'
)
return msg
def _section_get_nodes_elements(isurface: int, i: int,
section0, section1,
airfoil_sections: List[Any],
x: np.ndarray,
y: np.ndarray,
nspan: int,
end: bool,
yduplicate: float,
xyz_scale: np.ndarray,
dxyz: np.ndarray,
ipoint: int,
nodes: List[np.ndarray],
quad_elements: List[np.ndarray],
surfaces: List[Union[Surface, Body]],
is_cs_list: List[np.ndarray]):
#print(section0)
#print('*****************')
#print(section1)
p1 = np.array(section0['xyz_LE'])
p4 = np.array(section1['xyz_LE'])
#Xle,Yle,Zle = airfoil's leading edge location
#Chord = the airfoil's chord (trailing edge is at Xle+Chord,Yle,Zle)
#Ainc = incidence angle, taken as a rotation (+ by RH rule) about
#the surface's spanwise axis projected onto the Y-Z plane.
#Nspan = number of spanwise vortices until the next section [ optional ]
#Sspace = controls the spanwise spacing of the vortices [ optional ]
#section = [chord, ainc]
#section = [chord, ainc, nspan, span_spacing]
chord0 = section0['section'][0]
chord1 = section1['section'][0]
#print('chords =', chord0, chord1)
#print('xyz_scale =', xyz_scale)
#incidence = section[1]
p2 = p1 + np.array([chord0, 0., 0.])
p3 = p4 + np.array([chord1, 0., 0.])
alpha0 = section0['section'][1]
alpha1 = section1['section'][1]
if airfoil_sections[i] is not None:
if not airfoil_sections[i].shape == airfoil_sections[i+1].shape:
raise RuntimeError('airfoil_sections[%i]=%s airfoil_sections[%i]=%s' % (
i, airfoil_sections[i].shape,
i + 1, airfoil_sections[i+1].shape))
interpolated_stations = interp_stations(
y, nspan,
airfoil_sections[i], chord0, alpha0, p1,
airfoil_sections[i+1], chord1, alpha1, p4, end=end)
#loft_sections.append(chord0*airfoil_sections[i])
#loft_sections.append(chord1*airfoil_sections[i+1])
assert len(x) > 1, x
point, element = points_elements_from_quad_points(p1, p2, p3, p4,
x, y, dtype='int32')
nelements = element.shape[0]
is_cs = _section_get_is_cs(section0, section1, x, y, nelements)
#dxyz[1] = 0.
ipoint = save_wing_elements(
isurface, point, element,
xyz_scale, dxyz,
nodes, quad_elements, surfaces,
is_cs_list, is_cs,
ipoint)
nodes_temp = np.vstack(nodes)
assert nodes_temp.shape[0] == ipoint, 'nodes_temp.shape=%s ipoint=%s' % (nodes_temp.shape, ipoint)
#point2 = None
#element2 = None
#print("p1[%i] = %s" % (i, p1[:2]))
if yduplicate is not None:
assert np.allclose(yduplicate, 0.0), 'yduplicate=%s' % yduplicate
p1[1] *= -1
p2[1] *= -1
p3[1] *= -1
p4[1] *= -1
# dxyz2 has to be calculated like this because dxyz is global to the surface
# and we need a mirrored dxyz
dxyz2 = np.array([dxyz[0], -dxyz[1], dxyz[2]])
point2, element2 = points_elements_from_quad_points(p1, p2, p3, p4,
x, y, dtype='int32')
ipoint = save_wing_elements(
isurface, point2, element2,
xyz_scale, dxyz2,
nodes, quad_elements, surfaces,
is_cs_list, is_cs,
ipoint)
nodes_temp = np.vstack(nodes)
assert nodes_temp.shape[0] == ipoint, 'nodes_temp.shape=%s ipoint=%s' % (nodes_temp.shape, ipoint)
#for e in elements:
#print(" ", e)
#print('npoint=%s nelement=%s' % (npoint, nelement2))
#break
#if not section['afile']:
#del section['afile']
#if not section['control']:
#del section['control']
#print(' ', section)
return ipoint
def get_airfoils_from_sections(sections, log: SimpleLogger) -> Tuple[np.ndarray,
List[Optional[np.ndarray]],
np.ndarray,
List[int]]:
nspans = []
airfoil_sections = []
is_airfoil_defined = False
span_stations = np.arange(len(sections))
leading_edges = []
trailing_edges = []
for isection, section in enumerate(sections):
leading_edge = section['xyz_LE']
section_data = section['section']
# chord, ainc, nspan, sspan
chord = section_data[0]
nspan = section_data[2]
trailing_edge = leading_edge + np.array([chord, 0., 0.])
leading_edges.append(leading_edge)
trailing_edges.append(trailing_edge)
nspans.append(nspan)
log.debug(str(section))
if 'is_afile' in section:
is_afile = section['is_afile']
is_airfoil_defined = True
else:
assert is_airfoil_defined is False, is_airfoil_defined
airfoil_sections.append(None)
continue
if is_afile:
xy = None
else:
naca = section['naca']
xy = get_naca_4_series(log, naca=naca)
airfoil_sections.append(xy)
leading_edges = np.array(leading_edges)
trailing_edges = np.array(trailing_edges)
quarter_chord = 0.75 * leading_edges + 0.25 * trailing_edges
# array([[-3.41 , 0. , 0. ],
# [-3.25 , 18. , 0. ],
# [-2.5 , 41.66 , 4.25 ],
# [-1.788, 55.75 , 9.38 ],
# [-0.95 , 57.64 , 10.064],
# [ 0. , 58.3 , 10.4 ]])
dedge = quarter_chord[1:, :] - quarter_chord[:-1, :]
# array([[ 0.16 , 18. , 0. ],
# [ 0.75 , 23.66 , 4.25 ],
# [ 0.712, 14.09 , 5.13 ],
# [ 0.838, 1.89 , 0.684],
# [ 0.95 , 0.66 , 0.336]])
distances = np.linalg.norm(dedge, axis=1)
# array([18.0007111 , 24.0503763 , 15.01172688, 2.17765929, 1.20457295])
return span_stations, airfoil_sections, distances, nspans
def get_naca_4_series(log: SimpleLogger, naca: str='2412') -> np.ndarray:
"""
m=max_camber=2%
p=located at 40%
t=max thickness=12%
"""
log.debug('naca airfoil=%s' % naca)
t = int(naca[2:]) / 100.
m = int(naca[0]) / 100.
p = int(naca[1]) / 10.
log.debug('t=%s m=%s p=percent_of_max_camber=%s' % (t, m, p))
# setup the x locations
if p > 0.:
# xa = x/chord before the location of max camber
# xb = x/chord after the location of max camber
xa = np.linspace(0., p, num=4, endpoint=False, retstep=False, dtype=None)
xb = np.linspace(p, 1., num=6, endpoint=True, retstep=False, dtype=None)
x = np.hstack([xa, xb])
else:
x = np.linspace(0., 1., num=100, endpoint=True, retstep=False, dtype=None)
xa = x
xb = np.array([])
log.debug('x = %s' % x)
# https://en.wikipedia.org/wiki/NACA_airfoil
# t - max thickness in percent chord (the last 2 digits)
y_half_thickness = 5*t * (0.2969*x**0.5 - 0.1260*x - 0.3516*x**2 + 0.2843*x**3 - 0.1015*x**4)
# p - location of max camber (second digit)
# m - max camber (1st digit)
#xc2 = xc**2
if p > 0.0:
y_camber_a = m/p**2 * (2*p*xa - xa**2) # if 0 <= x <= pc
y_camber_b = m/(1-p)**2 * ((1-2*p) + 2*p*xb - xb**2) # pc <= x <= c
y_camber = np.hstack([y_camber_a, y_camber_b])
# we're just going to be lazy for now and set theta to 0
#dy_dx_a = 2*m/p**2 * (p-xa) # if 0 <= x <= pc
#dy_dx_b = 2*m/(1-p)**2 * (p-xb) # pc <= x <= c
#theta = np.arctan(dy_dx)
theta = 0. # TODO: wrong
else:
y_camber = np.zeros(x.shape)
theta = 0.
# thickness is applied perpendicular to the camber line
x_upper = x - y_half_thickness * np.sin(theta)
x_lower = x + y_half_thickness * np.sin(theta)
y_upper = y_camber + y_half_thickness * np.cos(theta)
y_lower = y_camber - y_half_thickness * np.cos(theta)
xtotal = np.hstack([x_upper[::-1], x_lower[1:]])
ytotal = np.hstack([y_upper[::-1], y_lower[1:]])
#print('x_upper =', x_upper[::-1])
#print('x_lower =', x_lower[1:])
#print('xtotal =', xtotal)
xy = np.vstack([xtotal, ytotal]).T
#import matplotlib.pyplot as plt
#plt.figure(1)
#plt.plot(xtotal, ytotal)
#plt.grid(True)
#print(xy)
return xy
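# Usage sketch for get_naca_4_series (the SimpleLogger import is an
# assumption; any logger with a .debug() method works):
#
#   from cpylog import SimpleLogger
#   log = SimpleLogger(level='debug')
#   xy = get_naca_4_series(log, naca='2412')
#   # xy is an (npoints, 2) array tracing the upper surface TE -> LE,
#   # then the lower surface LE -> TE, in x/c, y/c coordinates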
def _section_get_is_cs(section0, section1,
x: np.ndarray, y: np.ndarray,
nelements: int) -> np.ndarray:
is_cs = np.zeros(nelements, dtype='int32')
if len(section0['control']) and len(section1['control']):
control0 = section0['control'][0]
control1 = section1['control'][0]
control0_xhinge = control0.xhinge
control1_xhinge = control1.xhinge
# wing in x/c coordinates
p1 = np.array([0., 0., 0.])
p2 = np.array([1., 0., 0.])
p3 = np.array([1., 1., 0.])
p4 = np.array([0., 1., 0.])
point_quad, element_quad = points_elements_from_quad_points(
p1, p2, p3, p4,
x, y, dtype='int32')
n1 = element_quad[:, 0]
n2 = element_quad[:, 1]
n3 = element_quad[:, 2]
n4 = element_quad[:, 3]
# del point_quad
xyz1 = point_quad[n1, :]
xyz2 = point_quad[n2, :]
xyz3 = point_quad[n3, :]
xyz4 = point_quad[n4, :]
centroid = (xyz1 + xyz2 + xyz3 + xyz4) / 4.
xcentroid = centroid[:, 0]
ycentroid = centroid[:, 1]
xhinge_required = ycentroid * (control1_xhinge - control0_xhinge) + control0_xhinge
i_cs = xcentroid > xhinge_required
is_cs[i_cs] = 1
return is_cs
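# The hinge x/c above is interpolated linearly across the normalized span:
# with control0_xhinge=0.7 and control1_xhinge=0.8 (hypothetical values), a
# panel centroid at ycentroid=0.5 is flagged as a control surface when its
# chordwise centroid exceeds 0.5*(0.8-0.7) + 0.7 = 0.75.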
def interp_stations(y, unused_nspan,
airfoil_section0, chord0, alpha0, xyz_le0,
airfoil_section1, chord1, alpha1, xyz_le1, end=True):
"""
    x is the chordwise coordinate x/c
    y is the airfoil ordinate y/c (mapped to z in 3D)
"""
if not airfoil_section0.shape == airfoil_section1.shape: # pragma: no cover
raise RuntimeError('airfoil_section0=%s airfoil_section1=%s' % (
airfoil_section0.shape, airfoil_section1.shape))
#import matplotlib.pyplot as plt
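    # NOTE: the incoming y stations are overridden with three fixed stations
    # here, so the y argument is effectively ignored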
y = np.array([0., 0.5, 1.0])
# first we scale and rotate the section
xy0 = airfoil_section0 * chord0
x0 = xy0[:, 0]
y0 = xy0[:, 1]
#plt.figure(2)
#plt.grid(True)
#plt.plot(x0, y0, 'ro')
x0_rotated = xyz_le0[0] + x0 * np.cos(alpha0) - y0 * np.sin(alpha0)
y0_rotated = xyz_le0[2] + x0 * np.sin(alpha0) + y0 * np.cos(alpha0)
#xy0_rotated = np.vstack([x0_rotated, y0_rotated])
xy1 = airfoil_section1 * chord1
x1 = xy1[:, 0]
y1 = xy1[:, 1]
#plt.plot(x1, y1, 'bo-')
#plt.show()
x1_rotated = xyz_le1[0] + x1 * np.cos(alpha1) - y1 * np.sin(alpha1)
y1_rotated = xyz_le1[2] + x1 * np.sin(alpha1) + y1 * np.cos(alpha1)
#plt.figure(4)
#plt.plot(x0_rotated, y0_rotated)
#plt.plot(x1_rotated, y1_rotated)
#plt.grid(True)
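    # TODO: the rotated coordinates computed above are overwritten below,
    # so the incidence rotation is currently bypassed (translation only)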
x0_rotated = xyz_le0[0] + x0
y0_rotated = xyz_le0[2] + y0
x1_rotated = xyz_le1[0] + x1
y1_rotated = xyz_le1[2] + y1
#xy1_rotated = np.vstack([x1_rotated, y1_rotated])
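    # NOTE: end is forced True here, so the last station is never trimmed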
end = True
if not end:
y = y[:-1]
#print(y.shape) # 3
#print(x0_rotated.shape) #
#y2 = y[np.newaxis, :] + 1
#print(y2)
# use linear interpolation to calculate the interpolated stations
#x_final = y[:, np.newaxis] * x0_rotated * (1.-y[:, np.newaxis]) * x1_rotated
#y_final = y[:, np.newaxis] * y0_rotated * (1.-y[:, np.newaxis]) * y1_rotated
#print(x_final.shape)
x_final = []
y_final = []
#plt.figure(5)
#plt.grid(True)
for yi in y:
x_finali = yi * x0_rotated + (1.-yi) * x1_rotated
y_finali = yi * y0_rotated + (1.-yi) * y1_rotated
#plt.plot(x_finali, y_finali)
x_final.append(x_finali)
y_final.append(y_finali)
x_final = np.array(x_final)
y_final = np.array(y_final)
#plt.show()
    # (nspan, nchord, 2) -> (2, nspan, nchord)
# (3, 11, 2) -> (2, 3, 11)
interpolated_stations = np.dstack([x_final, y_final])#.swapaxes(0, 1).swapaxes(0, 2)
#print(x_final.shape)
#print(xy_final.shape)
return interpolated_stations
|
11527243
|
import os
import numpy as np
import torch
from datasets.dataset_seq2seq import DatasetSeq2seq
from models import IdleLayer
def _read_ocr_dataset(root):
file_data = os.path.join(root, 'letter.data')
if not os.path.isfile(file_data):
raise(RuntimeError("Could not found OCR dataset (file letter.data) in " + root))
# read letters
letter_lines = {}
num_bits = 16 * 8
with open(file_data) as data_file:
for line in data_file:
# order of entries in a line: id, letter, next_id, word_id, position, fold, p_0_0, p_0_1, ..., p_15_7
entries = line.split()
assert(len(entries) == 6 + num_bits)
id = int(entries[0])
letter_lines[id] = entries
# find the ordering of letters
last_letters = {}
prev_letter = {}
for id, entries in letter_lines.items():
next_id = int(entries[2])
if next_id == -1:
last_letters[id] = True
else:
if next_id not in prev_letter:
prev_letter[next_id] = id
else:
raise(RuntimeError("Letter {0} is after both {1} and {2}".format(id, prev_letter[next_id], next_id)))
# merge letters into words
words_bits = {}
words_str = {}
folds = {}
used_letters = {}
max_word_length = 0
for last_letter in last_letters:
cur_word = []
cur_letter = last_letter
while cur_letter is not None:
used_letters[cur_letter] = True
cur_word = [cur_letter] + cur_word
if cur_letter in prev_letter:
cur_letter = prev_letter[cur_letter]
assert(cur_letter not in used_letters) # sanity check to avoid infinite loop in case of bad data
else:
cur_letter = None
# check the word and convert in to numpy
word_length = len(cur_word)
word_bits = np.zeros((word_length, num_bits), dtype='uint8')
word_str = np.zeros(word_length, dtype='uint8')
fold = None
word_id = None
for i_pos, id in enumerate(cur_word):
# extract all letter data
# order of entries in a line: id, letter, next_id, word_id, position, fold, p_0_0, p_0_1, ..., p_15_7
entries = letter_lines[id]
cur_id = int(entries[0])
cur_letter = entries[1]
cur_next_id = int(entries[2])
cur_word_id = int(entries[3])
cur_position = int(entries[4])
cur_fold = int(entries[5])
cur_bits = np.array(entries[6:], dtype='uint8')
assert(id == cur_id)
word_str[i_pos] = ord(cur_letter) - ord('a')
if word_id is None:
word_id = cur_word_id
else:
assert(word_id == cur_word_id)
assert(cur_position == i_pos + 1)
if fold is None:
fold = cur_fold
else:
assert(fold == cur_fold)
word_bits[i_pos] = cur_bits
assert(word_id not in words_bits)
words_bits[word_id] = word_bits
words_str[word_id] = word_str
folds[word_id] = fold
max_word_length = max(max_word_length, word_length)
assert(len(used_letters) == len(letter_lines)) # double check that all the letters are used
return words_bits, words_str, folds, max_word_length, num_bits
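# Usage sketch (the path is hypothetical; the function expects the Stanford
# OCR 'letter.data' file in the given directory):
#
#   words_bits, words_str, folds, max_len, num_bits = _read_ocr_dataset('data/ocr')
#   # words_bits[word_id] -> (word_length, 128) uint8 array of 16x8 pixel bitmaps
#   # words_str[word_id]  -> letter indices, with 'a' mapped to 0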
class OcrDataset(DatasetSeq2seq):
def __init__(self, root, batch_size, is_train=True, split_id=0, num_buckets=1, revert_input_sequence=True,
max_num_items=None):
words_bits, words_str, folds, max_word_length, num_features = _read_ocr_dataset(root)
# get the data from splits
if is_train:
self.word_ids = [word_id for word_id, fold in folds.items() if not fold == split_id]
else:
self.word_ids = [word_id for word_id, fold in folds.items() if fold == split_id]
# prepare data for torch
input_data = [torch.FloatTensor(words_bits[i].astype('float32')) for i in self.word_ids]
output_data = [torch.LongTensor(words_str[i].astype('int64')) for i in self.word_ids]
# init the base dataset class
DatasetSeq2seq.__init__(self, input_data, output_data, batch_size, num_buckets, revert_input_sequence,
max_num_items)
assert(all([x.size(0) == y.size(0) for x, y in zip(self.input_data, self.output_data)]))
self.dataset_name = 'OCR-' + ('train' if is_train else 'test')
self.root = root
self.max_length = 16 # max_word_length
self.num_features = num_features
self.num_input_helper_symbols = 0
self.input_empty_tokens = self.input_empty_tokens * 0
self.num_input_symbols = self.num_input_symbols * 0 + 2
def get_embedding_layer(self):
if self.input_embedding is None:
self.input_embedding = IdleLayer()
self.input_embedding_size = self.num_features
return self.input_embedding, self.input_embedding_size
|
11527246
|
def main(nums):
pd = [-1] * len(nums)
pd[0] = 0
for i, element in enumerate(nums):
        for j in range(i + 1, i + element + 1):
if j >= len(nums) or pd[j] != -1:
continue
pd[j] = pd[i]+1
return pd[-1]
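# Example: main([2, 3, 1, 1, 4]) returns 2 (jump 0 -> 1 -> 4).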
# pd = None
# def min_numers_jumps(nums, i):
# global pd
# if i >= len(nums):
# return 0
# if pd[i] != -1:
# return pd[i]
# min_jumps = 100000000
# for j in range(1,nums[i]+1):
# print(pd, "curr i= ", i, "j=",j)
# min_jumps = min(min_jumps, 1 + min_numers_jumps(nums, i + j))
# pd[i] = min_jumps
# return min_jumps
# def main(nums):
# this solution doesn't work because it recurses depth-first
# global pd
# result = min_numers_jumps(nums, 0)
# print("finish", pd)
# return result
# [0 ,-1,-1,-1,-1]
# [0 ,-1,-1,-1,-1]
# [2,3,1,1,4]
# 0 - 2
# 1 - 3
# pd[2] = 1
# [0,0,0,0,0]
# [0,1,1,0,0] - ind0
# [0,1,1,2,2] - ind1
# [0,1,1,2,2] - ind2
# [0,1,1,2,2] - ind3
# [0,1,1,2,2] - ind4
# ...
# N === 10^4
# [4, 3, 2, 3, , ,]
# int pd[100000];
# int array[10000];
# int mochila(int j, int n)
# {
# if (j >= n) return 0;
# if (pd[j] != -1) return pd[j];
# int minJump = 1e9
# for (int jump = 1; jump <= array[j]; jump++ )
# {
# minJump = min(minJump, jump + mochila(j+jump, n))
# }
# pd[j] = minJump;
# }
# for nums
# for 1..nums[i] # até 1000
|
11527304
|
import subprocess
import os
import infra.basetest
class TestUbi(infra.basetest.BRTest):
config = infra.basetest.BASIC_TOOLCHAIN_CONFIG + \
"""
BR2_TARGET_ROOTFS_UBIFS=y
BR2_TARGET_ROOTFS_UBIFS_LEBSIZE=0x7ff80
BR2_TARGET_ROOTFS_UBIFS_MINIOSIZE=0x1
BR2_TARGET_ROOTFS_UBI=y
BR2_TARGET_ROOTFS_UBI_PEBSIZE=0x80000
BR2_TARGET_ROOTFS_UBI_SUBSIZE=1
"""
# TODO: if you boot Qemu twice on the same UBI image, it fails to
# attach the image the second time, with "ubi0 error:
# ubi_read_volume_table: the layout volume was not found".
# To be investigated.
def test_run(self):
img = os.path.join(self.builddir, "images", "rootfs.ubi")
out = infra.run_cmd_on_host(self.builddir, ["file", img])
out = out.splitlines()
self.assertIn("UBI image, version 1", out[0])
subprocess.call(["truncate", "-s 128M", img])
self.emulator.boot(arch="armv7",
kernel="builtin",
kernel_cmdline=["root=ubi0:rootfs",
"ubi.mtd=0",
"rootfstype=ubifs"],
options=["-drive", "file={},if=pflash".format(img)])
self.emulator.login()
cmd = "mount | grep 'ubi0:rootfs on / type ubifs'"
_, exit_code = self.emulator.run(cmd)
self.assertEqual(exit_code, 0)
|
11527399
|
import os
import json
import base64
import random
from mock import patch
import pytest
from mapboxgl.viz import *
from mapboxgl.errors import TokenError, LegendError
from mapboxgl.utils import create_color_stops, create_numeric_stops
from matplotlib.pyplot import imread
@pytest.fixture()
def data():
with open('tests/points.geojson') as fh:
return json.loads(fh.read())
@pytest.fixture()
def polygon_data():
with open('tests/polygons.geojson') as fh:
return json.loads(fh.read())
@pytest.fixture()
def linestring_data():
with open('tests/linestrings.geojson') as fh:
return json.loads(fh.read())
TOKEN = '<PASSWORD>'
def test_secret_key_CircleViz(data):
"""Secret key raises a token error
"""
secret = '<KEY>'
with pytest.raises(TokenError):
CircleViz(data, access_token=secret)
def test_secret_key_GraduatedCircleViz(data):
"""Secret key raises a token error
"""
secret = '<KEY>'
with pytest.raises(TokenError):
GraduatedCircleViz(data, access_token=secret)
def test_secret_key_ChoroplethViz(polygon_data):
"""Secret key raises a token error
"""
secret = '<KEY>'
with pytest.raises(TokenError):
ChoroplethViz(polygon_data, access_token=secret)
def test_secret_key_LinestringViz(linestring_data):
"""Secret key raises a token error
"""
secret = '<KEY>'
with pytest.raises(TokenError):
LinestringViz(linestring_data, access_token=secret)
def test_token_env_CircleViz(monkeypatch, data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = CircleViz(data, color_property="Avg Medicare Payments")
assert TOKEN in viz.create_html()
def test_token_env_GraduatedCircleViz(monkeypatch, data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges")
assert TOKEN in viz.create_html()
def test_token_env_ChoroplethViz(monkeypatch, polygon_data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = ChoroplethViz(polygon_data, color_property="density")
assert TOKEN in viz.create_html()
def test_token_env_LinestringViz(monkeypatch, linestring_data):
"""Viz can get token from environment if not specified
"""
monkeypatch.setenv('MAPBOX_ACCESS_TOKEN', TOKEN)
viz = LinestringViz(linestring_data, color_property="sample")
assert TOKEN in viz.create_html()
def test_html_color(data):
viz = CircleViz(data,
color_property="Avg Medicare Payments",
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_html_GraduatedCircleViz(data):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_radius_legend_GraduatedCircleViz(data):
"""Raises a LegendError if legend is set to 'radius' legend_function and
legend_gradient is True.
"""
with pytest.raises(LegendError):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
legend_function='radius',
legend_gradient=True,
access_token=TOKEN)
viz.create_html()
def test_html_ChoroplethViz(polygon_data):
viz = ChoroplethViz(polygon_data,
color_property="density",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
assert "<html>" in viz.create_html()
def test_html_LinestringViz(linestring_data):
viz = LinestringViz(linestring_data,
color_property="sample",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
assert "<html>" in viz.create_html()
@patch('mapboxgl.viz.display')
def test_display_CircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = CircleViz(data,
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_CircleViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for CircleViz.
"""
viz = CircleViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_GraduatedCircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = GraduatedCircleViz(data,
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
radius_property='Avg Covered Charges',
radius_function_type='match',
color_function_type='match',
radius_default=2,
color_default='red',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_GraduatedCircleViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for CircleViz.
"""
viz = GraduatedCircleViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
color_property='Avg Medicare Payments',
label_property='Avg Medicare Payments',
radius_property='Avg Covered Charges',
radius_function_type='match',
color_function_type='match',
radius_default=2,
color_default='red',
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_HeatmapViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = HeatmapViz(data,
weight_property='Avg Medicare Payments',
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, 'red'], [0.5, 'blue'], [1, 'green']],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_HeatmapViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = HeatmapViz([],
vector_url='mapbox://rsbaumann.2pgmr66a',
vector_layer_name='healthcare-points-2yaw54',
vector_join_property='Provider Id',
data_join_property='Provider Id',
weight_property='Avg Medicare Payments',
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, 'red'], [0.5, 'blue'], [1, 'green']],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ClusteredCircleViz(display, data):
"""Assert that show calls the mocked display function
"""
viz = ClusteredCircleViz(data,
radius_stops=[[10, 0], [100, 1]],
color_stops=[[0, "red"], [10, "blue"], [1, "green"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ChoroplethViz(display, polygon_data):
"""Assert that show calls the mocked display function
"""
viz = ChoroplethViz(polygon_data,
color_property="density",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_ChoroplethViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for ChoroplethViz.
"""
data = [{"id": "06", "name": "California", "density": 241.7},
{"id": "11", "name": "District of Columbia", "density": 10065},
{"id": "25", "name": "Massachusetts", "density": 840.2},
{"id": "30", "name": "Montana", "density": 6.858},
{"id": "36", "name": "New York", "density": 412.3},
{"id": "49", "name": "Utah", "density": 34.3},
{"id": "72", "name": "<NAME>", "density": 1082}]
viz = ChoroplethViz(data,
vector_url='mapbox://mapbox.us_census_states_2015',
vector_layer_name='states',
vector_join_property='STATEFP',
data_join_property='id',
color_property='density',
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_extruded_ChoroplethViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for ChoroplethViz.
"""
data = [{"id": "06", "name": "California", "density": 241.7},
{"id": "11", "name": "District of Columbia", "density": 10065},
{"id": "25", "name": "Massachusetts", "density": 840.2},
{"id": "30", "name": "Montana", "density": 6.858},
{"id": "36", "name": "New York", "density": 412.3},
{"id": "49", "name": "Utah", "density": 34.3},
{"id": "72", "name": "<NAME>", "density": 1082}]
viz = ChoroplethViz(data,
vector_url='mapbox://mapbox.us_census_states_2015',
vector_layer_name='states',
vector_join_property='STATEFP',
data_join_property='id',
color_property='density',
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
height_property='density',
height_stops=create_numeric_stops([0, 50, 100, 500, 1500, 10000], 0, 1000000),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_LinestringViz(display, linestring_data):
"""Assert that show calls the mocked display function
"""
viz = LinestringViz(linestring_data,
color_property="sample",
color_stops=[[0.0, "red"], [50.0, "gold"], [1000.0, "blue"]],
access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_vector_LinestringViz(display):
"""Assert that show calls the mocked display function when using data-join technique
for LinestringViz.
"""
data = [{"elevation": x, "weight": random.randint(0,100)} for x in range(0, 21000, 10)]
viz = LinestringViz(data,
vector_url='mapbox://mapbox.mapbox-terrain-v2',
vector_layer_name='contour',
vector_join_property='ele',
data_join_property='elevation',
color_property="elevation",
color_stops=create_color_stops([0, 50, 100, 500, 1500], colors='YlOrRd'),
line_width_property='weight',
line_width_stops=create_numeric_stops([0, 25, 50, 75, 100], 1, 6),
access_token=TOKEN
)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_min_zoom(display, data):
viz = GraduatedCircleViz(data,
color_property="Avg Medicare Payments",
label_property="Avg Medicare Payments",
radius_property="Avg Covered Charges",
access_token=TOKEN,
min_zoom=10)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_max_zoom(display, data):
viz = HeatmapViz(data,
weight_property="Avg Medicare Payments",
weight_stops=[[10, 0], [100, 1]],
color_stops=[[0, "red"], [0.5, "blue"], [1, "green"]],
radius_stops=[[0, 1], [12, 30]],
access_token=TOKEN,
max_zoom=5)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ImageVizPath(display, data):
"""Assert that show calls the mocked display function
"""
image_path = os.path.join(os.path.dirname(__file__), 'mosaic.png')
coordinates = [
[-123.40515640309, 32.08296982365502],
[-115.92938988349292, 32.08296982365502],
[-115.92938988349292, 38.534294809274336],
[-123.40515640309, 38.534294809274336]][::-1]
viz = ImageViz(image_path, coordinates, access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_ImageVizArray(display, data):
"""Assert that show calls the mocked display function
"""
image_path = os.path.join(os.path.dirname(__file__), 'mosaic.png')
image = imread(image_path)
coordinates = [
[-123.40515640309, 32.08296982365502],
[-115.92938988349292, 32.08296982365502],
[-115.92938988349292, 38.534294809274336],
[-123.40515640309, 38.534294809274336]][::-1]
viz = ImageViz(image, coordinates, access_token=TOKEN)
viz.show()
display.assert_called_once()
@patch('mapboxgl.viz.display')
def test_display_RasterTileViz(display, data):
"""Assert that show calls the mocked display function
"""
tiles_url = 'https://a.tile.openstreetmap.org/{z}/{x}/{y}.png'
    viz = RasterTilesViz(tiles_url, access_token=TOKEN)
    viz.show()
    display.assert_called_once()
|
11527431
|
import asyncio
import pytest
from tests.utils_for_test import NumbersComparer
from throttled.exceptions import RateLimitExceeded
from throttled.strategies import Strategies
numbers_almost_equals = NumbersComparer(error=1e-2).almost_equals
def function() -> bool:
"""Boilerplate function to test the limiter decorator."""
return True
async def coroutine_function() -> bool:
"""Boilerplate coroutine function to test the limiter decorator."""
return True
def test_limited_function(limit, limiter_for):
limiter = limiter_for(Strategies.MOVING_WINDOW)
limited_func = limiter.decorate(function)
ret = [limited_func() for _ in range(limit.hits)]
with pytest.raises(RateLimitExceeded) as err:
limited_func()
assert all(ret)
assert numbers_almost_equals(err.value.retry_after, limit.interval)
async def test_limited_coroutine_function(limit, limiter_for):
limiter = limiter_for(Strategies.MOVING_WINDOW)
limited_coroutine_func = limiter.decorate(coroutine_function)
ret = await asyncio.gather(*(limited_coroutine_func() for _ in range(limit.hits)))
with pytest.raises(RateLimitExceeded) as err:
await limited_coroutine_func()
assert all(ret)
assert numbers_almost_equals(err.value.retry_after, limit.interval)
|
11527450
|
from __future__ import absolute_import, division, print_function
import pytest
import tensorflow as tf
import numpy as np
from lucid.optvis import objectives, param, render, transform
from lucid.modelzoo.vision_models import InceptionV1
np.random.seed(42)
NUM_STEPS = 3
@pytest.fixture
def inceptionv1():
return InceptionV1()
def assert_gradient_ascent(objective, model, batch=None, alpha=False, shape=None):
    """Optimizes the objective for a few Adam steps and asserts the loss increased."""
with tf.Graph().as_default() as graph, tf.Session() as sess:
shape = shape or [1, 32, 32, 3]
t_input = param.image(shape[1], h=shape[2], batch=batch, alpha=alpha)
if alpha:
t_input = transform.collapse_alpha_random()(t_input)
model.import_graph(t_input, scope="import", forget_xy_shape=True)
def T(layer):
if layer == "input":
return t_input
if layer == "labels":
return model.labels
return graph.get_tensor_by_name("import/%s:0" % layer)
loss_t = objective(T)
opt_op = tf.train.AdamOptimizer(0.1).minimize(-loss_t)
tf.global_variables_initializer().run()
        start_value, = sess.run([loss_t])
for _ in range(NUM_STEPS):
_ = sess.run([opt_op])
end_value, = sess.run([loss_t])
print(start_value, end_value)
assert start_value < end_value
def test_neuron(inceptionv1):
objective = objectives.neuron("mixed4a_pre_relu", 42)
assert_gradient_ascent(objective, inceptionv1)
def test_channel(inceptionv1):
objective = objectives.channel("mixed4a_pre_relu", 42)
assert_gradient_ascent(objective, inceptionv1)
@pytest.mark.parametrize("cossim_pow", [0, 1, 2])
def test_direction(cossim_pow, inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random((mixed_4a_depth))
objective = objectives.direction(
"mixed4a_pre_relu", random_direction, cossim_pow=cossim_pow
)
assert_gradient_ascent(objective, inceptionv1)
def test_direction_neuron(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([mixed_4a_depth])
objective = objectives.direction_neuron("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_direction_cossim(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([mixed_4a_depth]).astype(np.float32)
objective = objectives.direction_cossim("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_tensor_neuron(inceptionv1):
mixed_4a_depth = 508
random_direction = np.random.random([1,3,3,mixed_4a_depth])
objective = objectives.tensor_direction("mixed4a_pre_relu", random_direction)
assert_gradient_ascent(objective, inceptionv1)
def test_deepdream(inceptionv1):
objective = objectives.deepdream("mixed4a_pre_relu")
assert_gradient_ascent(objective, inceptionv1)
def test_tv(inceptionv1):
objective = objectives.total_variation("mixed4a_pre_relu")
assert_gradient_ascent(objective, inceptionv1)
def test_L1(inceptionv1):
objective = objectives.L1() # on input by default
assert_gradient_ascent(objective, inceptionv1)
def test_L2(inceptionv1):
objective = objectives.L2() # on input by default
assert_gradient_ascent(objective, inceptionv1)
def test_blur_input_each_step(inceptionv1):
objective = objectives.blur_input_each_step()
assert_gradient_ascent(objective, inceptionv1)
# TODO: add test_blur_alpha_each_step
# def test_blur_alpha_each_step(inceptionv1):
# objective = objectives.blur_alpha_each_step()
# assert_gradient_ascent(objective, inceptionv1, alpha=True)
def test_channel_interpolate(inceptionv1):
# TODO: should channel_interpolate fail early if batch is available?
objective = objectives.channel_interpolate(
"mixed4a_pre_relu", 0, "mixed4a_pre_relu", 42
)
assert_gradient_ascent(objective, inceptionv1, batch=5)
def test_penalize_boundary_complexity(inceptionv1):
# TODO: is input shape really unknown at evaluation time?
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.penalize_boundary_complexity([1, 32, 32, 3])
assert_gradient_ascent(-1 * objective, inceptionv1)
def test_alignment(inceptionv1):
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.alignment("mixed4a_pre_relu")
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
def test_diversity(inceptionv1):
# TODO: is the sign correctly defined on this objective? It seems I need to invert it.
objective = objectives.diversity("mixed4a_pre_relu")
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
def test_input_diff(inceptionv1):
random_image = np.random.random([1, 32, 32, 3])
objective = objectives.input_diff(random_image)
assert_gradient_ascent(-1 * objective, inceptionv1, batch=2)
@pytest.mark.xfail(reason="Unknown cause of failures; seems find in colab.")
def test_class_logit(inceptionv1):
objective = objectives.class_logit("softmax1", "kit fox")
assert_gradient_ascent(objective, inceptionv1, shape=[1, 224, 224, 3])
|
11527460
|
from tasks.salesforce_robot_library_base import SalesforceRobotLibraryBase
class Data(SalesforceRobotLibraryBase):
def bulk_delete(self, objects, *, where=None, hardDelete=False):
self._run_subtask("delete_data", objects=objects, where=where, hardDelete=hardDelete)
|
11527520
|
import time, pytest, inspect
from utils import *
from PIL import Image
def test_mixer_from_config(run_brave, create_config_file):
subtest_start_brave_with_mixers(run_brave, create_config_file)
subtest_assert_two_mixers(mixer_1_props={'width': 160, 'height': 90, 'pattern': 6})
subtest_change_mixer_pattern()
subtest_assert_two_mixers(mixer_1_props={'width': 160, 'height': 90, 'pattern': 7})
subtest_change_width_and_height()
subtest_assert_two_mixers(mixer_1_props={'width': 200, 'height': 300, 'pattern': 7})
subtest_delete_mixers()
    subtest_delete_nonexistent_mixer()
def subtest_start_brave_with_mixers(run_brave, create_config_file):
MIXER1 = {
'width': 160,
'height': 90,
'pattern': 6
}
MIXER2 = {
'width': 640,
'height': 360
}
config = {'mixers': [MIXER1, MIXER2]}
config_file = create_config_file(config)
run_brave(config_file.name)
time.sleep(1)
check_brave_is_running()
def subtest_assert_two_mixers(mixer_1_props):
assert_mixers([{
'id': 1,
'uid': 'mixer1',
**mixer_1_props,
}, {
'id': 2,
'uid': 'mixer2',
'width': 640, 'height': 360, 'pattern': 0,
}])
def subtest_change_mixer_pattern():
update_mixer(1, {'pattern': 7})
def subtest_change_width_and_height():
update_mixer(1, {'width': 200, 'height': 300})
def subtest_delete_mixers():
delete_mixer(1)
delete_mixer(2)
assert_mixers([])
def subtest_delete_nonexistent_mixer():
    delete_mixer(10, 400)
def test_mixer_from_api(run_brave):
run_brave()
# There is one mixer by default
assert_mixers([{'id': 1, 'width': 640, 'height': 360}])
    # Create a second mixer
add_mixer({'width': 200, 'height': 200})
time.sleep(1)
assert_mixers([{'id': 1, 'width': 640, 'height': 360},
{'id': 2, 'width': 200, 'height': 200}])
subtest_delete_mixers()
|
11527575
|
from support import lib,ffi
from qcgc_test import QCGCTest
import unittest
class ObjectTestCase(QCGCTest):
def test_write_barrier(self):
o = self.allocate(16)
self.push_root(o)
arena = lib.qcgc_arena_addr(ffi.cast("cell_t *", o))
o.hdr.flags = o.hdr.flags & ~lib.QCGC_GRAY_FLAG
self.assertEqual(ffi.cast("object_t *", o).flags & lib.QCGC_GRAY_FLAG, 0)
lib.qcgc_write(ffi.cast("object_t *", o))
self.assertEqual(ffi.cast("object_t *", o).flags & lib.QCGC_GRAY_FLAG, lib.QCGC_GRAY_FLAG)
lib.qcgc_state.phase = lib.GC_MARK
o = self.allocate(16)
self.push_root(o)
arena = lib.qcgc_arena_addr(ffi.cast("cell_t *", o))
o.hdr.flags = o.hdr.flags & ~lib.QCGC_GRAY_FLAG
self.assertEqual(ffi.cast("object_t *", o).flags & lib.QCGC_GRAY_FLAG, 0)
self.set_blocktype(ffi.cast("cell_t *", o), lib.BLOCK_BLACK)
lib.qcgc_state.phase = lib.GC_MARK
lib.qcgc_write(ffi.cast("object_t *", o))
self.assertEqual(ffi.cast("object_t *", o).flags & lib.QCGC_GRAY_FLAG, lib.QCGC_GRAY_FLAG)
self.assertEqual(lib.arena_gray_stack(arena).count, 1)
self.assertEqual(lib.arena_gray_stack(arena).items[0], o)
if __name__ == "__main__":
unittest.main()
|
11527576
|
import argparse
import fnmatch
import os
import re
import sys
from itertools import chain
from pathlib import Path
import toml
from robot.utils import FileReader
from robocop.exceptions import (
ArgumentFileNotFoundError,
NestedArgumentFileError,
InvalidArgumentError,
ConfigGeneralError,
)
from robocop.rules import RuleSeverity
from robocop.utils import RecommendationFinder
from robocop.version import __version__
def translate_pattern(pattern):
return re.compile(fnmatch.translate(pattern))
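# e.g. translate_pattern("01*") compiles a regex that matches rule ids such
# as "0101" or "0199" (fnmatch-style globbing).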
def find_severity_value(severity):
for sev in RuleSeverity:
if sev.value == severity.upper():
return sev
return RuleSeverity.INFO
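# Note: the lookup above is case-insensitive on the one-letter severity value
# and silently falls back to RuleSeverity.INFO for unrecognized input.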
class ParseDelimitedArgAction(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
container = getattr(namespace, self.dest)
container.update(values.split(","))
class ParseCheckerConfig(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
container = getattr(namespace, self.dest)
container.append(values.strip())
class ParseFileTypes(argparse.Action): # pylint: disable=too-few-public-methods
def __call__(self, parser, namespace, values, option_string=None):
filetypes = getattr(namespace, self.dest)
for filetype in values.split(","):
filetypes.add(filetype if filetype.startswith(".") else "." + filetype)
setattr(namespace, self.dest, filetypes)
class SetRuleThreshold(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, find_severity_value(values))
class SetListOption(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
pattern = values if values else "*"
if "*" in pattern:
pattern = translate_pattern(pattern)
setattr(namespace, self.dest, pattern)
class CustomArgParser(argparse.ArgumentParser):
def __init__(self, *args, from_cli=False, **kwargs):
self.from_cli = from_cli
super().__init__(*args, **kwargs)
def error(self, message):
if self.from_cli:
super().error(message)
else:
raise InvalidArgumentError(message)
class Config:
def __init__(self, root=None, from_cli=False):
self.from_cli = from_cli
self.exec_dir = os.path.abspath(".")
self.root = Path(root) if root is not None else root
self.include = set()
self.exclude = set()
self.ignore = set()
self.reports = {"return_status"}
self.threshold = RuleSeverity.INFO
self.configure = []
self.format = "{source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})"
self.paths = ["."]
self.ext_rules = set()
self.include_patterns = []
self.exclude_patterns = []
self.filetypes = {".robot", ".resource", ".tsv"}
self.list = ""
self.list_configurables = ""
self.list_reports = False
self.output = None
self.recursive = True
self.verbose = False
self.config_from = ""
self.parser = self._create_parser()
HELP_MSGS = {
"help_paths": "List of paths (files or directories) to be parsed by Robocop.",
"help_include": "Run Robocop only with specified rules. You can define rule by its name or id.\n"
"Glob patterns are supported.",
"help_exclude": "Ignore specified rules. You can define rule by its name or id.\n"
"Glob patterns are supported.",
"help_ext_rules": "List of paths with custom rules.",
"help_reports": "Generate reports after scan. You can enable reports by listing them in comma\n"
"separated list:\n"
"--reports rules_by_id,rules_by_error_type,scan_timer\n"
"To enable all reports use all:\n"
"--report all",
"help_format": "Format of output message. "
"You can use placeholders to change the way an issue is reported.\n"
"Default: {source}:{line}:{col} [{severity}] {rule_id} {desc} ({name})",
"help_configure": "Configure checker or report with parameter value. Usage:\n"
"-c message_name_or_id:param_name:param_value\nExample:\n"
"-c line-too-long:line_length:150\n"
"--configure 0101:severity:E",
"help_list": "List all available rules. You can use optional pattern argument.",
"help_list_confs": "List all available rules with configurable parameters. "
"You can use optional pattern argument.",
"help_list_reports": "List all available reports.",
"help_output": "Path to output file.",
"help_filetypes": "Comma separated list of file extensions to be scanned by Robocop",
"help_threshold": f"Disable rules below given threshold. Available message levels: "
f'{" < ".join(sev.value for sev in RuleSeverity)}',
"help_recursive": "Use this flag to stop scanning directories recursively.",
"help_argfile": "Path to file with arguments.",
"help_ignore": "Ignore file(s) and path(s) provided. Glob patterns are supported.",
"help_info": "Print this help message and exit.",
"help_version": "Display Robocop version.",
"help_verbose": "Display extra information.",
"directives": "1. Serve the public trust\n2. Protect the innocent\n3. Uphold the law\n4. [ACCESS " "DENIED]",
"epilog": "For full documentation visit: https://github.com/MarketSquare/robotframework-robocop",
}
def remove_severity(self):
self.include = {self.replace_severity_values(rule) for rule in self.include}
self.exclude = {self.replace_severity_values(rule) for rule in self.exclude}
for index, conf in enumerate(self.configure):
if conf.count(":") != 2:
continue
message, param, value = conf.split(":")
message = self.replace_severity_values(message)
self.configure[index] = f"{message}:{param}:{value}"
@staticmethod
def filter_patterns_from_names(only_names, only_patterns):
filtered = set()
for rule in only_names:
if "*" in rule:
only_patterns.append(translate_pattern(rule))
else:
filtered.add(rule)
return filtered
def translate_patterns(self):
self.include = self.filter_patterns_from_names(self.include, self.include_patterns)
self.exclude = self.filter_patterns_from_names(self.exclude, self.exclude_patterns)
def preparse(self, args):
args = sys.argv[1:] if args is None else args
parsed_args = []
args = (arg for arg in args)
for arg in args:
if arg in ("-A", "--argumentfile"):
try:
argfile = next(args)
except StopIteration:
raise ArgumentFileNotFoundError("") from None
parsed_args += self.load_args_from_file(argfile)
else:
parsed_args.append(arg)
return parsed_args
def load_args_from_file(self, argfile):
try:
with FileReader(argfile) as arg_f:
args = []
for line in arg_f.readlines():
if line.strip().startswith("#"):
continue
for arg in line.split(" ", 1):
arg = arg.strip()
if not arg:
continue
args.append(arg)
if "-A" in args or "--argumentfile" in args:
raise NestedArgumentFileError(argfile)
if args:
self.config_from = argfile
return args
except FileNotFoundError:
raise ArgumentFileNotFoundError(argfile) from None
def _create_parser(self):
parser = CustomArgParser(
prog="robocop",
formatter_class=argparse.RawTextHelpFormatter,
description="Static code analysis tool for Robot Framework",
epilog=self.HELP_MSGS["epilog"],
add_help=False,
from_cli=self.from_cli,
)
required = parser.add_argument_group(title="Required parameters")
optional = parser.add_argument_group(title="Optional parameters")
required.add_argument(
"paths",
metavar="paths",
type=str,
nargs="*",
default=self.paths,
help=self.HELP_MSGS["help_paths"],
)
optional.add_argument(
"-i",
"--include",
action=ParseDelimitedArgAction,
default=self.include,
metavar="RULES",
help=self.HELP_MSGS["help_include"],
)
optional.add_argument(
"-e",
"--exclude",
action=ParseDelimitedArgAction,
default=self.exclude,
metavar="RULES",
help=self.HELP_MSGS["help_exclude"],
)
optional.add_argument(
"-rules",
"--ext-rules",
action=ParseDelimitedArgAction,
default=self.ext_rules,
help=self.HELP_MSGS["help_ext_rules"],
)
optional.add_argument(
"-nr",
"--no-recursive",
dest="recursive",
action="store_false",
help=self.HELP_MSGS["help_recursive"],
)
optional.add_argument(
"-r",
"--reports",
action=ParseDelimitedArgAction,
default=self.reports,
help=self.HELP_MSGS["help_reports"],
)
optional.add_argument(
"-f",
"--format",
type=str,
default=self.format,
help=self.HELP_MSGS["help_format"],
)
optional.add_argument(
"-c",
"--configure",
action=ParseCheckerConfig,
default=self.configure,
metavar="CONFIGURABLE",
help=self.HELP_MSGS["help_configure"],
)
optional.add_argument(
"-l",
"--list",
action=SetListOption,
nargs="?",
const="",
default=self.list,
metavar="PATTERN",
help=self.HELP_MSGS["help_list"],
)
optional.add_argument(
"-lc",
"--list-configurables",
action=SetListOption,
nargs="?",
const="",
default=self.list_configurables,
metavar="PATTERN",
help=self.HELP_MSGS["help_list_confs"],
)
optional.add_argument(
"-lr",
"--list-reports",
action="store_true",
default=self.list_reports,
help=self.HELP_MSGS["help_list_reports"],
)
optional.add_argument(
"-o",
"--output",
type=argparse.FileType("w"),
default=self.output,
metavar="PATH",
help=self.HELP_MSGS["help_output"],
)
optional.add_argument(
"-ft",
"--filetypes",
action=ParseFileTypes,
default=self.filetypes,
help=self.HELP_MSGS["help_filetypes"],
)
optional.add_argument(
"-t",
"--threshold",
action=SetRuleThreshold,
default=self.threshold,
help=self.HELP_MSGS["help_threshold"],
)
optional.add_argument("-A", "--argumentfile", metavar="PATH", help=self.HELP_MSGS["help_argfile"])
optional.add_argument(
"-g",
"--ignore",
action=ParseDelimitedArgAction,
default=self.ignore,
metavar="PATH",
help=self.HELP_MSGS["help_ignore"],
)
optional.add_argument("-h", "--help", action="help", help=self.HELP_MSGS["help_info"])
optional.add_argument(
"-v",
"--version",
action="version",
version=__version__,
help=self.HELP_MSGS["help_version"],
)
optional.add_argument("-vv", "--verbose", action="store_true", help=self.HELP_MSGS["help_verbose"])
optional.add_argument(
"--directives",
action="version",
version=self.HELP_MSGS["directives"],
help=argparse.SUPPRESS,
)
return parser
def parse_opts(self, args=None, from_cli=True):
args = self.preparse(args) if from_cli else None
if not args or args == ["--verbose"] or args == ["-vv"]:
loaded_args = self.load_default_config_file()
if loaded_args is None:
self.load_pyproject_file()
else:
# thanks for this we can have config file together with some cli options like --verbose
args = [*args, *loaded_args] if args is not None else loaded_args
if args:
args = self.parser.parse_args(args)
for key, value in dict(**vars(args)).items():
if key in self.__dict__:
self.__dict__[key] = value
self.remove_severity()
self.translate_patterns()
if self.verbose:
if self.config_from:
print(f"Loaded configuration from {self.config_from}")
else:
print("No config file found or configuration is empty. Using default configuration")
return args
def load_default_config_file(self):
robocop_path = self.find_file_in_project_root(".robocop")
if robocop_path.is_file():
return self.load_args_from_file(robocop_path)
return None
def find_file_in_project_root(self, config_name):
root = self.root or Path.cwd()
for parent in (root, *root.parents):
if (parent / ".git").exists() or (parent / config_name).is_file():
return parent / config_name
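        # no .git directory or config file found: fall back to the last
        # parent examined (the filesystem root)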
return parent / config_name
def load_pyproject_file(self):
pyproject_path = self.find_file_in_project_root("pyproject.toml")
if not pyproject_path.is_file():
return
try:
config = toml.load(str(pyproject_path))
except toml.TomlDecodeError as err:
raise InvalidArgumentError(f"Failed to decode {str(pyproject_path)}: {err}") from None
config = config.get("tool", {}).get("robocop", {})
if self.parse_toml_to_config(config):
self.config_from = pyproject_path
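    # Illustrative pyproject.toml section consumed above (keys and values
    # are examples only):
    #
    #   [tool.robocop]
    #   paths = ["tests"]
    #   include = ["0503"]
    #   reports = ["rules_by_id"]
    #   no-recursive = true
    #   threshold = "W"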
@staticmethod
def replace_in_set(container, old_key, new_key):
if old_key not in container:
return
container.remove(old_key)
container.add(new_key)
def validate_rule_names(self, rules):
# add rule name in form of old_name: new_name
deprecated = {
"missing-whitespace-after-setting": "not-enough-whitespace-after-setting",
"variable-should-left-aligned": "variable-should-be-left-aligned",
"0304": "0406",
"invalid-char-in-name": "not-allowed-char-in-name",
}
for rule in chain(self.include, self.exclude):
if rule in deprecated: # update warning description to specific case
print(
f"### DEPRECATION WARNING ###\nThe name (or ID) of the rule '{rule}' is "
f"renamed to '{deprecated[rule]}'. "
f"Update your configuration if you're using old name. "
f"This information will disappear in the next version (1.12.0)\n\n"
)
self.replace_in_set(self.include, rule, deprecated[rule])
self.replace_in_set(self.exclude, rule, deprecated[rule])
elif rule not in rules:
similar = RecommendationFinder().find_similar(rule, rules)
raise ConfigGeneralError(f"Provided rule '{rule}' does not exist.{similar}")
def is_rule_enabled(self, rule):
if self.is_rule_disabled(rule):
return False
if self.include or self.include_patterns: # if any include pattern, it must match with something
if rule.rule_id in self.include or rule.name in self.include:
return True
for pattern in self.include_patterns:
if pattern.match(rule.rule_id) or pattern.match(rule.name):
return True
return False
return True
def is_rule_disabled(self, rule):
if rule.severity < self.threshold:
return True
if rule.rule_id in self.exclude or rule.name in self.exclude:
return True
for pattern in self.exclude_patterns:
if pattern.match(rule.rule_id) or pattern.match(rule.name):
return True
return False
def is_path_ignored(self, path):
for pattern in self.ignore:
if path.match(pattern):
return True
return False
@staticmethod
def replace_severity_values(message):
sev = "".join(c.value for c in RuleSeverity)
if re.match(f"[{sev}][0-9]{{4,}}", message):
for char in sev:
message = message.replace(char, "")
return message
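    # e.g. replace_severity_values("E0101") -> "0101": the leading severity
    # letter is stripped when the message looks like <severity><rule id>.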
def parse_toml_to_config(self, toml_data):
if not toml_data:
return False
assign_type = {"paths", "format", "configure"}
set_type = {"include", "exclude", "reports", "ignore", "ext_rules"}
toml_data = {key.replace("-", "_"): value for key, value in toml_data.items()}
for key, value in toml_data.items():
if key in assign_type:
self.__dict__[key] = value
elif key in set_type:
self.__dict__[key].update(set(value))
elif key == "filetypes":
for filetype in toml_data["filetypes"]:
self.filetypes.add(filetype if filetype.startswith(".") else "." + filetype)
elif key == "threshold":
self.threshold = find_severity_value(value)
elif key == "output":
self.output = open(value, "w")
elif key == "no_recursive":
self.recursive = not value
elif key == "verbose":
self.verbose = value
else:
raise InvalidArgumentError(f"Option '{key}' is not supported in pyproject.toml configuration file.")
return True
|
11527624
|
import os
import sqlite3
import unittest
from contextlib import redirect_stdout
from linkml_runtime.utils.compile_python import compile_python
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from linkml.generators.sqlddlgen import SQLDDLGenerator
from tests.test_generators.environment import env
from tests.test_generators.test_pythongen import make_python
SCHEMA = env.input_path('kitchen_sink.yaml')
DB = env.expected_path('kitchen_sink.db')
SQLA_CODE = env.expected_path('kitchen_sink_db_mapping.py')
DDL_PATH = env.expected_path('kitchen_sink.ddl.sql')
BASIC_DDL_PATH = env.expected_path('kitchen_sink.basic.ddl.sql')
BASIC_SQLA_CODE = env.expected_path('kitchen_sink_basic_db_mapping.py')
SQLDDLLOG = env.expected_path('sqlddl_log.txt')
NAME = 'fred'
CITY = 'Gotham city'
def create_and_compile_sqla_bindings(gen: SQLDDLGenerator, path: str = SQLA_CODE):
with open(path, 'w') as stream:
with redirect_stdout(stream):
gen.write_sqla_python_imperative('.kitchen_sink')
module = compile_python(path)
return module
class SQLDDLTestCase(unittest.TestCase):
def test_sqlddl_basic(self):
""" DDL """
gen = SQLDDLGenerator(SCHEMA, mergeimports=True, direct_mapping=True)
ddl = gen.serialize()
with open(BASIC_DDL_PATH, 'w') as stream:
stream.write(ddl)
with open(BASIC_SQLA_CODE, 'w') as stream:
with redirect_stdout(stream):
gen.write_sqla_python_imperative('output.kitchen_sink')
# don't load this - will conflict
#create_and_compile_sqla_bindings(gen, BASIC_SQLA_CODE)
def test_sqlddl(self):
""" DDL """
kitchen_module = make_python(False)
gen = SQLDDLGenerator(SCHEMA, mergeimports=True, rename_foreign_keys=True)
ddl = gen.serialize()
with open(SQLDDLLOG, 'w') as log:
# with open(DDL_PATH, 'w') as stream:
# stream.write(ddl)
#print(ddl)
try:
os.remove(DB)
except OSError:
pass
con = sqlite3.connect(DB)
cur = con.cursor()
cur.executescript(ddl)
cur.execute("INSERT INTO Person (id, name, age_in_years) VALUES (?,?,?)", ('P1', NAME, 33))
cur.execute("INSERT INTO Person_aliases (backref_id, aliases) VALUES (?,?)", ('P1', 'wibble'))
cur.execute("INSERT INTO Address (Person_id, street, city) VALUES (?,?,?)", ('P1', '99 foo street', 'SF'))
cur.execute("select * from Person where name=:name", {"name": NAME})
log.write(f"{cur.fetchall()}\n")
con.commit()
con.close()
#print(gen.to_sqla_python())
#output = StringIO()
#with redirect_stdout(output):
# gen.write_sqla_python_imperative('output.kitchen_sink')
#print(output.getvalue())
#with open(SQLA_CODE, 'w') as stream:
# stream.write(output.getvalue())
kitchen_module = create_and_compile_sqla_bindings(gen, SQLA_CODE)
# test SQLA
engine = create_engine(f'sqlite:///{DB}')
Session = sessionmaker(bind=engine)
session = Session()
q = session.query(kitchen_module.Person).where(kitchen_module.Person.name == NAME)
log.write(f'Q={q}\n')
#for row in q.all():
# print(f'Row={row}')
agent = kitchen_module.Agent(id='Agent03')
log.write(f'Agent={agent}\n')
activity = kitchen_module.Activity(id='Act01', was_associated_with=agent)
session.add(agent)
session.add(activity)
session.flush()
q = session.query(kitchen_module.Activity)
for row in q.all():
log.write(f'Row={row}\n')
#person = Person(id='P22', name='joe', addresses=[Address(street='1 Acacia Ave', city='treetown')])
person = kitchen_module.Person(id='P22', name='joe',
aliases=['foo'],
addresses=[kitchen_module.Address(street='1 random street', city=CITY)],
has_employment_history=[kitchen_module.EmploymentEvent(started_at_time='2020-01-01', is_current=True)],
has_familial_relationships=[],
has_medical_history=[])
person = kitchen_module.Person(id='P22', name='joe')
log.write(f'Aliases={person.aliases}\n')
session.flush()
#todo: fix this
#session.add(person)
org = kitchen_module.Organization(id='org1', name='foo org', aliases=['bar org'])
org.aliases = ['abc def']
session.add(org)
session.flush()
for o in session.query(kitchen_module.Organization).all():
log.write(f'org = {o}\n')
q = session.query(kitchen_module.Person)
p: kitchen_module.Person
is_found_address = False
for p in q.all():
log.write(f'Person={p.id} {p.name}\n')
for a in p.addresses:
log.write(f' Address={a}\n')
#if a.city == CITY:
# is_found_address = True
#for alias in p.aliases:
# print(f' AKA={a}')
#assert is_found_address
session.commit()
if __name__ == '__main__':
unittest.main()
|
11527650
|
from django.conf.urls import re_path
from .views import (
proposal_submit,
proposal_submit_kind,
proposal_detail,
proposal_edit,
proposal_speaker_manage,
proposal_cancel,
proposal_pending_join,
proposal_pending_decline,
document_create,
document_delete,
document_download,
)
urlpatterns = [
re_path(r"^submit/$", proposal_submit, name="proposal_submit"),
re_path(
r"^submit/([\w\-]+)/$",
proposal_submit_kind,
name="proposal_submit_kind",
),
re_path(r"^(\d+)/$", proposal_detail, name="proposal_detail"),
re_path(r"^(\d+)/edit/$", proposal_edit, name="proposal_edit"),
re_path(
r"^(\d+)/speakers/$",
proposal_speaker_manage,
name="proposal_speaker_manage",
),
re_path(r"^(\d+)/cancel/$", proposal_cancel, name="proposal_cancel"),
re_path(
r"^(\d+)/join/$", proposal_pending_join, name="proposal_pending_join"
),
re_path(
r"^(\d+)/decline/$",
proposal_pending_decline,
name="proposal_pending_decline",
),
re_path(
r"^(\d+)/document/create/$",
document_create,
name="proposal_document_create",
),
re_path(
r"^document/(\d+)/delete/$",
document_delete,
name="proposal_document_delete",
),
re_path(
r"^document/(\d+)/([^/]+)$",
document_download,
name="proposal_document_download",
),
]
|
11527729
|
from __future__ import absolute_import
input_name = '../examples/linear_elasticity/linear_elastic_damping.py'
output_name_trunk = 'test_linear_elastic_damping'
from tests_basic import TestInputEvolutionary
class Test( TestInputEvolutionary ):
pass
|
11527731
|
from django.core.management import BaseCommand
from ...models import Metric
from ...utils import reset_generation_key
class Command(BaseCommand):
def handle(self, **options):
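# Record a fresh measurement for every Metric subclass, then reset the
# generation key.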
verbose = int(options.get('verbosity', 0))
for MC in Metric.__subclasses__():
for metric in MC.objects.all():
if verbose:
self.stdout.write("Updating %s ... " % metric.name.lower(), ending="")
datum = metric.data.create(measurement=metric.fetch())
if verbose:
self.stdout.write(str(datum.measurement))
reset_generation_key()
|
11527743
|
from builtins import chr
from builtins import object
class Observable(object):
def __init__(self):
self.Callbacks = []
def addHandler(self, h):
if h not in self.Callbacks:
self.Callbacks.append(h)
def notify(self, rows, cols):
for cbk in self.Callbacks:
cbk.update_geometry(rows, cols)
class ViewMode(Observable, object):
SPACER = 4
def __init__(self):
super(ViewMode, self).__init__()
self.selector = None
self._edit = False
"""
Convert IBM437 character codes 0x00 - 0xFF into Unicode.
http://svn.openmoko.org/trunk/src/host/qemu-neo1973/phonesim/lib/serial/qatutils.cpp
"""
cp437ToUnicode = [0x0020, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007,
0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017,
0x0018, 0x0019, 0x001c, 0x001b, 0x007f, 0x001d, 0x001e, 0x001f,
0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027,
0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
0x0030, 0x0031, 0x0032, 0x0033, 0x0034, 0x0035, 0x0036, 0x0037,
0x0038, 0x0039, 0x003a, 0x003b, 0x003c, 0x003d, 0x003e, 0x003f,
0x0040, 0x0041, 0x0042, 0x0043, 0x0044, 0x0045, 0x0046, 0x0047,
0x0048, 0x0049, 0x004a, 0x004b, 0x004c, 0x004d, 0x004e, 0x004f,
0x0050, 0x0051, 0x0052, 0x0053, 0x0054, 0x0055, 0x0056, 0x0057,
0x0058, 0x0059, 0x005a, 0x005b, 0x005c, 0x005d, 0x005e, 0x005f,
0x0060, 0x0061, 0x0062, 0x0063, 0x0064, 0x0065, 0x0066, 0x0067,
0x0068, 0x0069, 0x006a, 0x006b, 0x006c, 0x006d, 0x006e, 0x006f,
0x0070, 0x0071, 0x0072, 0x0073, 0x0074, 0x0075, 0x0076, 0x0077,
0x0078, 0x0079, 0x007a, 0x007b, 0x007c, 0x007d, 0x007e, 0x001a,
0x00c7, 0x00fc, 0x00e9, 0x00e2, 0x00e4, 0x00e0, 0x00e5, 0x00e7,
0x00ea, 0x00eb, 0x00e8, 0x00ef, 0x00ee, 0x00ec, 0x00c4, 0x00c5,
0x00c9, 0x00e6, 0x00c6, 0x00f4, 0x00f6, 0x00f2, 0x00fb, 0x00f9,
0x00ff, 0x00d6, 0x00dc, 0x00a2, 0x00a3, 0x00a5, 0x20a7, 0x0192,
0x00e1, 0x00ed, 0x00f3, 0x00fa, 0x00f1, 0x00d1, 0x00aa, 0x00ba,
0x00bf, 0x2310, 0x00ac, 0x00bd, 0x00bc, 0x00a1, 0x00ab, 0x00bb,
0x2591, 0x2592, 0x2593, 0x2502, 0x2524, 0x2561, 0x2562, 0x2556,
0x2555, 0x2563, 0x2551, 0x2557, 0x255d, 0x255c, 0x255b, 0x2510,
0x2514, 0x2534, 0x252c, 0x251c, 0x2500, 0x253c, 0x255e, 0x255f,
0x255a, 0x2554, 0x2569, 0x2566, 0x2560, 0x2550, 0x256c, 0x2567,
0x2568, 0x2564, 0x2565, 0x2559, 0x2558, 0x2552, 0x2553, 0x256b,
0x256a, 0x2518, 0x250c, 0x2588, 0x2584, 0x258c, 0x2590, 0x2580,
0x03b1, 0x00df, 0x0393, 0x03c0, 0x03a3, 0x03c3, 0x03bc, 0x03c4,
0x03a6, 0x0398, 0x03a9, 0x03b4, 0x221e, 0x03c6, 0x03b5, 0x2229,
0x2261, 0x00b1, 0x2265, 0x2264, 0x2320, 0x2321, 0x00f7, 0x2248,
0x00b0, 0x2219, 0x00b7, 0x221a, 0x207f, 0x00b2, 0x25a0, 0x00a0]
def cp437(self, c):
return chr(self.cp437ToUnicode[c])
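# e.g. self.cp437(0xB0) -> '\u2591' (light-shade block)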
def getPageOffset(self):
raise NotImplementedError('method not implemented.')
# return cols, rows of the view
def getGeometry(self):
raise NotImplementedError('method not implemented.')
def startSelection(self):
raise NotImplementedError('method not implemented.')
def stopSelection(self):
raise NotImplementedError('method not implemented.')
def draw(self, refresh=False):
raise NotImplementedError('method not implemented.')
# returns x,y cursor position in page
def getCursorOffsetInPage(self):
x, y = self.cursor.getPosition()
return y * self.COLUMNS + x
def handleKeyPressEvent(self, modifier, key):
raise Exception("not implemented")
def handleKeyReleaseEvent(self, modifier, key):
raise Exception("not implemented")
# get what's on the screen
# pageOffset - which page. None - current page
#
# return bytearray containing what it is displayed currently on the screen
def getDisplayablePage(self, pageOffset=None):
data = self.dataModel.getData()
dataOffset = self.dataModel.getOffset()
cols, rows = self.getGeometry()
if pageOffset:
return bytearray(data[dataOffset:dataOffset + rows * cols * pageOffset])
return bytearray(data[dataOffset:dataOffset + rows * cols])
# moves cursor to offset in page, or moves to page
def goTo(self, offset):
# typical goTo
if self.dataModel.offsetInPage(offset):
# if in the current page, just move the cursor
x, y = self.dataModel.getXYInPage(offset)
self.cursor.moveAbsolute(y, x)
self.draw(refresh=False)
else:
# else, move page
self.dataModel.goTo(offset)
self.cursor.moveAbsolute(0, 0)
self.draw(refresh=True)
# self.draw(refresh=False)
if self.widget:
self.widget.update()
def isEditable(self):
return False
def setEditMode(self, value):
self._edit = value
def isInEditMode(self):
return self._edit
def getHeaderInfo(self):
return ''
|
11527803
|
import math
def main():
s1 = float(input("Enter the sides: "))
s2 = float(input("Enter the sides: "))
s3 = float(input("Enter the sides: "))
area1 = area(s1, s2, s3)
print("The area is", area1)
def area(p1, p2, p3):
# Heron's formula: s is the semi-perimeter
s = (p1 + p2 + p3) / 2
print(s)
a = math.sqrt(s * (s - p1) * (s - p2) * (s - p3))
return a
main()
|
11527807
|
import os
from click.testing import CliRunner
from hatch.cli import hatch
from hatch.env import (
get_editable_packages, get_installed_packages, install_packages
)
from hatch.utils import env_vars, temp_chdir
from hatch.venv import create_venv, is_venv, venv
from ..utils import requires_internet, wait_until
def create_test_passing(d):
with open(os.path.join(d, 'tests', 'test_add.py'), 'w') as f:
f.write(
'def test_add():\n'
' assert 1 + 2 == 3\n'
)
def create_test_failing(d):
with open(os.path.join(d, 'tests', 'test_add.py'), 'w') as f:
f.write(
'def test_add():\n'
' assert 1 + 2 != 3\n'
)
def create_test_complete_coverage(d, pkg):
with open(os.path.join(d, pkg, 'core.py'), 'w') as f:
f.write(
'def square_5():\n'
' return 5 ** 2\n'
)
with open(os.path.join(d, 'tests', 'test_core.py'), 'w') as f:
f.write(
'from {pkg}.core import square_5\n'
'def test_square_5():\n'
' assert square_5() == 25\n'.format(pkg=pkg)
)
def create_test_incomplete_coverage(d, pkg):
with open(os.path.join(d, pkg, 'core.py'), 'w') as f:
f.write(
'def square_5():\n'
' return 5 ** 2\n'
)
with open(os.path.join(d, 'tests', 'test_add.py'), 'w') as f:
f.write(
'import {}\n'
'def test_add():\n'
' assert 1 + 2 == 3\n'.format(pkg)
)
def test_passing_cwd():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_passing(d)
result = runner.invoke(hatch, ['test', '-nd'])
assert result.exit_code == 0
assert '1 passed' in result.output
def test_failing_cwd():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_failing(d)
result = runner.invoke(hatch, ['test', '-nd'])
assert result.exit_code == 1
assert '1 failed' in result.output
@requires_internet
def test_project_existing_venv():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic'])
venv_dir = os.path.join(d, 'venv')
wait_until(is_venv, venv_dir)
with venv(venv_dir):
install_packages(['pytest', 'coverage'])
installed_packages = get_installed_packages(editable=False)
assert 'pytest' in installed_packages
assert 'coverage' in installed_packages
create_test_passing(d)
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['test'])
assert result.exit_code == 0
assert '1 passed' in result.output
@requires_internet
def test_project_no_venv():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_passing(d)
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['test'])
with venv(os.path.join(d, 'venv')):
assert 'ok' in get_editable_packages()
installed_packages = get_installed_packages(editable=False)
assert 'pytest' in installed_packages
assert 'coverage' in installed_packages
assert result.exit_code == 0
assert 'A project has been detected!' in result.output
assert 'Creating a dedicated virtual env... complete!' in result.output
assert 'Installing this project in the virtual env...' in result.output
assert 'Ensuring pytest and coverage are available...' in result.output
assert '1 passed' in result.output
@requires_internet
def test_project_no_venv_install_dev_requirements():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
with open(os.path.join(d, 'dev-requirements.txt'), 'w') as f:
f.write('six\n')
create_test_passing(d)
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['test'])
with venv(os.path.join(d, 'venv')):
assert 'ok' in get_editable_packages()
installed_packages = get_installed_packages(editable=False)
assert 'pytest' in installed_packages
assert 'coverage' in installed_packages
assert 'six' in installed_packages
assert result.exit_code == 0
assert 'A project has been detected!' in result.output
assert 'Creating a dedicated virtual env... complete!' in result.output
assert 'Installing this project in the virtual env...' in result.output
assert 'Ensuring pytest and coverage are available...' in result.output
assert 'Installing test dependencies in the virtual env...' in result.output
assert '1 passed' in result.output
@requires_internet
def test_project_no_venv_coverage():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_complete_coverage(d, 'ok')
with env_vars({'_IGNORE_VENV_': '1'}):
result = runner.invoke(hatch, ['test', '-c'])
assert result.exit_code == 0
assert '1 passed' in result.output
assert result.output.strip().endswith(' 100%')
@requires_internet
def test_project_no_venv_coverage_merge():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_complete_coverage(d, 'ok')
with env_vars({'_IGNORE_VENV_': '1'}):
runner.invoke(hatch, ['test', '-c'])
result = runner.invoke(hatch, ['test', '-c', '-m'])
assert result.exit_code == 0
assert '1 passed' in result.output
assert result.output.strip().endswith(' 100%')
@requires_internet
def test_package():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
package_dir = os.path.join(d, 'ok')
create_test_passing(package_dir)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
os.chdir(package_dir)
install_packages(['-e', '.'])
os.chdir(d)
result = runner.invoke(hatch, ['test', '-nd', 'ok', '-g'])
assert result.exit_code == 0
assert '1 passed' in result.output
def test_package_not_exist():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
result = runner.invoke(hatch, ['test', '-nd', 'ok'])
assert result.exit_code == 1
assert '`{}` is not an editable package.'.format('ok') in result.output
@requires_internet
def test_local():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
package_dir = os.path.join(d, 'ok')
create_test_passing(package_dir)
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['-e', package_dir])
result = runner.invoke(hatch, ['test', '-nd', '-l', '-g'])
assert result.exit_code == 0
assert 'Package `ok` has been selected.' in result.output
assert '1 passed' in result.output
def test_local_not_exist():
with temp_chdir() as d:
runner = CliRunner()
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
result = runner.invoke(hatch, ['test', '-nd', '-l', '-g'])
assert result.exit_code == 1
assert 'There are no local packages available.' in result.output
@requires_internet
def test_local_multiple():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
runner.invoke(hatch, ['new', 'ko', '--basic', '-ne'])
venv_dir = os.path.join(d, 'venv')
create_venv(venv_dir)
with venv(venv_dir):
install_packages(['-e', os.path.join(d, 'ok')])
install_packages(['-e', os.path.join(d, 'ko')])
result = runner.invoke(hatch, ['test', '-nd', '-l', '-g'])
assert result.exit_code == 1
assert (
'There are multiple local packages available. '
'Select one with the optional argument.'
) in result.output
def test_path_relative():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
create_test_passing(os.path.join(d, 'ok'))
result = runner.invoke(hatch, ['test', '-nd', '-p', 'ok'])
assert result.exit_code == 0
assert '1 passed' in result.output
def test_path_full():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
runner.invoke(hatch, ['new', 'ko', '--basic', '-ne'])
package_dir = os.path.join(d, 'ok')
create_test_passing(package_dir)
os.chdir(os.path.join(d, 'ko'))
result = runner.invoke(hatch, ['test', '-nd', '-p', os.path.join(d, 'ok')])
assert result.exit_code == 0
assert '1 passed' in result.output
def test_path_full_not_exist():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['new', 'ok', '--basic', '-ne'])
full_path = os.path.join(d, 'ko')
result = runner.invoke(hatch, ['test', '-nd', '-p', full_path])
assert result.exit_code == 1
assert 'Directory `{}` does not exist.'.format(full_path) in result.output
def test_coverage_complete():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_complete_coverage(d, 'ok')
result = runner.invoke(hatch, ['test', '-nd', '-c'])
assert result.exit_code == 0
assert '1 passed' in result.output
assert result.output.strip().endswith(' 100%')
def test_coverage_complete_merge():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_complete_coverage(d, 'ok')
runner.invoke(hatch, ['test', '-nd', '-c'])
result = runner.invoke(hatch, ['test', '-nd', '-c', '-m'])
assert result.exit_code == 0
assert '1 passed' in result.output
assert result.output.strip().endswith(' 100%')
def test_coverage_incomplete():
with temp_chdir() as d:
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
create_test_incomplete_coverage(d, 'ok')
result = runner.invoke(hatch, ['test', '-nd', '-c'])
assert result.exit_code == 0
assert '1 passed' in result.output
assert not result.output.strip().endswith(' 100%')
def test_test_args():
with temp_chdir():
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
result = runner.invoke(hatch, ['test', '-nd', '-ta', '--help'])
assert '-k EXPRESSION' in result.output
def test_coverage_args():
with temp_chdir():
runner = CliRunner()
runner.invoke(hatch, ['init', 'ok', '--basic', '-ne'])
result = runner.invoke(hatch, ['test', '-nd', '-c', '-ca', '--help'])
assert '--parallel-mode' in result.output
|
11527830
|
import django.http
def django_response(request):
resp = django.http.HttpResponse()
resp.set_cookie("name", "value", secure=False,
httponly=False, samesite='None')
return resp
def django_response():
response = django.http.HttpResponse()
response['Set-Cookie'] = "name=value; SameSite=None;"
return response
def django_response(request):
resp = django.http.HttpResponse()
resp.set_cookie(django.http.request.GET.get("name"),
django.http.request.GET.get("value"),
secure=False, httponly=False, samesite='None')
return resp
def django_response():
response = django.http.HttpResponse()
response['Set-Cookie'] = f"{django.http.request.GET.get('name')}={django.http.request.GET.get('value')}; SameSite=None;"
return response
|
11527863
|
import base64
import json
import os
import time
from mock import patch
from threading import Thread
from rancher_gen.handler import RancherConnector, MessageHandler
from rancher_gen.compat import b64encode
class TestRancherConnector:
@classmethod
def setup_class(cls):
template = os.path.join(os.path.dirname(__file__), 'fixtures',
'template.j2')
cls.out_file = '/tmp/out.txt'
cls.config = {
'host': os.getenv('RANCHER_HOST'),
'port': int(os.getenv('RANCHER_PORT', 80)),
'project_id': None,
'access_key': os.getenv('RANCHER_ACCESS_KEY'),
'secret_key': os.getenv('RANCHER_SECRET_KEY'),
'templates': ['{0}:{1}'.format(template, cls.out_file)],
'ssl': False,
'stack': 'teststack',
'services': ['hello1', 'hello2'],
'notify': None
}
@classmethod
def teardown_class(cls):
if os.path.exists(cls.out_file):
os.remove(cls.out_file)
def test_prerenders_template(self, stack_service):
stack, service = stack_service
self.config['project_id'] = stack['accountId']
# Test with filtering by stack and service
handler = RancherConnector(**self.config)
handler._prerender()
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
assert '10.42.232.34' in output
# Test with filtering by stack only
config = self.config.copy()
config['project_id'] = stack['accountId']
config['services'] = None
handler = RancherConnector(**config)
handler._prerender()
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
assert '10.42.232.34' in output
# Test without filtering
config = self.config.copy()
config['project_id'] = stack['accountId']
config['stack'] = None
config['services'] = None
handler = RancherConnector(**config)
handler._prerender()
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
def test_on_message_ignores_bad_messages(self):
handler = RancherConnector(**self.config)
mock_msg = {
'name': 'bad',
'data': []
}
# Test with bad name
with patch.object(MessageHandler, 'run') as mock:
handler._on_message(None, json.dumps(mock_msg))
assert not mock.called
# Test with missing data
mock_msg['name'] = 'resource.change'
with patch.object(MessageHandler, 'run') as mock:
handler._on_message(None, json.dumps(mock_msg))
assert not mock.called
def test_on_message_calls_handler(self):
handler = RancherConnector(**self.config)
mock_msg = {
'name': 'resource.change',
'data': [1, 2, 3]
}
with patch.object(MessageHandler, 'run') as mock:
handler._on_message(None, json.dumps(mock_msg))
assert mock.called
class TestMessageHandler:
def setup_method(self, method):
self.out_file = '/tmp/out.txt'
def teardown_method(self, method):
if os.path.exists(self.out_file):
os.remove(self.out_file)
def test_renders_template(self, stack_service, mock_message):
stack, services = stack_service
access_key = os.getenv('RANCHER_ACCESS_KEY')
secret_key = os.getenv('RANCHER_SECRET_KEY')
api_token = b64encode("{0}:{1}".format(access_key, secret_key))
template = os.path.join(os.path.dirname(__file__), 'fixtures',
'template.j2')
config = {
'message': mock_message,
'host': os.getenv('RANCHER_HOST'),
'port': int(os.getenv('RANCHER_PORT', 80)),
'project_id': stack['accountId'],
'api_token': api_token,
'templates': ['{0}:{1}'.format(template, self.out_file)],
'ssl': False,
'stack': 'teststack',
'services': ['hello1', 'hello2'],
'notify': None
}
# Test with stack and service filter
handler = MessageHandler(**config)
handler.run()
while not os.path.exists(self.out_file):
time.sleep(1)
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
assert '10.42.232.34' in output
# Test with stack only filter
config['project_id'] = stack['accountId']
config['services'] = None
handler = MessageHandler(**config)
handler.run()
while not os.path.exists(self.out_file):
time.sleep(1)
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
assert '10.42.232.34' in output
# Test without filter
config['stack'] = None
handler = MessageHandler(**config)
handler.run()
while not os.path.exists(self.out_file):
time.sleep(1)
with open(self.out_file) as fh:
output = fh.read().replace('\n', '').strip()
assert '10.42.232.33' in output
def test_does_not_render_with_missing_labels_in_message(
self, stack_service, mock_message):
stack, service = stack_service
mock_message['data']['resource']['labels'] = None
access_key = os.getenv('RANCHER_ACCESS_KEY')
secret_key = os.getenv('RANCHER_SECRET_KEY')
api_token = b64encode("{0}:{1}".format(access_key, secret_key))
template = os.path.join(os.path.dirname(__file__), 'fixtures',
'template.j2')
config = {
'message': mock_message,
'host': os.getenv('RANCHER_HOST'),
'port': int(os.getenv('RANCHER_PORT', 80)),
'project_id': stack['accountId'],
'api_token': api_token,
'templates': ['{0}:{1}'.format(template, self.out_file)],
'ssl': False,
'stack': 'teststack',
'services': ['badservice'],
'notify': None
}
handler = MessageHandler(**config)
handler.run()
time.sleep(1)
assert not os.path.exists(self.out_file)
def test_does_not_render_with_missing_stack_name_in_message(
self, stack_service, mock_message):
stack, service = stack_service
del mock_message['data']['resource']['labels']['io.rancher.stack.name']
access_key = os.getenv('RANCHER_ACCESS_KEY')
secret_key = os.getenv('RANCHER_SECRET_KEY')
api_token = b64encode("{0}:{1}".format(access_key, secret_key))
template = os.path.join(os.path.dirname(__file__), 'fixtures',
'template.j2')
config = {
'message': mock_message,
'host': os.getenv('RANCHER_HOST'),
'port': int(os.getenv('RANCHER_PORT', 80)),
'project_id': stack['accountId'],
'api_token': api_token,
'templates': ['{0}:{1}'.format(template, self.out_file)],
'ssl': False,
'stack': 'teststack',
'services': ['badservice'],
'notify': None
}
handler = MessageHandler(**config)
handler.run()
time.sleep(1)
assert not os.path.exists(self.out_file)
def test_does_not_render_with_invalid_filter(
self, stack_service, mock_message):
stack, service = stack_service
access_key = os.getenv('RANCHER_ACCESS_KEY')
secret_key = os.getenv('RANCHER_SECRET_KEY')
api_token = b64encode("{0}:{1}".format(access_key, secret_key))
template = os.path.join(os.path.dirname(__file__), 'fixtures',
'template.j2')
config = {
'message': mock_message,
'host': os.getenv('RANCHER_HOST'),
'port': int(os.getenv('RANCHER_PORT', 80)),
'project_id': stack['accountId'],
'api_token': api_token,
'templates': ['{0}:{1}'.format(template, self.out_file)],
'ssl': False,
'stack': 'teststack',
'services': ['badservice'],
'notify': None
}
# Test with bad service name
handler1 = MessageHandler(**config)
handler1.start()
time.sleep(1)
assert not os.path.exists(self.out_file)
# test with bad stack name
config['stack'] = 'bad'
config['services'] = None
handler2 = MessageHandler(**config)
handler2.start()
time.sleep(1)
assert not os.path.exists(self.out_file)
|
11527881
|
from falcor import *
def render_graph_ToneMapping():
loadRenderPassLibrary("ImageLoader.dll")
loadRenderPassLibrary("ToneMapper.dll")
loadRenderPassLibrary("BlitPass.dll")
testToneMapping = RenderGraph("ToneMapper")
ImageLoader = createPass("ImageLoader", {'filename' : "LightProbes/hallstatt4_hd.hdr", 'mips': False, 'srgb': True})
testToneMapping.addPass(ImageLoader, "ImageLoader")
ToneMapping = createPass("ToneMapper")
testToneMapping.addPass(ToneMapping, "ToneMapping")
BlitPass = createPass("BlitPass", {'filter': SamplerFilter.Linear})
testToneMapping.addPass(BlitPass, "BlitPass")
testToneMapping.addEdge("ImageLoader.dst", "ToneMapping.src")
testToneMapping.addEdge("ToneMapping.dst", "BlitPass.src")
testToneMapping.markOutput("BlitPass.dst")
return testToneMapping
ToneMapping = render_graph_ToneMapping()
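# 'm' is expected to be injected by the hosting application (e.g. Mogwai);
# running this script standalone simply skips graph registration.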
try: m.addGraph(ToneMapping)
except NameError: pass
|
11527944
|
from .base import TestCase
import mock
from urllib3.response import HTTPResponse
from graphite.finders.remote import RemoteFinder
from graphite.readers.remote import RemoteReader
from graphite.util import pickle, BytesIO, msgpack
from graphite.wsgi import application # NOQA makes sure we have a working WSGI app
#
# Test RemoteReader with multiple WhisperReader instances
#
class RemoteReaderTests(TestCase):
@mock.patch('django.conf.settings.CLUSTER_SERVERS', ['127.0.0.1', '8.8.8.8'])
def test_RemoteReader_init_repr_get_intervals(self):
finders = RemoteFinder.factory()
self.assertEqual(len(finders), 2)
self.assertEqual(finders[0].host, '127.0.0.1')
self.assertEqual(finders[1].host, '8.8.8.8')
finder = finders[0]
reader = RemoteReader(finder,
{'intervals': []},
bulk_query=['a.b.c.d'])
self.assertIsNotNone(reader)
self.assertRegexpMatches(str(reader), r"<RemoteReader\[.*\]: 127.0.0.1 a.b.c.d>")
self.assertEqual(reader.get_intervals(), [])
#
# Test RemoteReader.fetch_multi()
#
@mock.patch('urllib3.PoolManager.request')
@mock.patch('django.conf.settings.CLUSTER_SERVERS', ['127.0.0.1', 'http://8.8.8.8/graphite?format=msgpack&local=0'])
@mock.patch('django.conf.settings.INTRACLUSTER_HTTPS', False)
@mock.patch('django.conf.settings.REMOTE_STORE_USE_POST', False)
@mock.patch('django.conf.settings.FETCH_TIMEOUT', 10)
def test_RemoteReader_fetch_multi(self, http_request):
test_finders = RemoteFinder.factory()
finder = test_finders[0]
startTime = 1496262000
endTime = 1496262060
# no path or bulk_query
reader = RemoteReader(finder,{})
self.assertEqual(reader.bulk_query, [])
result = reader.fetch_multi(startTime, endTime)
self.assertEqual(result, [])
self.assertEqual(http_request.call_count, 0)
# path
reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'})
data = [
{
'start': startTime,
'step': 60,
'end': endTime,
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
'name': 'a.b.c.d'
}
]
responseObject = HTTPResponse(body=BytesIO(pickle.dumps(data)), status=200, preload_content=False)
http_request.return_value = responseObject
result = reader.fetch_multi(startTime, endTime)
expected_response = [
{
'pathExpression': 'a.b.c.d',
'name': 'a.b.c.d',
'time_info': (1496262000, 1496262060, 60),
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
}
]
self.assertEqual(result, expected_response)
self.assertEqual(http_request.call_args[0], (
'GET',
'http://127.0.0.1/render/',
))
self.assertEqual(http_request.call_args[1], {
'fields': [
('format', 'pickle'),
('local', '1'),
('noCache', '1'),
('from', startTime),
('until', endTime),
('target', 'a.b.c.d'),
],
'headers': None,
'preload_content': False,
'timeout': 10,
})
# bulk_query & now
finder = test_finders[1]
reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'}, bulk_query=['a.b.c.d'])
data = [
{
'start': startTime,
'step': 60,
'end': endTime,
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
'name': 'a.b.c.d'
}
]
responseObject = HTTPResponse(
body=BytesIO(msgpack.dumps(data, use_bin_type=True)),
status=200,
preload_content=False,
headers={'Content-Type': 'application/x-msgpack'}
)
http_request.return_value = responseObject
result = reader.fetch_multi(startTime, endTime, now=endTime, requestContext={'forwardHeaders': {'Authorization': 'Basic xxxx'}})
expected_response = [
{
'pathExpression': 'a.b.c.d',
'name': 'a.b.c.d',
'time_info': (1496262000, 1496262060, 60),
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
}
]
self.assertEqual(result, expected_response)
self.assertEqual(http_request.call_args[0], (
'GET',
'http://8.8.8.8/graphite/render/',
))
self.assertEqual(http_request.call_args[1], {
'fields': [
('format', 'msgpack'),
('local', '0'),
('noCache', '1'),
('from', startTime),
('until', endTime),
('target', 'a.b.c.d'),
('now', endTime),
],
'headers': {'Authorization': 'Basic xxxx'},
'preload_content': False,
'timeout': 10,
})
# non-pickle response
responseObject = HTTPResponse(body=BytesIO(b'error'), status=200, preload_content=False)
http_request.return_value = responseObject
with self.assertRaisesRegexp(Exception, 'Error decoding response from http://[^ ]+: .+'):
reader.fetch(startTime, endTime)
# invalid response data
data = [
{},
]
responseObject = HTTPResponse(
body=BytesIO(msgpack.dumps(data, use_bin_type=True)),
status=200,
preload_content=False,
headers={'Content-Type': 'application/x-msgpack'}
)
http_request.return_value = responseObject
with self.assertRaisesRegexp(Exception, r'Invalid render response from http://[^ ]+: KeyError\(\'name\',?\)'):
reader.fetch(startTime, endTime)
# non-200 response
responseObject = HTTPResponse(body=BytesIO(b'error'), status=500, preload_content=False)
http_request.return_value = responseObject
with self.assertRaisesRegexp(Exception, 'Error response 500 from http://[^ ]+'):
reader.fetch(startTime, endTime)
# exception raised by request()
http_request.side_effect = Exception('error')
with self.assertRaisesRegexp(Exception, 'Error requesting http://[^ ]+: error'):
reader.fetch(startTime, endTime)
#
# Test RemoteReader.fetch()
#
@mock.patch('urllib3.PoolManager.request')
@mock.patch('django.conf.settings.CLUSTER_SERVERS', ['127.0.0.1', '8.8.8.8'])
@mock.patch('django.conf.settings.INTRACLUSTER_HTTPS', False)
@mock.patch('django.conf.settings.REMOTE_STORE_USE_POST', False)
@mock.patch('django.conf.settings.FETCH_TIMEOUT', 10)
def test_RemoteReader_fetch(self, http_request):
test_finders = RemoteFinder.factory()
finder = test_finders[0]
startTime = 1496262000
endTime = 1496262060
# no path or bulk_query
reader = RemoteReader(finder,{})
self.assertEqual(reader.bulk_query, [])
result = reader.fetch(startTime, endTime)
self.assertEqual(result, None)
self.assertEqual(http_request.call_count, 0)
# path & bulk_query
reader = RemoteReader(finder, {'intervals': [], 'path': 'a.b.c.d'}, bulk_query=['a.b.c.*'])
data = [
{
'start': startTime,
'step': 60,
'end': endTime,
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
'name': 'a.b.c.c'
},
{
'start': startTime,
'step': 60,
'end': endTime,
'values': [1.0, 0.0, 1.0, 0.0, 1.0],
'name': 'a.b.c.d'
}
]
responseObject = HTTPResponse(body=BytesIO(pickle.dumps(data)), status=200, preload_content=False)
http_request.return_value = responseObject
result = reader.fetch(startTime, endTime)
expected_response = ((1496262000, 1496262060, 60), [1.0, 0.0, 1.0, 0.0, 1.0])
self.assertEqual(result, expected_response)
self.assertEqual(http_request.call_args[0], (
'GET',
'http://127.0.0.1/render/',
))
self.assertEqual(http_request.call_args[1], {
'fields': [
('format', 'pickle'),
('local', '1'),
('noCache', '1'),
('from', startTime),
('until', endTime),
('target', 'a.b.c.*'),
],
'headers': None,
'preload_content': False,
'timeout': 10,
})
|
11527960
|
import core.modules
import core.modules.module_registry
from core.modules.vistrails_module import Module, ModuleError
from Array import *
import scipy
import scipy.signal
from scipy import sparse, fftpack
import numpy
class WindowModule(object):
my_namespace = 'scipy|signals|windows'
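# Each concrete window class below wraps the matching scipy.signal window
# function and registers a "Window Size" input (plus shape parameters where
# needed) and a "Window" output port.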
class HanningWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.hanning(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class TriangularWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.triang(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BlackmanWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.blackman(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BlackmanHarrisWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.blackmanharris(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class ParzenWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.parzen(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class HammingWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.hamming(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class KaiserWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
beta = self.get_input("Beta")
out = NDArray()
out.set_array(scipy.signal.kaiser(size, beta))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_input_port(cls, "Beta", (basic.Float, 'Beta'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BartlettHannWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.barthann(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class GaussianWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
sigma = self.get_input("Sigma")
out = NDArray()
out.set_array(scipy.signal.gaussian(size, sigma))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_input_port(cls, "Sigma", (basic.Float, 'Sigma'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BoxcarWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.boxcar(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BohmanWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.bohman(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class BartlettWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.bartlett(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
class NuttallBlackmanHarrisWindow(WindowModule, Module):
def compute(self):
size = self.get_input("Window Size")
out = NDArray()
out.set_array(scipy.signal.nuttall(size))
self.set_output("Window", out)
@classmethod
def register(cls, reg, basic):
reg.add_module(cls, namespace=cls.my_namespace)
reg.add_input_port(cls, "Window Size", (basic.Integer, 'Window Size'))
reg.add_output_port(cls, "Window", (NDArray, 'Window Function'))
|
11527969
|
from typing import List
class Solution:
def threeSumClosest(self, nums: List[int], target: int) -> int:
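"""Sort, then for each anchor index walk two pointers inward, keeping
the candidate sum with the smallest absolute distance to target."""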
nums.sort()
closestSum = sum(nums[:3])
for i in range(len(nums) - 2):
if i == 0 or nums[i] != nums[i - 1]:
start = i + 1
end = len(nums) - 1
while start < end:
s = nums[i] + nums[start] + nums[end]
if s < target:
if target - s < abs(target - closestSum):
closestSum = s
start += 1
elif s > target:
if s - target < abs(target - closestSum):
closestSum = s
end -= 1
else:
return target
return closestSum
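# e.g. Solution().threeSumClosest([-1, 2, 1, -4], 1) == 2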
|
11527990
|
import sys
import glob
from datetime import datetime, timedelta
import traces
from traces.utils import datetime_range
def parse_iso_datetime(value):
return datetime.strptime(value, "%Y-%m-%dT%H:%M:%S")
def read_all(pattern='data/lightbulb-*.csv'):
"""Read all of the CSVs in a directory matching the filename pattern
as TimeSeries.
"""
result = []
for filename in glob.iglob(pattern):
print('reading', filename, file=sys.stderr)
ts = traces.TimeSeries.from_csv(
filename,
time_column=0,
time_transform=parse_iso_datetime,
value_column=1,
value_transform=int,
default=0,
)
ts.compact()
result.append(ts)
return result
ts_list = read_all()
total_watts = traces.TimeSeries.merge(ts_list, operation=sum)
# use distribution to look at the distribution of number of lights on
# over a month
histogram = total_watts.distribution(
start=datetime(2016, 1, 1),
end=datetime(2016, 2, 1),
)
print(histogram.mean())
# use distribution with mask to look at the median/lower/upper of
# lights on by hour of day, plot with your tool of choice
for hour, distribution in total_watts.distribution_by_hour_of_day():
print(hour, distribution.quantiles([0.25, 0.5, 0.75]))
for day, distribution in total_watts.distribution_by_day_of_week():
print(day, distribution.quantiles([0.25, 0.5, 0.75]))
# look at the typical number of lights on during business hours
# (8am-6pm) for each day in january
for t in datetime_range(datetime(2016, 1, 1), datetime(2016, 2, 1), 'days'):
biz_start = t + timedelta(hours=8)
biz_end = t + timedelta(hours=18)
histogram = total_watts.distribution(start=biz_start, end=biz_end)
print(t, histogram.quantiles([0.25, 0.5, 0.75]))
# transform time series to evenly spaced version using moving average
# instead of just sampling to avoid aliasing, and proceed to use
# statsmodels/pandas to forecast electricity usage "Modeling Time
# Series" http://tomaugspurger.github.io/modern-7-timeseries.html, in
# the Jupyter notebook it's `In [17]`.
regular = total_watts.moving_average(300, pandas=True)
print(regular)
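# A minimal sketch of the forecasting step referenced above, assuming
# statsmodels is installed; the (1, 1, 1) order and 12-step horizon are
# illustrative choices, not part of the original example.
import statsmodels.api as sm
fit = sm.tsa.statespace.SARIMAX(regular, order=(1, 1, 1)).fit(disp=False)
print(fit.forecast(steps=12))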
|
11527999
|
import ApplicationRegisterService_pb2_grpc
import ApplicationRegisterService_pb2
import grpc
import urllib.request
import json
class SkyWalking(object):
def __init__(self, agentstream=["app.danoolive.com:10800"], applicationCode="python-test-service", debug=False):
if isinstance(agentstream, str):
agentstream = [agentstream]
self.agentstreams = agentstream
self.applicationCode = applicationCode
self.debug = debug
self.application = None
self.grpc_servers = []
def update_grpc_servers(self):
tmps = []
for agentstream in self.agentstreams:
try:
tmp = urllib.request.urlopen("http://" + agentstream + "/agentstream/grpc").read()
if not tmp:
if self.debug:
print("empty agentstream? " + agentstream)
continue
grpc_servers = json.loads(tmp)
if not grpc_servers:
if self.debug:
print("empty agentstream? " + agentstream)
continue
tmps += grpc_servers
except Exception:
if self.debug:
print("bad agentstream=" + agentstream)
if not tmps:
if self.debug:
print("empty grpc_servers")
self.grpc_servers = tmps
def register(self):
for grpc_server in self.grpc_servers:
try:
self.channel = grpc.insecure_channel(grpc_server)
self.stub = ApplicationRegisterService_pb2_grpc.ApplicationRegisterServiceStub(self.channel)
tmp = self.stub.register(ApplicationRegisterService_pb2.Application(applicationCode=[self.applicationCode]))
if tmp and tmp.application:
self.application = tmp.application
break
except Exception:
if self.debug:
print("grpc_server is down? " + grpc_server)
sky = SkyWalking()
sky.update_grpc_servers()
sky.register()
|
11528046
|
from lakesuperior.dictionaries.namespaces import ns_collection as nsc
srv_mgd_subjects = {
nsc['fcsystem'].root,
}
srv_mgd_predicates = {
nsc['fcrepo'].created,
nsc['fcrepo'].createdBy,
nsc['fcrepo'].hasFixityService,
nsc['fcrepo'].hasParent,
nsc['fcrepo'].lastModified,
nsc['fcrepo'].lastModifiedBy,
nsc['fcrepo'].writable,
nsc['iana'].describedBy,
nsc['ldp'].contains,
nsc['premis'].hasMessageDigest,
nsc['premis'].hasSize,
}
srv_mgd_types = {
nsc['fcrepo'].Binary,
nsc['fcrepo'].Container,
nsc['fcrepo'].Pairtree,
nsc['fcrepo'].Resource,
nsc['fcrepo'].Version,
nsc['ldp'].BasicContainer,
nsc['ldp'].Container,
nsc['ldp'].DirectContainer,
nsc['ldp'].IndirectContainer,
nsc['ldp'].NonRDFSource,
nsc['ldp'].RDFSource,
nsc['ldp'].Resource,
}
|
11528058
|
import base64
from flask import render_template, url_for, redirect, session, request, current_app
from flask_login import LoginManager
from ..models.user import User
login_manager = LoginManager()
def handle_bad_request(e):
return render_template('errors/400.html', code=400, message=e), 400
def handle_unauthorized_access(e):
session['next'] = request.script_root + request.path
return redirect(url_for('index.login'))
def handle_access_forbidden(e):
return render_template('errors/403.html', code=403, message=e), 403
def handle_page_not_found(e):
return render_template('errors/404.html', code=404, message=e), 404
def handle_internal_server_error(e):
return render_template('errors/500.html', code=500, message=e), 500
def load_if_valid(user, method, src_ip, trust_user=False):
try:
auth = user.is_validate(method, src_ip, trust_user)
if auth is False:
return None
else:
# login_user(user, remember=False)
return User.query.filter(User.id==user.id).first()
except Exception as e:
current_app.logger.error('Error: {0}'.format(e))
return None
@login_manager.user_loader
def load_user(id):
"""
This will be current_user
"""
return User.query.get(int(id))
@login_manager.request_loader
def login_via_authorization_header_or_remote_user(request):
# Try to login using Basic Authentication
auth_header = request.headers.get('Authorization')
if auth_header:
auth_method = request.args.get('auth_method', 'LOCAL')
auth_method = 'LDAP' if auth_method != 'LOCAL' else 'LOCAL'
auth_header = auth_header.replace('Basic ', '', 1)
try:
auth_header = str(base64.b64decode(auth_header), 'utf-8')
username, password = auth_header.split(":")
except TypeError as e:
return None
user = User(username=username,
password=password,
plain_text_password=password)
return load_if_valid(user, method=auth_method, src_ip=request.remote_addr)
# Try login by checking a REMOTE_USER environment variable
remote_user = request.remote_user
if remote_user and current_app.config.get('REMOTE_USER_ENABLED'):
session_remote_user = session.get('remote_user')
# If we already validated a remote user against an authorization method
# a local user should have been created in the database, so we force a 'LOCAL' auth_method
auth_method = 'LOCAL' if session_remote_user else current_app.config.get('REMOTE_AUTH_METHOD', 'LDAP')
current_app.logger.debug(
'REMOTE_USER environment variable found: attempting {0} authentication for username "{1}"'
.format(auth_method, remote_user))
user = User(username=remote_user.strip())
valid_remote_user = load_if_valid(user, method=auth_method, src_ip=request.remote_addr, trust_user=True)
if valid_remote_user:
# If we were successful in authenticating a trusted remote user, store it in session
session['remote_user'] = valid_remote_user.username
return valid_remote_user
return None
|
11528125
|
import FWCore.ParameterSet.Config as cms
JetIDParams = cms.PSet(
useRecHits = cms.bool(True),
hbheRecHitsColl = cms.InputTag("hbhereco"),
hoRecHitsColl = cms.InputTag("horeco"),
hfRecHitsColl = cms.InputTag("hfreco"),
ebRecHitsColl = cms.InputTag("ecalRecHit", "EcalRecHitsEB"),
eeRecHitsColl = cms.InputTag("ecalRecHit", "EcalRecHitsEE"),
rpcRecHits = cms.InputTag("rpcRecHits")
)
|
11528132
|
import torch
import logging
import pdb
import os
import datetime
import warnings
warnings.filterwarnings("ignore")
from config import cfg
from data import make_data_loader
from solver import build_optimizer, build_scheduler
from utils.check_point import DetectronCheckpointer
from engine import (
default_argument_parser,
default_setup,
launch,
)
from utils import comm
from utils.backup_files import sync_root
from engine.trainer import do_train
from engine.test_net import run_test
from model.detector import KeypointDetector
from data import build_test_loader
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
resource.setrlimit(resource.RLIMIT_NOFILE, (4096, rlimit[1]))
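# Raise the soft open-file limit: each dataloader worker holds file
# descriptors, and the default soft limit is easily exhausted.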
torch.backends.cudnn.enabled = True # enable cudnn
# torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = True # enable cudnn to search the best algorithm
def train(cfg, model, device, distributed):
data_loader = make_data_loader(cfg, is_train=True)
data_loaders_val = build_test_loader(cfg, is_train=False)
total_iters_each_epoch = len(data_loader.dataset) // cfg.SOLVER.IMS_PER_BATCH
# use epoch rather than iterations for saving checkpoint and validation
if cfg.SOLVER.EVAL_AND_SAVE_EPOCH:
cfg.SOLVER.MAX_ITERATION = cfg.SOLVER.MAX_EPOCHS * total_iters_each_epoch
cfg.SOLVER.SAVE_CHECKPOINT_INTERVAL = total_iters_each_epoch * cfg.SOLVER.SAVE_CHECKPOINT_EPOCH_INTERVAL
cfg.SOLVER.EVAL_INTERVAL = total_iters_each_epoch * cfg.SOLVER.EVAL_EPOCH_INTERVAL
cfg.SOLVER.STEPS = [total_iters_each_epoch * x for x in cfg.SOLVER.DECAY_EPOCH_STEPS]
cfg.SOLVER.WARMUP_STEPS = cfg.SOLVER.WARMUP_EPOCH * total_iters_each_epoch
cfg.freeze()
optimizer = build_optimizer(model, cfg)
scheduler, warmup_scheduler = build_scheduler(
optimizer, total_iters_each_epoch=total_iters_each_epoch,
optim_cfg=cfg.SOLVER,
)
arguments = {}
arguments["iteration"] = 0
arguments["iter_per_epoch"] = total_iters_each_epoch
output_dir = cfg.OUTPUT_DIR
save_to_disk = comm.get_rank() == 0
checkpointer = DetectronCheckpointer(
cfg, model, optimizer, scheduler, output_dir, save_to_disk
)
if len(cfg.MODEL.WEIGHT) > 0:
extra_checkpoint_data = checkpointer.load(cfg.MODEL.WEIGHT, use_latest=False)
arguments.update(extra_checkpoint_data)
do_train(
cfg,
distributed,
model,
data_loader,
data_loaders_val,
optimizer,
scheduler,
warmup_scheduler,
checkpointer,
device,
arguments,
)
def setup(args):
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.SOLVER.IMS_PER_BATCH = args.batch_size
cfg.DATALOADER.NUM_WORKERS = args.num_work
cfg.TEST.EVAL_DIS_IOUS = args.eval_iou
cfg.TEST.EVAL_DEPTH = args.eval_depth
if args.vis_thre > 0:
cfg.TEST.VISUALIZE_THRESHOLD = args.vis_thre
if args.output is not None:
cfg.OUTPUT_DIR = args.output
if args.test:
cfg.DATASETS.TEST_SPLIT = 'test'
cfg.DATASETS.TEST = ("kitti_test",)
cfg.START_TIME = datetime.datetime.strftime(datetime.datetime.now(), '%m-%d %H:%M:%S')
default_setup(cfg, args)
return cfg
def main(args):
cfg = setup(args)
distributed = comm.get_world_size() > 1
if not distributed: cfg.MODEL.USE_SYNC_BN = False
model = KeypointDetector(cfg)
device = torch.device(cfg.MODEL.DEVICE)
model.to(device)
if args.eval_only:
checkpointer = DetectronCheckpointer(
cfg, model, save_dir=cfg.OUTPUT_DIR
)
ckpt = cfg.MODEL.WEIGHT if args.ckpt is None else args.ckpt
_ = checkpointer.load(ckpt, use_latest=args.ckpt is None)
return run_test(cfg, checkpointer.model, vis=args.vis, eval_score_iou=args.eval_score_iou, eval_all_depths=args.eval_all_depths)
if distributed:
# convert BN to SyncBN
if cfg.MODEL.USE_SYNC_BN:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[comm.get_local_rank()], broadcast_buffers=False,
find_unused_parameters=True,
)
train(cfg, model, device, distributed)
if __name__ == '__main__':
args = default_argument_parser().parse_args()
print("Command Line Args:", args)
# backup all python files when training
if not args.eval_only and args.output is not None:
sync_root('.', os.path.join(args.output, 'backup'))
import shutil
shutil.copy2(args.config_file, os.path.join(args.output, 'backup', os.path.basename(args.config_file)))
print("Finish backup all files")
launch(
main,
args.num_gpus,
num_machines=args.num_machines,
machine_rank=args.machine_rank,
dist_url=args.dist_url,
args=(args,),
)
|
11528157
|
import pytest
import xmltodict
from jmeter_api.samplers.jdbc_request.elements import JdbcRequest, ResultSetHandler, QueryType
class TestJdbcRequestArgsTypes:
def test_name(self):
with pytest.raises(TypeError):
JdbcRequest(name=123)
def test_comments(self):
with pytest.raises(TypeError):
JdbcRequest(comments=123)
def test_data_source(self):
with pytest.raises(TypeError):
JdbcRequest(data_source=123)
def test_query_type(self):
with pytest.raises(TypeError):
JdbcRequest(query_type=123)
def test_query(self):
with pytest.raises(TypeError):
JdbcRequest(query=123)
def test_parameter_values1(self):
with pytest.raises(TypeError):
JdbcRequest(parameter_values=123)
def test_parameter_values2(self):
with pytest.raises(TypeError):
JdbcRequest(parameter_values='123')
def test_parameter_types1(self):
with pytest.raises(TypeError):
JdbcRequest(parameter_types=123)
def test_parameter_types2(self):
with pytest.raises(TypeError):
JdbcRequest(parameter_types='123')
def test_variable_names(self):
with pytest.raises(TypeError):
JdbcRequest(variable_names=123)
def test_result_variable_name(self):
with pytest.raises(TypeError):
JdbcRequest(result_variable_name=123)
def test_query_timeout1(self):
with pytest.raises(ValueError):
JdbcRequest(query_timeout=-1)
def test_query_timeout2(self):
with pytest.raises(TypeError):
JdbcRequest(query_timeout='123')
def test_handle_result_set1(self):
with pytest.raises(TypeError):
JdbcRequest(handle_result_set='123')
def test_handle_result_set2(self):
with pytest.raises(TypeError):
JdbcRequest(handle_result_set=1)
def test_is_enabled(self):
with pytest.raises(TypeError):
JdbcRequest(is_enabled=None)
class TestJdbcRequestRender:
def test_name(self):
element = JdbcRequest(name='jdbc')
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
assert parsed_doc['JDBCSampler']['@testname'] == 'jdbc'
def test_is_enabled(self):
element = JdbcRequest(is_enabled=False)
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
assert parsed_doc['JDBCSampler']['@enabled'] == 'false'
def test_data_source(self):
element = JdbcRequest(data_source='data source test')
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'dataSource':
assert tag['#text'] == 'data source test'
def test_query_type(self):
element = JdbcRequest(query_type=QueryType.AUTOCOMMIT)
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'queryType':
assert tag['#text'] == 'AutoCommit(false)'
def test_query(self):
q = """
select * from table
where col = '1'""".strip()
element = JdbcRequest(query=q)
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'query':
assert tag['#text'] == q
def test_parameter_values(self):
element = JdbcRequest(parameter_values=['param-1', 'param-2'])
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'queryArguments':
assert tag['#text'] == '${param-1},${param-2}'
def test_parameter_types(self):
element = JdbcRequest(parameter_types=['varchar ', ' INTEGER'])
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'queryArgumentsTypes':
assert tag['#text'] == 'VARCHAR,INTEGER'
def test_variable_names(self):
element = JdbcRequest(variable_names='var name')
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'variableNames':
assert tag['#text'] == 'var name'
def test_result_variable_name(self):
element = JdbcRequest(result_variable_name='var name')
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'resultVariable':
assert tag['#text'] == 'var name'
def test_query_timeout(self):
element = JdbcRequest(query_timeout=500)
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'queryTimeout':
assert tag['#text'] == '500'
def test_handle_result_set(self):
element = JdbcRequest(handle_result_set=ResultSetHandler.COUNT_RECORDS)
rendered_doc = element.to_xml().replace('<hashTree />', '')
parsed_doc = xmltodict.parse(rendered_doc)
for tag in parsed_doc['JDBCSampler']['stringProp']:
if tag['@name'] == 'resultSetHandler':
assert tag['#text'] == 'Count Records'
def test_hashtree_contain(self):
element = JdbcRequest(name='Jdbc',
handle_result_set=ResultSetHandler.COUNT_RECORDS,
query_timeout=20
)
rendered_doc = element.to_xml()
assert '<hashTree />' in rendered_doc
|
11528204
|
from datetime import datetime, timezone
import scrapy
class Page(scrapy.Item):
"""
General scrapy item to store entire HTTP body
"""
url = scrapy.Field()
body = scrapy.Field()
crawled_at = scrapy.Field()
def __repr__(self):
"""
        Truncate body to shorten logs.
"""
p = self.__class__(self) # Duplicate Page instance
        if len(p['body']) > 203:  # truncate only when it actually shortens: 100 + '...' + 100 = 203 chars
            p['body'] = p['body'][:100] + '...' + p['body'][-100:]
return super(Page, p).__repr__() # Return representation of duplicated page
@classmethod
def from_response(cls, response):
item = cls()
item['url'] = response.url
item['body'] = response.text
item['crawled_at'] = datetime.now(timezone.utc).replace(microsecond=0).isoformat()
return item
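# Usage sketch (illustrative, not part of the original module): a minimal
# spider yielding a Page item from its parse callback; the spider name and
# start URL below are placeholders.
#
# class SnapshotSpider(scrapy.Spider):
#     name = 'snapshot'
#     start_urls = ['https://example.com']
#
#     def parse(self, response):
#         yield Page.from_response(response)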
|
11528220
|
from shadowlands.tui.effects.cursor import Cursor
from shadowlands.tui.debug import debug
import pdb
class DynamicSourceCursor(Cursor):
def __init__(self, screen, renderer, x, y, refresh_period=None, **kwargs):
super(DynamicSourceCursor, self).__init__(screen, renderer, x, y, **kwargs)
self._previous_buffer = ['']
self._current_buffer = ['']
self._previous_colours = [()]
self._current_colours = [()]
self._refresh_period = refresh_period
    def need_new_buffer(self):
        return self._current_buffer is None or (self.char >= len(self._current_buffer[self.image_index]) and self._current_buffer != self._renderer.rendered_text[0])
def get_buffer(self):
# if current buffer is unset, grab the rendered text.
# also, if we have already reached the end of the text,
# go ahead and grab another buffer from the renderer.
if self.need_new_buffer():
image, colours = self._renderer.rendered_text
self._previous_buffer = self._current_buffer
self._current_buffer = image
self._previous_colours = self._current_colours
self._current_colours = colours
self.reset()
#debug(); pdb.set_trace()
return self._current_buffer, self._current_colours
    def _update(self, frame_no):
        # Draw via the parent Cursor, periodically resetting so refreshed
        # renderer output gets picked up.
if self._refresh_period:
if frame_no % self._refresh_period == 0:
self.reset()
super(DynamicSourceCursor, self)._update(frame_no)
# Now we overwrite with spaces the difference between the sizes
# of the current and previous buffer, if the prev buffer was
# larger.
size_difference = len(self._previous_buffer[self.image_index]) - len(self._current_buffer[self.image_index])
if size_difference > 0:
#spaces = ' ' * size_difference
for i in range(size_difference):
self.print_space(i)
if i < size_difference - 1:
self.print_cursor(i)
def print_space(self, i):
self._screen.print_at(' ', self._x+i, self._y, self._colour)
def print_cursor(self, i):
self._screen.print_at(self.CURSOR, self._x+i+1, self._y, self._colour)
|
11528225
|
import argparse
import logging
import os
import shutil
from overrides import overrides
from subprocess import Popen, PIPE
from typing import List
from sacrerouge.commands import MetricSetupSubcommand
from sacrerouge.common import DATA_ROOT, TemporaryDirectory
from sacrerouge.data import MetricsDict
from sacrerouge.data.types import ReferenceType, SummaryType
from sacrerouge.metrics import Metric, ReferenceBasedMetric
from sacrerouge.io import JsonlReader, JsonlWriter
logger = logging.getLogger(__name__)
@Metric.register('s3')
class S3(ReferenceBasedMetric):
def __init__(self,
environment_name: str = None,
s3_root: str = f'{DATA_ROOT}/metrics/S3',
embeddings_file: str = f'{DATA_ROOT}/metrics/S3/deps.words.bz2',
model_dir: str = f'{DATA_ROOT}/metrics/S3/models/en',
verbose: bool = False):
super().__init__()
self.environment_name = environment_name
self.s3_root = s3_root
self.embeddings_file = embeddings_file
self.model_dir = model_dir
self.verbose = verbose
if self.environment_name is not None:
if 'CONDA_INIT' not in os.environ:
raise Exception('If `environment_name` is not none, environment variable "CONDA_INIT" must be set to the path to "conda.sh"')
def _flatten_summaries(self, summaries_list: List[List[SummaryType]]) -> List[List[str]]:
flattened_list = []
for summaries in summaries_list:
flattened_list.append([])
for summary in summaries:
if isinstance(summary, list):
summary = ' '.join(summary)
flattened_list[-1].append(summary)
return flattened_list
def score_multi_all(self,
summaries_list: List[List[SummaryType]],
references_list: List[List[ReferenceType]],
**kwargs) -> List[List[MetricsDict]]:
summaries_list = self._flatten_summaries(summaries_list)
references_list = self._flatten_summaries(references_list)
logger.info(f'Serializing the summaries and references to a file')
num_summaries = 0
with TemporaryDirectory() as temp_dir:
input_file = f'{temp_dir}/input.jsonl'
output_file = f'{temp_dir}/output.jsonl'
with JsonlWriter(input_file) as out:
for summaries, references in zip(summaries_list, references_list):
for summary in summaries:
out.write({
'summary': summary,
'references': references
})
num_summaries += 1
logger.info(f'Wrote {num_summaries} (summary, references) pairs')
commands = [f'cd {self.s3_root}/S3']
if self.environment_name is not None:
commands.append(f'source {os.environ["CONDA_INIT"]}')
commands.append(f'conda activate {self.environment_name}')
commands.append(f'python2.7 run_batch.py {input_file} {output_file} {self.embeddings_file} {self.model_dir}')
command = ' && '.join(commands)
logger.info(f'Running command: "{command}"')
redirect = None if self.verbose else PIPE
process = Popen(command, stdout=redirect, stderr=redirect, shell=True)
process.communicate()
scores = JsonlReader(output_file).read()
assert len(scores) == num_summaries
metrics_list = []
index = 0
for summaries in summaries_list:
metrics_list.append([])
for _ in summaries:
metrics_list[-1].append(MetricsDict({
's3': {
'pyr': scores[index]['pyr'],
'resp': scores[index]['resp'],
}
}))
index += 1
return metrics_list
@MetricSetupSubcommand.register('s3')
class S3SetupSubcommand(MetricSetupSubcommand):
@overrides
def add_subparser(self, parser: argparse._SubParsersAction):
description = 'Setup the S3 metric'
self.parser = parser.add_parser('s3', description=description, help=description)
self.parser.add_argument('--force', action='store_true', help='Force setting up the metric again')
self.parser.set_defaults(subfunc=self.run)
@overrides
def run(self, args):
if args.force and os.path.exists(f'{DATA_ROOT}/metrics/S3'):
shutil.rmtree(f'{DATA_ROOT}/metrics/S3')
commands = [
f'mkdir -p {DATA_ROOT}/metrics',
f'cd {DATA_ROOT}/metrics',
f'git clone https://github.com/danieldeutsch/S3',
f'cd S3',
f'wget http://u.cs.biu.ac.il/~yogo/data/syntemb/deps.words.bz2'
]
command = ' && '.join(commands)
process = Popen(command, shell=True)
process.communicate()
if process.returncode == 0:
print('S3 setup success')
else:
print('S3 setup failure')
|
11528226
|
FIELD_CACHE = {}
def Nullable(cls, **kwargs):
    """Wrap a field class so falsy values serialize to None and parse back to
    an empty string; generated wrappers are cached per wrapped class."""
if cls in FIELD_CACHE:
return FIELD_CACHE[cls](**kwargs)
class new(cls):
class Meta:
name = f"Nullable{cls._meta.name}"
@staticmethod
def serialize(value):
if not value:
return None
return cls.serialize(value)
@staticmethod
def parse_value(value):
if not value:
return ""
return cls.parse_value(value)
new.__name__ = f"Nullable{type(cls).__name__}"
new.__doc__ = cls.__doc__
FIELD_CACHE[cls] = new
return new(**kwargs)
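if __name__ == "__main__":
    # Minimal self-contained sketch (assumption: Nullable accepts any
    # graphene-style class exposing `_meta.name` plus `serialize` and
    # `parse_value` staticmethods; FakeString below is a stand-in, not a
    # real API).
    class FakeString:
        class _meta:
            name = "String"

        def __init__(self, **kwargs):
            pass

        @staticmethod
        def serialize(value):
            return str(value)

        @staticmethod
        def parse_value(value):
            return str(value)

    field = Nullable(FakeString)
    assert field.Meta.name == "NullableString"
    assert field.serialize("") is None       # falsy values serialize to None
    assert field.serialize("abc") == "abc"
    assert field.parse_value(None) == ""     # and parse back to an empty string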
|
11528256
|
from . import Task
class DummyTask(Task):
def setup(self):
return self.success()
def compile(self):
return self.success()
def run(self):
return self.success()
|
11528279
|
import json
import argparse
import sys
import logging
import random
from datetime import datetime
import os
import numpy as np
import pickle
from tqdm import tqdm, trange
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from torch.utils.data.distributed import DistributedSampler
import bert_utils
import model
from model import BertQA
from transformers import BertTokenizer
from transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from transformers.optimization import AdamW, WarmupLinearSchedule
from tensorboardX import SummaryWriter
from prepare_dataset import QuestionParagraph, ParagraphAnswer
def return_original_index(index, max_seq_length, num_paragraphs):
    # Map a flat index over `num_paragraphs` concatenated max_seq_length-sized
    # chunks back to (paragraph index, offset within that paragraph).
    for i in range(0, num_paragraphs):
        if max_seq_length * i <= index < max_seq_length * (i + 1):
            return (i, index - (max_seq_length * i))
parser = argparse.ArgumentParser()
parser.add_argument("--q_p_file", default=None, help="Question Paragraph pickle file")
parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
parser.add_argument("--no_cuda", action='store_true', help="Whether not to use CUDA when available")
parser.add_argument("--output_dir", default=None, help="Directory to write the trained weights")
parser.add_argument("--num_paragraphs_per_question", default=4, help="Number of top k paragraphs to test on should match the one chosen in q_p_file")
args = parser.parse_args()
## read the pickle file
q_p = pickle.load(open(args.q_p_file, 'rb'))
### create BERT input examples template
dev_InputExamples = []
for q in q_p:
for p in q.paragraphs:
dev_InputExamples.append(bert_utils.InputExample(p.tokens, q.tokens, p.start_index, p.end_index))
print ()
print ("Length of train_InputExamples: ", len(dev_InputExamples))
print ()
####################
### Some defaults:
bert_model = 'bert-base-uncased'
# bert_model = 'bert-large-uncased-whole-word-masking'
MAX_SEQ_LENGTH = 384
do_lower_case = True
batch_size = 180 * 4 # 192 * 4
learning_rate = 3e-5
adam_epsilon = 1e-8
num_epochs = 5
warmup_proportion = 0.0
gradient_accumulation_steps = 1
seed = 5
logging_steps = 50
save_steps = 1000
max_grad_norm = 1.0
num_paragraphs_per_question = int(args.num_paragraphs_per_question) # 4 #1 #5 #15 #4
#####################################
# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
n_gpu = torch.cuda.device_count()
else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
# print ('local rank: ', args.local_rank)
torch.cuda.set_device(args.local_rank)
device = torch.device("cuda", args.local_rank)
torch.distributed.init_process_group(backend='nccl')
n_gpu = 1
print ()
print ("Number of GPUs: ", n_gpu)
print ()
batch_size = batch_size // gradient_accumulation_steps
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if n_gpu > 0:
torch.cuda.manual_seed_all(seed)
if args.local_rank not in [-1, 0]:
torch.distributed.barrier()
tokenizer = BertTokenizer.from_pretrained(args.output_dir, do_lower_case=do_lower_case)
model_qa = BertQA.from_pretrained(args.output_dir)
model_qa.to(device)
dev_features = bert_utils.convert_examples_to_features(dev_InputExamples, MAX_SEQ_LENGTH, tokenizer)
all_input_ids = torch.tensor([f.input_ids for f in dev_features], dtype=torch.long)
all_input_mask = torch.tensor([f.input_mask for f in dev_features], dtype=torch.long)
all_segment_ids = torch.tensor([f.segment_ids for f in dev_features], dtype=torch.long)
all_start_positions = torch.tensor([f.start_label_ids for f in dev_features], dtype=torch.long)
all_end_positions = torch.tensor([f.end_label_ids for f in dev_features], dtype=torch.long)
dev_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_start_positions, all_end_positions)
dev_sampler = SequentialSampler(dev_data) if args.local_rank == -1 else DistributedSampler(dev_data)
dev_dataloader = DataLoader(dev_data, sampler=dev_sampler, batch_size=batch_size)
if n_gpu > 1:
model_qa = torch.nn.DataParallel(model_qa)
log_softmax = torch.nn.LogSoftmax(dim=0)  # applied to the 1-D concatenated logits below
ctr = 0
start_list = []
end_list = []
for batch in tqdm(dev_dataloader, desc="Evaluating"):
model_qa.eval()
batch = tuple(t.to(device) for t in batch)
with torch.no_grad():
input_ids, input_mask, segment_ids, start_positions, end_positions = batch
start_logits, end_logits = model_qa(input_ids, input_mask, segment_ids)
### split into parts
start_logits = start_logits.detach().cpu()
end_logits = end_logits.detach().cpu()
start_logit_chunks = torch.split(start_logits, num_paragraphs_per_question, dim=0)
end_logit_chunks = torch.split(end_logits, num_paragraphs_per_question, dim=0)
num_chunks = len(start_logit_chunks)
for n in range(num_chunks):
start_logit_list = []
end_logit_list = []
for p in range(num_paragraphs_per_question):
start_logit_list.append(start_logit_chunks[n][p])
end_logit_list.append(end_logit_chunks[n][p])
# print ('start logit list shape: ', torch.cat(start_logit_list).size())
f_start_index = torch.argmax(log_softmax(torch.cat(start_logit_list)))
f_end_index = torch.argmax(log_softmax(torch.cat(end_logit_list)))
s_p, s_index = return_original_index(f_start_index, MAX_SEQ_LENGTH, num_paragraphs_per_question)
e_p, e_index = return_original_index(f_end_index, MAX_SEQ_LENGTH, num_paragraphs_per_question)
start_list.append((s_p, s_index))
end_list.append((e_p, e_index))
ctr += 1
print ("Done batch: ", ctr)
actual_ans = []
pred_ans = []
not_in_same_paragraph = 0
for index, q in enumerate(q_p):
actual_ans.append(q.original_all_answers)
q_tokens = q.tokens
s_p, s_index = start_list[index]
e_p, e_index = end_list[index]
if s_p != e_p:
not_in_same_paragraph += 1
pred_ans.append("")
continue
new_s_index = s_index - len(q_tokens) - 2
new_e_index = e_index - len(q_tokens) - 2
p = q.paragraphs[s_p].tokens
if new_s_index < 0 or new_s_index > len(p) - 1 or new_e_index < 0 or new_e_index > len(p) - 1:
pred_ans.append("")
else:
orig_p = q.paragraphs[s_p].whitespace_tokens
orig_s_index = q.paragraphs[s_p].tokens_to_original_index[new_s_index]
orig_e_index = q.paragraphs[s_p].tokens_to_original_index[new_e_index]
pred_ans.append(" ".join(orig_p[orig_s_index:orig_e_index + 1]))
print ()
print ("Not in same paragraph: ", not_in_same_paragraph)
print ()
pickle.dump(actual_ans, open('actual_ans.pkl', 'wb'))
pickle.dump(pred_ans, open('pred_ans.pkl', 'wb'))
|
11528304
|
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.shapes import Drawing
from reportlab.lib import units
class Barcode:
@staticmethod
def get_barcode(value, width, barWidth=0.05 * units.inch, fontSize=30, humanReadable=True):
barcode = createBarcodeDrawing(
'Code128', value=value, barWidth=barWidth, fontSize=fontSize, humanReadable=humanReadable
)
drawing_width = width
barcode_scale = drawing_width / barcode.width
drawing_height = barcode.height * barcode_scale
drawing = Drawing(drawing_width, drawing_height)
drawing.scale(barcode_scale, barcode_scale)
drawing.add(barcode, name='barcode')
return drawing
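if __name__ == '__main__':
    # Usage sketch (illustrative; assumes reportlab's renderPM raster backend
    # and its PIL dependency are installed). The value and file name are
    # placeholders.
    from reportlab.graphics import renderPM
    drawing = Barcode.get_barcode('SKU-0001', width=3 * units.inch)
    renderPM.drawToFile(drawing, 'barcode.png', fmt='PNG')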
|
11528315
|
from urllib.parse import urlencode, urlsplit
from django.contrib.auth.tokens import default_token_generator
from django.urls import reverse
from django.utils.encoding import force_bytes
from django.utils.http import urlsafe_base64_encode
from templated_email import send_templated_mail
from ..account.models import User
from ..celeryconf import app
from ..core.emails import get_email_context
from ..core.utils import build_absolute_uri
def send_set_password_email_with_url(redirect_url, user, staff=False):
"""Trigger sending a set password email for the given customer/staff."""
template_type = "staff" if staff else "customer"
template = f"dashboard/{template_type}/set_password"
token = default_token_generator.make_token(user)
_send_set_user_password_email_with_url.delay(
user.email, redirect_url, token, template
)
def send_set_password_email(user, staff=False):
"""Trigger sending a set password email for the given customer/staff."""
template_type = "staff" if staff else "customer"
template = f"dashboard/{template_type}/set_password"
token = default_token_generator.make_token(user)
_send_set_user_password_email.delay(user.email, user.pk, token, template)
@app.task
def _send_set_user_password_email_with_url(
recipient_email, redirect_url, token, template_name
):
params = urlencode({"email": recipient_email, "token": token})
password_set_url = urlsplit(redirect_url)
password_set_url = password_set_url._replace(query=params)
_send_set_password_email(recipient_email, password_set_url.geturl(), template_name)
@app.task
def _send_set_user_password_email(recipient_email, user_pk, token, template_name):
uid = urlsafe_base64_encode(force_bytes(user_pk))
password_set_url = build_absolute_uri(
reverse(
"account:reset-password-confirm", kwargs={"token": token, "uidb64": uid}
)
)
_send_set_password_email(recipient_email, password_set_url, template_name)
def _send_set_password_email(recipient_email, password_set_url, template_name):
send_kwargs, ctx = get_email_context()
ctx["password_set_url"] = password_set_url
send_templated_mail(
template_name=template_name,
recipient_list=[recipient_email],
context=ctx,
**send_kwargs,
)
@app.task
def send_promote_customer_to_staff_email(staff_pk):
staff = User.objects.get(pk=staff_pk)
send_kwargs, ctx = get_email_context()
ctx["dashboard_url"] = build_absolute_uri(reverse("dashboard:index"))
send_templated_mail(
template_name="dashboard/staff/promote_customer",
recipient_list=[staff.email],
context=ctx,
**send_kwargs,
)
|
11528339
|
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
from .augment import random_affine, random_photometric
from .flow_util import flow_to_color
from .util import resize_area, resize_bilinear
from .losses import compute_losses, create_border_mask
from ..ops import downsample
from .image_warp import image_warp
from .flownet import flownet, FLOW_SCALE
# REGISTER ALL POSSIBLE LOSS TERMS
LOSSES = ['occ', 'sym', 'fb', 'grad', 'ternary', 'photo', 'smooth_1st', 'smooth_2nd']
def _track_loss(op, name):
tf.add_to_collection('losses', tf.identity(op, name=name))
def _track_image(op, name):
name = 'train/' + name
tf.add_to_collection('train_images', tf.identity(op, name=name))
def unsupervised_loss(batch, params, normalization=None, augment=True,
return_flow=False):
channel_mean = tf.constant(normalization[0]) / 255.0
im1, im2 = batch
im1 = im1 / 255.0
im2 = im2 / 255.0
im_shape = tf.shape(im1)[1:3]
# -------------------------------------------------------------------------
# Data & mask augmentation
border_mask = create_border_mask(im1, 0.1)
if augment:
im1_geo, im2_geo, border_mask_global = random_affine(
[im1, im2, border_mask],
horizontal_flipping=True,
min_scale=0.9, max_scale=1.1
)
# augment locally
im2_geo, border_mask_local = random_affine(
[im2_geo, border_mask],
min_scale=0.9, max_scale=1.1
)
border_mask = border_mask_local * border_mask_global
im1_photo, im2_photo = random_photometric(
[im1_geo, im2_geo],
noise_stddev=0.04, min_contrast=-0.3, max_contrast=0.3,
brightness_stddev=0.02, min_colour=0.9, max_colour=1.1,
min_gamma=0.7, max_gamma=1.5)
_track_image(im1_photo, 'augmented1')
_track_image(im2_photo, 'augmented2')
else:
im1_geo, im2_geo = im1, im2
im1_photo, im2_photo = im1, im2
# Images for loss comparisons with values in [0, 1] (scale to original using * 255)
im1_norm = im1_geo
im2_norm = im2_geo
# Images for neural network input with mean-zero values in [-1, 1]
im1_photo = im1_photo - channel_mean
im2_photo = im2_photo - channel_mean
flownet_spec = params.get('flownet', 'S')
full_resolution = params.get('full_res')
train_all = params.get('train_all')
flows_fw, flows_bw = flownet(im1_photo, im2_photo,
flownet_spec=flownet_spec,
full_resolution=full_resolution,
backward_flow=True,
train_all=train_all)
flows_fw = flows_fw[-1]
flows_bw = flows_bw[-1]
# -------------------------------------------------------------------------
# Losses
layer_weights = [12.7, 4.35, 3.9, 3.4, 1.1]
layer_patch_distances = [3, 2, 2, 1, 1]
if full_resolution:
layer_weights = [12.7, 5.5, 5.0, 4.35, 3.9, 3.4, 1.1]
layer_patch_distances = [3, 3] + layer_patch_distances
im1_s = im1_norm
im2_s = im2_norm
mask_s = border_mask
final_flow_scale = FLOW_SCALE * 4
final_flow_fw = flows_fw[0] * final_flow_scale
final_flow_bw = flows_bw[0] * final_flow_scale
else:
im1_s = downsample(im1_norm, 4)
im2_s = downsample(im2_norm, 4)
mask_s = downsample(border_mask, 4)
final_flow_scale = FLOW_SCALE
final_flow_fw = tf.image.resize_bilinear(flows_fw[0], im_shape) * final_flow_scale * 4
final_flow_bw = tf.image.resize_bilinear(flows_bw[0], im_shape) * final_flow_scale * 4
combined_losses = dict()
combined_loss = 0.0
for loss in LOSSES:
combined_losses[loss] = 0.0
if params.get('pyramid_loss'):
flow_enum = enumerate(zip(flows_fw, flows_bw))
else:
flow_enum = [(0, (flows_fw[0], flows_bw[0]))]
for i, flow_pair in flow_enum:
layer_name = "loss" + str(i + 2)
flow_scale = final_flow_scale / (2 ** i)
with tf.variable_scope(layer_name):
layer_weight = layer_weights[i]
flow_fw_s, flow_bw_s = flow_pair
mask_occlusion = params.get('mask_occlusion', '')
assert mask_occlusion in ['fb', 'disocc', '']
losses = compute_losses(im1_s, im2_s,
flow_fw_s * flow_scale, flow_bw_s * flow_scale,
border_mask=mask_s if params.get('border_mask') else None,
mask_occlusion=mask_occlusion,
data_max_distance=layer_patch_distances[i])
layer_loss = 0.0
for loss in LOSSES:
weight_name = loss + '_weight'
if params.get(weight_name):
_track_loss(losses[loss], loss)
layer_loss += params[weight_name] * losses[loss]
combined_losses[loss] += layer_weight * losses[loss]
combined_loss += layer_weight * layer_loss
im1_s = downsample(im1_s, 2)
im2_s = downsample(im2_s, 2)
mask_s = downsample(mask_s, 2)
regularization_loss = tf.losses.get_regularization_loss()
final_loss = combined_loss + regularization_loss
_track_loss(final_loss, 'loss/combined')
for loss in LOSSES:
_track_loss(combined_losses[loss], 'loss/' + loss)
weight_name = loss + '_weight'
if params.get(weight_name):
weight = tf.identity(params[weight_name], name='weight/' + loss)
tf.add_to_collection('params', weight)
if not return_flow:
return final_loss
return final_loss, final_flow_fw, final_flow_bw
|
11528367
|
from __future__ import unicode_literals
from datetime import datetime
from django.contrib.auth.models import User
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.testing.testcase import TestCase
class ChangeDescTests(TestCase):
"""Tests for the ChangeDescription model."""
def test_record_string(self):
"""Testing ChangeDescription.record_field_change with a string value"""
old_value = "abc"
new_value = "def"
changedesc = ChangeDescription()
changedesc.record_field_change("test", old_value, new_value)
self.assertIn("test", changedesc.fields_changed)
self.assertIn("old", changedesc.fields_changed["test"])
self.assertIn("new", changedesc.fields_changed["test"])
self.assertNotIn("added", changedesc.fields_changed["test"])
self.assertNotIn("removed", changedesc.fields_changed["test"])
self.assertEqual(changedesc.fields_changed["test"]["old"],
(old_value,))
self.assertEqual(changedesc.fields_changed["test"]["new"],
(new_value,))
def test_record_list(self):
"""Testing ChangeDescription.record_field_change with a list value"""
old_value = [1, 2, 3]
new_value = [2, 3, 4]
changedesc = ChangeDescription()
changedesc.record_field_change("test", old_value, new_value)
self.assertIn("test", changedesc.fields_changed)
self.assertIn("old", changedesc.fields_changed["test"])
self.assertIn("new", changedesc.fields_changed["test"])
self.assertIn("added", changedesc.fields_changed["test"])
self.assertIn("removed", changedesc.fields_changed["test"])
self.assertEqual(changedesc.fields_changed["test"]["old"],
[(i,) for i in old_value])
self.assertEqual(changedesc.fields_changed["test"]["new"],
[(i,) for i in new_value])
self.assertEqual(changedesc.fields_changed["test"]["added"], [(4,)])
self.assertEqual(changedesc.fields_changed["test"]["removed"], [(1,)])
def test_record_object_list_name_field(self):
"""Testing ChangeDescription.record_field_change with an object list
(using name_field)
"""
class DummyObject(object):
def __init__(self, id):
self.id = id
self.text = "Object %s" % id
def get_absolute_url(self):
return "http://localhost/%s" % self.id
objs = [DummyObject(i) for i in range(4)]
old_value = [objs[0], objs[1], objs[2]]
new_value = [objs[1], objs[2], objs[3]]
changedesc = ChangeDescription()
changedesc.record_field_change("test", old_value, new_value, "text")
self.assertIn("test", changedesc.fields_changed)
self.assertIn("old", changedesc.fields_changed["test"])
self.assertIn("new", changedesc.fields_changed["test"])
self.assertIn("added", changedesc.fields_changed["test"])
self.assertIn("removed", changedesc.fields_changed["test"])
self.assertEqual(set(changedesc.fields_changed["test"]["old"]),
set([(obj.text, obj.get_absolute_url(), obj.id)
for obj in old_value]))
self.assertEqual(set(changedesc.fields_changed["test"]["new"]),
set([(obj.text, obj.get_absolute_url(), obj.id)
for obj in new_value]))
self.assertEqual(set(changedesc.fields_changed["test"]["added"]),
set([(new_value[2].text,
new_value[2].get_absolute_url(),
new_value[2].id)]))
self.assertEqual(set(changedesc.fields_changed["test"]["removed"]),
set([(old_value[0].text,
old_value[0].get_absolute_url(),
old_value[0].id)]))
def test_record_list_mismatch_type(self):
"""Testing ChangeDescription.record_field_change with
mismatched types
"""
changedesc = ChangeDescription()
self.assertRaises(ValueError,
changedesc.record_field_change,
"test", 123, True)
def test_is_new_for_user_with_non_owner(self):
"""Testing ChangeDescription.is_new_for_user with non-owner"""
user1 = User.objects.create_user(username='test-user-1',
email='<EMAIL>')
user2 = User.objects.create_user(username='test-user-2',
email='<EMAIL>')
changedesc = ChangeDescription(
user=user1,
timestamp=datetime(2017, 9, 7, 15, 27, 0))
self.assertTrue(changedesc.is_new_for_user(
user=user2,
last_visited=datetime(2017, 9, 7, 10, 0, 0)))
self.assertFalse(changedesc.is_new_for_user(
user=user2,
last_visited=datetime(2017, 9, 7, 16, 0, 0)))
self.assertFalse(changedesc.is_new_for_user(
user=user2,
last_visited=datetime(2017, 9, 7, 15, 27, 0)))
def test_is_new_for_user_with_owner(self):
"""Testing ChangeDescription.is_new_for_user with owner"""
user = User.objects.create_user(username='test-user',
email='<EMAIL>')
changedesc = ChangeDescription(
user=user,
timestamp=datetime(2017, 9, 7, 15, 27, 0))
self.assertFalse(changedesc.is_new_for_user(
user=user,
last_visited=datetime(2017, 9, 7, 16, 0, 0)))
|
11528386
|
from django.conf.urls import url
from dojo.development_environment import views
urlpatterns = [
# dev envs
url(r'^dev_env$', views.dev_env, name='dev_env'),
url(r'^dev_env/add$', views.add_dev_env,
name='add_dev_env'),
url(r'^dev_env/(?P<deid>\d+)/edit$',
views.edit_dev_env, name='edit_dev_env'),
]
|
11528393
|
import os
from invoke import task
WHEELHOUSE_PATH = os.environ.get('WHEELHOUSE')
CONSTRAINTS_FILE = 'constraints.txt'
@task
def wheelhouse(ctx, develop=False):
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip wheel --find-links={} -r {} --wheel-dir={} -c {}'.format(WHEELHOUSE_PATH, req_file, WHEELHOUSE_PATH, CONSTRAINTS_FILE)
ctx.run(cmd, pty=True)
@task
def install(ctx, develop=False):
ctx.run('python setup.py develop')
req_file = 'dev-requirements.txt' if develop else 'requirements.txt'
cmd = 'pip install --upgrade -r {} -c {}'.format(req_file, CONSTRAINTS_FILE)
if WHEELHOUSE_PATH:
cmd += ' --no-index --find-links={}'.format(WHEELHOUSE_PATH)
ctx.run(cmd, pty=True)
@task
def flake(ctx):
ctx.run('flake8 .', pty=True)
@task
def test(ctx, verbose=False, nocov=False, extension=None, path=None):
"""Run full or customized tests for MFR.
:param ctx: the ``invoke`` context
:param verbose: the flag to increase verbosity
:param nocov: the flag to disable coverage
:param extension: limit the tests to the given extension only
:param path: limit the tests to the given path only
:return: None
"""
flake(ctx)
# `--extension=` and `--path=` are mutually exclusive options
assert not (extension and path)
    if path:
        path = '/{}'.format(path)
    elif extension:
        path = '/extensions/{}/'.format(extension)
    else:
        path = ''
coverage = ' --cov-report term-missing --cov mfr' if not nocov else ''
verbose = '-v' if verbose else ''
cmd = 'py.test{} tests{} {}'.format(coverage, path, verbose)
ctx.run(cmd, pty=True)
@task
def server(ctx):
if os.environ.get('REMOTE_DEBUG', None):
import pydevd
# e.g. '127.0.0.1:5678'
remote_parts = os.environ.get('REMOTE_DEBUG').split(':')
pydevd.settrace(remote_parts[0], port=int(remote_parts[1]), suspend=False, stdoutToServer=True, stderrToServer=True)
from mfr.server.app import serve
serve()
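# Usage sketch (illustrative; invoke derives the CLI flags from the task
# parameters defined above):
#   invoke install --develop
#   invoke test --extension=<name> --verbose
#   invoke server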
|
11528395
|
import os
import time
import numpy as np
import psutil
from classicML import CLASSICML_LOGGER
def _format_and_display_the_time_spent(start_time=None, end_time=None, time_spent_list=None, repeat=None):
"""格式化并显示运行时间.
Arguments:
start_time: float, default=None,
函数开始运行的时间.
end_time: float, default=None,
函数结束的时间.
time_spent_list: numpy.ndarray, default=None,
记录多次运行时间的列表.
repeat: int, default=None,
重复运行的次数.
"""
    if start_time is not None:
        time_spent = (end_time - start_time) * 1000 * 1000  # times come in seconds; convert s -> us.
        if int(time_spent) // 1000 // 1000 > 0:
            CLASSICML_LOGGER.info('Elapsed {:.3f} s'.format(time_spent / 1000 / 1000))
        elif int(time_spent) // 1000 > 0:
            CLASSICML_LOGGER.info('Elapsed {:.3f} ms'.format(time_spent / 1000))
        else:
            CLASSICML_LOGGER.info('Elapsed {:.3f} us'.format(time_spent))
else:
        time_spent_list = time_spent_list * 1000 * 1000  # times come in seconds; convert s -> us.
average_time_spent = np.mean(time_spent_list)
std_time = np.std(time_spent_list)
min_time = np.min(time_spent_list)
max_time = np.max(time_spent_list)
if int(max_time) // 1000 // 1000 > 0:
average_time_spent /= (1000 * 1000)
std_time /= 1000
min_time /= (1000 * 1000)
max_time /= (1000 * 1000)
unit1, unit2 = 's', 'ms'
elif int(max_time) // 1000 > 0:
average_time_spent /= 1000
min_time /= 1000
max_time /= 1000
unit1, unit2 = 'ms', 'us'
else:
unit1, unit2 = 'us', 'us'
        CLASSICML_LOGGER.info('Average time {:.3f} %s ± {:.0f} %s, {:.3f} %s, {:.3f} %s; {:d} loops (mean ± std, max, min)'
                              .format(average_time_spent, std_time, max_time, min_time, repeat)
                              % (unit1, unit2, unit1, unit1))
# TODO(<NAME>, tag:code): merge @timer and @average_timer() so the decorator also works without parentheses.
def average_timer(repeat=5):
"""程序平均计时装饰器.
Arguments:
repeat: int, default=5,
重复运行的次数.
Notes:
- 使用该装饰器统计平均计时会明显降低运行速度,
请在开发时使用, 避免在训练模型时使用.
"""
def decorator(function):
def wrapper(*args, **kwargs):
return_values = None
time_spent_list = list()
for i in range(repeat):
start_time = time.perf_counter()
return_values = function(*args, **kwargs)
end_time = time.perf_counter()
time_spent_list.append(end_time - start_time)
_format_and_display_the_time_spent(None, None, np.asarray(time_spent_list), repeat)
            # The returned values are those of the last run.
return return_values
return wrapper
return decorator
def memory_monitor(function):
"""内存监视装饰器.
Notes:
- 使用该装饰器统计内存信息, 有潜在降低运行速度的可能性.
并且psutil针对的Python优化手段会导致在CC引擎的速度大幅降低.
"""
def wrapper(*args, **kwargs):
return_values = function(*args, **kwargs)
pid = os.getpid()
current_process = psutil.Process(pid)
process_memory = current_process.memory_full_info()
        CLASSICML_LOGGER.info('Memory used {:.5f} MB'.format(process_memory.uss / 1024 / 1024))
return return_values
return wrapper
def timer(function):
"""程序计时装饰器.
"""
def wrapper(*args, **kwargs):
        start_time = time.perf_counter()  # note: time spent in time.sleep() is counted too
return_values = function(*args, **kwargs)
end_time = time.perf_counter()
_format_and_display_the_time_spent(start_time, end_time, None, None)
return return_values
return wrapper
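if __name__ == '__main__':
    # Usage sketch (illustrative): time a single call, then average three runs.
    @timer
    def _square_sum():
        return sum(i * i for i in range(100000))

    @average_timer(repeat=3)
    def _square_sum_avg():
        return sum(i * i for i in range(100000))

    _square_sum()
    _square_sum_avg()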
|
11528504
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
import sys
import tempfile
from observations.r.icecream import icecream
def test_icecream():
"""Test module icecream.py by downloading
icecream.csv and testing shape of
extracted data has 30 rows and 4 columns
"""
test_path = tempfile.mkdtemp()
x_train, metadata = icecream(test_path)
try:
assert x_train.shape == (30, 4)
    except Exception:
        shutil.rmtree(test_path)
        raise
|
11528530
|
import os
import sys
import numpy
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams
import cPickle
from dataset import CIFAR10
from layer import StackedLayer
from classifier import LogisticRegression
from model import ClassicalAutoencoder, ZerobiasAutoencoder, LinearAutoencoder
from preprocess import SubtractMeanAndNormalizeH, PCA
from train import GraddescentMinibatch, Dropout
from params import save_params, load_params, set_params, get_params
from minimize import minimize
#######################
# SET SUPER PARAMETER #
#######################
pca_retain = 800
hid_layer_sizes = [4000, 1000, 4000, 1000, 4000, 1000, 4000]
batchsize = 100
zae_threshold = 1.
momentum = 0.9
pretrain_lr_zae = 1e-3
pretrain_lr_lin = 1e-4
weightdecay = 0.001
pretrain_epc = 600
logreg_lr = 0.5
logreg_epc = 1000
finetune_lr = 5e-3
finetune_epc = 1000
print " "
print "pca_retain =", pca_retain
print "hid_layer_sizes =", hid_layer_sizes
print "batchsize =", batchsize
print "zae_threshold =", zae_threshold
print "momentum =", momentum
print "pretrain, zae: lr = %f, epc = %d" % (pretrain_lr_zae, pretrain_epc)
print "pretrain, lin: lr = %f, epc = %d, wd = %.3f" % (pretrain_lr_lin, pretrain_epc, weightdecay)
print "logistic regression: lr = %f, epc = %d" % (logreg_lr, logreg_epc)
print "finetune: lr = %f, epc = %d" % (finetune_lr, finetune_epc)
#############
# LOAD DATA #
#############
cifar10_data = CIFAR10()
train_x, train_y = cifar10_data.get_train_set()
test_x, test_y = cifar10_data.get_test_set()
print "\n... pre-processing"
preprocess_model = SubtractMeanAndNormalizeH(train_x.shape[1])
map_fun = theano.function([preprocess_model.varin], preprocess_model.output())
pca_obj = PCA()
pca_obj.fit(map_fun(train_x), retain=pca_retain, whiten=True)
preprocess_model = preprocess_model + pca_obj.forward_layer
preprocess_function = theano.function([preprocess_model.varin], preprocess_model.output())
train_x = preprocess_function(train_x)
test_x = preprocess_function(test_x)
feature_num = train_x.shape[0] * train_x.shape[1]
train_x = theano.shared(value=train_x, name='train_x', borrow=True)
train_y = theano.shared(value=train_y, name='train_y', borrow=True)
test_x = theano.shared(value=test_x, name='test_x', borrow=True)
test_y = theano.shared(value=test_y, name='test_y', borrow=True)
print "Done."
#########################
# BUILD PRE-TRAIN MODEL #
#########################
print "... building pre-train model"
npy_rng = numpy.random.RandomState(123)
model = ZerobiasAutoencoder(
train_x.get_value().shape[1], hid_layer_sizes[0],
init_w = theano.shared(
value=0.01 * train_x.get_value()[:hid_layer_sizes[0], :].T,
name='w_zae_0',
borrow=True
),
threshold=zae_threshold, vistype='real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[0]
) + LinearAutoencoder(
hid_layer_sizes[0], hid_layer_sizes[1],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[0] * hid_layer_sizes[1] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[0] * hid_layer_sizes[1])].reshape(
hid_layer_sizes[0], hid_layer_sizes[1]
),
name='w_ae_1',
borrow=True
),
vistype = 'real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[1]
) + ZerobiasAutoencoder(
hid_layer_sizes[1], hid_layer_sizes[2],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[1] * hid_layer_sizes[2] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[1] * hid_layer_sizes[2])].reshape(
hid_layer_sizes[1], hid_layer_sizes[2]
),
name='w_zae_2',
borrow=True
),
threshold=zae_threshold, vistype='real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[2]
) + LinearAutoencoder(
hid_layer_sizes[2], hid_layer_sizes[3],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[2] * hid_layer_sizes[3] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[2] * hid_layer_sizes[3])].reshape(
hid_layer_sizes[2], hid_layer_sizes[3]
),
name='w_ae_3',
borrow=True
),
vistype = 'real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[3]
) + ZerobiasAutoencoder(
hid_layer_sizes[3], hid_layer_sizes[4],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[3] * hid_layer_sizes[4] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[3] * hid_layer_sizes[4])].reshape(
hid_layer_sizes[3], hid_layer_sizes[4]
),
name='w_zae_4',
borrow=True
),
threshold=zae_threshold, vistype='real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[4]
) + LinearAutoencoder(
hid_layer_sizes[4], hid_layer_sizes[5],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[4] * hid_layer_sizes[5] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[4] * hid_layer_sizes[5])].reshape(
hid_layer_sizes[4], hid_layer_sizes[5]
),
name='w_ae_5',
borrow=True
),
vistype = 'real', tie=True, npy_rng=npy_rng
) + SubtractMeanAndNormalizeH(hid_layer_sizes[5]
) + ZerobiasAutoencoder(
hid_layer_sizes[5], hid_layer_sizes[6],
init_w = theano.shared(
value=numpy.tile(
0.01 * train_x.get_value(),
(hid_layer_sizes[5] * hid_layer_sizes[6] / feature_num + 1, 1)
).flatten()[:(hid_layer_sizes[5] * hid_layer_sizes[6])].reshape(
hid_layer_sizes[5], hid_layer_sizes[6]
),
name='w_zae_6',
borrow=True
),
threshold=zae_threshold, vistype='real', tie=True, npy_rng=npy_rng
)
model.models_stack[2].params = [model.models_stack[2].w]
model.models_stack[2].params_private = [model.models_stack[2].w, model.models_stack[2].bT]
model.models_stack[6].params = [model.models_stack[6].w]
model.models_stack[6].params_private = [model.models_stack[6].w, model.models_stack[6].bT]
model.models_stack[10].params = [model.models_stack[10].w]
model.models_stack[10].params_private = [model.models_stack[10].w, model.models_stack[10].bT]
model.print_layer()
print "Done."
#############
# PRE-TRAIN #
#############
theano_rng = RandomStreams(123)
for i in range(0, len(model.models_stack), 2):
if (i + 2) % 4 == 0:
model.models_stack[i-2].threshold = 0.
model.models_stack[i-1].varin = model.models_stack[i-2].output()
print "\n\nPre-training layer %d:" % i
layer_dropout = Dropout(model.models_stack[i], droprates=[0.2, 0.5], theano_rng=theano_rng).dropout_model
layer_dropout.varin = model.models_stack[i].varin
if (i + 2) % 4 == 0:
pretrain_lr = pretrain_lr_lin
layer_cost = layer_dropout.cost() + layer_dropout.weightdecay(weightdecay)
else:
pretrain_lr = pretrain_lr_zae
layer_cost = layer_dropout.cost()
trainer = GraddescentMinibatch(
varin=model.varin, data=train_x,
cost=layer_cost,
params=layer_dropout.params_private,
supervised=False,
batchsize=batchsize, learningrate=pretrain_lr, momentum=momentum,
rng=npy_rng
)
prev_cost = numpy.inf
patience = 0
for epoch in xrange(pretrain_epc):
cost = trainer.epoch()
if prev_cost <= cost:
patience += 1
if patience > 10:
patience = 0
trainer.set_learningrate(0.9 * trainer.learningrate)
if trainer.learningrate < 1e-10:
break
prev_cost = cost
save_params(model, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_normhid_nolinb_cae1_dropout.npy')
print "Done."
#########################
# BUILD FINE-TUNE MODEL #
#########################
print "\n\n... building fine-tune model -- contraction 1"
for imodel in model.models_stack:
imodel.threshold = 0.
model_ft = model + LogisticRegression(
hid_layer_sizes[-1], 10, npy_rng=npy_rng
)
model_ft.print_layer()
train_set_error_rate = theano.function(
[],
T.mean(T.neq(model_ft.models_stack[-1].predict(), train_y)),
givens = {model_ft.varin : train_x},
)
test_set_error_rate = theano.function(
[],
T.mean(T.neq(model_ft.models_stack[-1].predict(), test_y)),
givens = {model_ft.varin : test_x},
)
print "Done."
print "... training with conjugate gradient: minimize.py"
fun_cost = theano.function(
[model_ft.varin, model_ft.models_stack[-1].vartruth],
model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay)
)
def return_cost(test_params, input_x, truth_y):
tmp = get_params(model_ft.models_stack[-1])
set_params(model_ft.models_stack[-1], test_params)
result = fun_cost(input_x, truth_y)
set_params(model_ft.models_stack[-1], tmp)
return result
fun_grad = theano.function(
[model_ft.varin, model_ft.models_stack[-1].vartruth],
T.grad(model_ft.models_stack[-1].cost() + model_ft.models_stack[-1].weightdecay(weightdecay),
model_ft.models_stack[-1].params)
)
def return_grad(test_params, input_x, truth_y):
tmp = get_params(model_ft.models_stack[-1])
set_params(model_ft.models_stack[-1], test_params)
result = numpy.concatenate([numpy.array(i).flatten() for i in fun_grad(input_x, truth_y)])
set_params(model_ft.models_stack[-1], tmp)
return result
p, g, numlinesearches = minimize(
get_params(model_ft.models_stack[-1]), return_cost, return_grad,
(train_x.get_value(), train_y.get_value()), logreg_epc, verbose=False
)
set_params(model_ft.models_stack[-1], p)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout.npy')
print "***error rate: train: %f, test: %f" % (
train_set_error_rate(), test_set_error_rate()
)
#############
# FINE-TUNE #
#############
"""
print "\n\n... fine-tuning the whole network"
truth = T.lmatrix('truth')
trainer = GraddescentMinibatch(
varin=model_ft.varin, data=train_x,
truth=model_ft.models_stack[-1].vartruth, truth_data=train_y,
supervised=True,
cost=model_ft.models_stack[-1].cost(),
params=model.params,
batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
rng=npy_rng
)
prev_cost = numpy.inf
for epoch in xrange(finetune_epc):
cost = trainer.epoch()
if epoch % 100 == 0 and epoch != 0: # prev_cost <= cost:
trainer.set_learningrate(trainer.learningrate*0.8)
if epoch % 50 == 0:
print "***error rate: train: %f, test: %f" % (
train_set_error_rate(), test_set_error_rate()
)
prev_cost = cost
print "Done."
"""
print "\n\n... fine-tuning the whole network, with dropout"
theano_rng = RandomStreams(123)
dropout_ft = Dropout(model_ft, droprates=[0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1], theano_rng=theano_rng).dropout_model
dropout_ft.print_layer()
trainer = GraddescentMinibatch(
varin=dropout_ft.varin, data=train_x,
truth=dropout_ft.models_stack[-1].vartruth, truth_data=train_y,
supervised=True,
cost=dropout_ft.models_stack[-1].cost(),
params=dropout_ft.params,
batchsize=batchsize, learningrate=finetune_lr, momentum=momentum,
rng=npy_rng
)
prev_cost = numpy.inf
patience = 0
for epoch in xrange(1000):
cost = trainer.epoch()
if prev_cost <= cost:
patience += 1
if patience > 5:
patience = 0
trainer.set_learningrate(trainer.learningrate * 0.9)
if trainer.learningrate < 1e-10:
break
print "***error rate: train: %f, test: %f" % (train_set_error_rate(), test_set_error_rate())
prev_cost = cost
print "Done."
print "***FINAL error rate, train: %f, test: %f" % (
train_set_error_rate(), test_set_error_rate()
)
save_params(model_ft, 'ZLIN_4000_1000_4000_1000_4000_1000_4000_10_normhid_nolinb_cae1_dropout_dpft.npy')
|
11528546
|
from django.core.management.base import BaseCommand
from datetime import date
from intake import models
from formation.fields import DateOfBirthField
class Command(BaseCommand):
help = "Fills dob column from answers['dob'] field"
def handle(self, *args, **options):
subs = models.FormSubmission.objects.filter(dob__isnull=True)
migrated = 0
errored = 0
for sub in subs:
if not sub.dob:
try:
field = DateOfBirthField(sub.answers)
if field.is_valid():
sub.dob = date(**field.get_current_value())
sub.save(update_fields=['dob'])
migrated += 1
else:
print(vars(sub))
errored += 1
except Exception as e:
print(e)
print(vars(sub))
errored += 1
self.stdout.write(self.style.SUCCESS(
"Updated dob on {} submissions".format(migrated)))
self.stdout.write(self.style.ERROR(
"Failed to parse or update {} submissions".format(errored)))
|
11528547
|
import unittest
from asq.queryables import Queryable
from asq.test.test_queryable import infinite
__author__ = "<NAME>"
class TestEqualOperator(unittest.TestCase):
def test_eq_positive(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a) == b
self.assertTrue(c)
def test_eq_negative(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 5, 16, 32)
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_shorter_longer(self):
a = [1, 2, 3]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_longer_shorter(self):
a = [1, 2, 3, 4, 5, 6]
b = (1, 2, 3)
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_empty(self):
a = []
b = ()
c = Queryable(a) == b
self.assertTrue(c)
def test_eq_non_iterable(self):
a = [1, 2, 3]
b = None
self.assertRaises(TypeError, lambda: Queryable(a) == b)
def test_eq_order(self):
a = [1, 2]
b = (2, 1)
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_finite_infinite(self):
a = infinite()
b = (1, 2, 3, 5, 16, 32)
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_infinite_finite(self):
a = (1, 2, 3, 5, 16, 32)
b = infinite()
c = Queryable(a) == b
self.assertFalse(c)
def test_eq_closed(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a)
c.close()
self.assertRaises(ValueError, lambda: c == b)
class TestNotEqualOperator(unittest.TestCase):
def test_ne_negative(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a) != b
self.assertFalse(c)
def test_ne_positive(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 5, 16, 32)
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_shorter_longer(self):
a = [1, 2, 3]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_longer_shorter(self):
a = [1, 2, 3, 4, 5, 6]
b = (1, 2, 3)
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_empty(self):
a = []
b = ()
c = Queryable(a) != b
self.assertFalse(c)
def test_ne_non_iterable(self):
a = [1, 2, 3]
b = None
self.assertRaises(TypeError, lambda: Queryable(a) != b)
def test_ne_order(self):
a = [1, 2]
b = (2, 1)
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_finite_infinite(self):
a = infinite()
b = (1, 2, 3, 5, 16, 32)
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_infinite_finite(self):
a = (1, 2, 3, 5, 16, 32)
b = infinite()
c = Queryable(a) != b
self.assertTrue(c)
def test_ne_closed(self):
a = [1, 2, 3, 4, 16, 32]
b = (1, 2, 3, 4, 16, 32)
c = Queryable(a)
c.close()
self.assertRaises(ValueError, lambda: c != b)
|
11528565
|
from arekit.common.data.input.providers.columns.opinion import OpinionColumnsProvider
from arekit.common.data.input.providers.columns.sample import SampleColumnsProvider
from arekit.common.data.input.providers.opinions import InputTextOpinionProvider
from arekit.common.data.input.providers.rows.opinions import BaseOpinionsRowProvider
from arekit.common.data.input.repositories.opinions import BaseInputOpinionsRepository
from arekit.common.data.input.repositories.sample import BaseInputSamplesRepository
from arekit.common.data.storages.base import BaseRowsStorage
from arekit.common.experiment.api.base import BaseExperiment
from arekit.common.experiment.data_type import DataType
from arekit.common.experiment.engine import ExperimentEngine
from arekit.common.labels.str_fmt import StringLabelsFormatter
from arekit.contrib.bert.samplers.factory import create_bert_sample_provider
class BertExperimentInputSerializer(ExperimentEngine):
def __init__(self, experiment,
labels_formatter,
skip_if_folder_exists,
sample_provider_type,
entity_formatter,
balance_train_samples):
assert(isinstance(experiment, BaseExperiment))
assert(isinstance(skip_if_folder_exists, bool))
assert(isinstance(labels_formatter, StringLabelsFormatter))
super(BertExperimentInputSerializer, self).__init__(experiment)
self.__skip_if_folder_exists = skip_if_folder_exists
self.__entity_formatter = entity_formatter
self.__sample_provider_type = sample_provider_type
self.__balance_train_samples = balance_train_samples
self.__labels_formatter = labels_formatter
# region private methods
def __handle_iteration(self, data_type):
assert(isinstance(data_type, DataType))
# Create samples formatter.
sample_rows_provider = create_bert_sample_provider(
labels_formatter=self.__labels_formatter,
provider_type=self.__sample_provider_type,
label_scaler=self._experiment.DataIO.LabelsScaler,
entity_formatter=self.__entity_formatter)
# Create repositories
opinions_repo = BaseInputOpinionsRepository(
columns_provider=OpinionColumnsProvider(),
rows_provider=BaseOpinionsRowProvider(),
storage=BaseRowsStorage())
samples_repo = BaseInputSamplesRepository(
columns_provider=SampleColumnsProvider(store_labels=True),
rows_provider=sample_rows_provider,
storage=BaseRowsStorage())
# Create opinion provider
opinion_provider = InputTextOpinionProvider.create(
value_to_group_id_func=None,
parse_news_func=lambda doc_id: self._experiment.DocumentOperations.parse_doc(doc_id),
iter_doc_opins=lambda doc_id:
self._experiment.OpinionOperations.iter_opinions_for_extraction(doc_id=doc_id, data_type=data_type),
terms_per_context=self._experiment.DataIO.TermsPerContext)
# Populate repositories
opinions_repo.populate(opinion_provider=opinion_provider,
doc_ids=list(self._experiment.DocumentOperations.iter_doc_ids(data_type)),
desc="opinion")
samples_repo.populate(opinion_provider=opinion_provider,
doc_ids=list(self._experiment.DocumentOperations.iter_doc_ids(data_type)),
desc="sample")
if self._experiment.ExperimentIO.balance_samples(data_type=data_type, balance=self.__balance_train_samples):
samples_repo.balance()
# Save repositories
samples_repo.write(
target=self._experiment.ExperimentIO.create_samples_writer_target(data_type),
writer=self._experiment.ExperimentIO.create_samples_writer())
opinions_repo.write(
target=self._experiment.ExperimentIO.create_opinions_writer_target(data_type),
writer=self._experiment.ExperimentIO.create_opinions_writer())
# endregion
# region protected methods
def _handle_iteration(self, it_index):
""" Performing data serialization for a particular iteration
"""
for data_type in self._experiment.DocumentOperations.DataFolding.iter_supported_data_types():
self.__handle_iteration(data_type)
def _before_running(self):
self._logger.info("Perform annotation ...")
for data_type in self._experiment.DocumentOperations.DataFolding.iter_supported_data_types():
collections_it = self._experiment.DataIO.Annotator.iter_annotated_collections(
data_type=data_type,
opin_ops=self._experiment.OpinionOperations,
doc_ops=self._experiment.DocumentOperations)
for doc_id, collection in collections_it:
target = self._experiment.ExperimentIO.create_opinion_collection_target(
doc_id=doc_id,
data_type=data_type)
self._experiment.write_opinion_collection(
collection=collection,
target=target,
labels_formatter=self.__labels_formatter)
# endregion
|
11528582
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorFuelCellAuxiliaryHeater
log = logging.getLogger(__name__)
class TestGeneratorFuelCellAuxiliaryHeater(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_generatorfuelcellauxiliaryheater(self):
pyidf.validation_level = ValidationLevel.error
obj = GeneratorFuelCellAuxiliaryHeater()
# alpha
var_name = "Name"
obj.name = var_name
# real
var_excess_air_ratio = 2.2
obj.excess_air_ratio = var_excess_air_ratio
# real
var_ancillary_power_constant_term = 3.3
obj.ancillary_power_constant_term = var_ancillary_power_constant_term
# real
var_ancillary_power_linear_term = 4.4
obj.ancillary_power_linear_term = var_ancillary_power_linear_term
# real
var_skin_loss_ufactor_times_area_value = 5.5
obj.skin_loss_ufactor_times_area_value = var_skin_loss_ufactor_times_area_value
# alpha
var_skin_loss_destination = "SurroundingZone"
obj.skin_loss_destination = var_skin_loss_destination
# object-list
var_zone_name_to_receive_skin_losses = "object-list|Zone Name to Receive Skin Losses"
obj.zone_name_to_receive_skin_losses = var_zone_name_to_receive_skin_losses
# alpha
var_heating_capacity_units = "Watts"
obj.heating_capacity_units = var_heating_capacity_units
# real
var_maximum_heating_capacity_in_watts = 9.9
obj.maximum_heating_capacity_in_watts = var_maximum_heating_capacity_in_watts
# real
var_minimum_heating_capacity_in_watts = 10.1
obj.minimum_heating_capacity_in_watts = var_minimum_heating_capacity_in_watts
# real
var_maximum_heating_capacity_in_kmol_per_second = 11.11
obj.maximum_heating_capacity_in_kmol_per_second = var_maximum_heating_capacity_in_kmol_per_second
# real
var_minimum_heating_capacity_in_kmol_per_second = 12.12
obj.minimum_heating_capacity_in_kmol_per_second = var_minimum_heating_capacity_in_kmol_per_second
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].name, var_name)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].excess_air_ratio, var_excess_air_ratio)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].ancillary_power_constant_term, var_ancillary_power_constant_term)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].ancillary_power_linear_term, var_ancillary_power_linear_term)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].skin_loss_ufactor_times_area_value, var_skin_loss_ufactor_times_area_value)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].skin_loss_destination, var_skin_loss_destination)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].zone_name_to_receive_skin_losses, var_zone_name_to_receive_skin_losses)
self.assertEqual(idf2.generatorfuelcellauxiliaryheaters[0].heating_capacity_units, var_heating_capacity_units)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].maximum_heating_capacity_in_watts, var_maximum_heating_capacity_in_watts)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].minimum_heating_capacity_in_watts, var_minimum_heating_capacity_in_watts)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].maximum_heating_capacity_in_kmol_per_second, var_maximum_heating_capacity_in_kmol_per_second)
self.assertAlmostEqual(idf2.generatorfuelcellauxiliaryheaters[0].minimum_heating_capacity_in_kmol_per_second, var_minimum_heating_capacity_in_kmol_per_second)
|
11528584
|
from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="archivy_extra_metadata",
version="0.1.0",
author="Uzay-G",
description=(
"Archivy extension to add some metadata at the end of your notes / bookmarks."
),
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
],
packages=find_packages(),
entry_points="""
[archivy.plugins]
extra-metadata=archivy_extra_metadata:extra_metadata
""",
)
|
11528586
|
import math
import os
import unittest
import numpy as np
import pandas as pd
from scripts.utils.import_patstat import convert_patstat_data_to_data_frame
def create_df_with_unused_columns(df_dict, unused_keys):
dict_key = list(df_dict.keys())[0]
num_copies = len(df_dict[dict_key])
for unused_key in unused_keys:
df_dict[unused_key] = [unused_key] * num_copies
return pd.DataFrame(df_dict)
class TestPatstatToDataFrame(unittest.TestCase):
df = None # will be populated by setUpClass
# noinspection PyBroadException
def assertIsNaN(self, value):
msg = f'{str(value)} is not NaN'
try:
if not math.isnan(value):
self.fail(msg)
except:
self.fail(msg)
def setup(self):
# Design of test data:
# appln_id defines patent UID
# family id 'family45' contains appln_id 99, 01, 02
# family id 'family123' contains appln_id 09
# In family45:
# appln_id 99 has no associated applicants or inventors; DE abstract
# appln_id 01 has 2 inventors (12,21)[best cities], 1 applicant (20) and AU abstract
# appln_id 02 has 2 inventors (76,1), 1 applicant (8) [best city] and GB abstract [best abstract]
long_abstract1_same_length = 'abstract for patent appln_id 01 longest here'
long_abstract2_same_length = 'abstract for patent appln_id 02 longest blah'
df_abstract0 = pd.DataFrame({
'appln_id': ['09', '01'],
'appln_abstract_lg': ['abslg0', 'abslg1'],
'appln_abstract': ['abstract for patent appln_id 09', long_abstract1_same_length],
})
df_abstract1 = pd.DataFrame({
'appln_id': ['02', '99', '0'],
'appln_abstract_lg': ['abslg2', 'abslg99', 'abslg0'],
'appln_abstract': [long_abstract2_same_length, 'abstract for appln_id 99',
'abstract for patent appln_id 0'],
})
title_unused_columns = ['appln_title_lg']
df_title0 = create_df_with_unused_columns({
'appln_id': ['09', '01'],
'appln_title': ['Title of patent #09', "Patent #01's title"]},
title_unused_columns)
df_title1 = create_df_with_unused_columns({
'appln_id': ['02'],
'appln_title': ['Title 2']},
title_unused_columns)
applications_unused_columns = ['appln_kind', 'appln_nr', 'appln_nr_epodoc', 'ipr_type', 'internat_appln_id',
'int_phase', 'reg_phase', 'nat_phase', 'earliest_filing_date',
'earliest_filing_year', 'earliest_filing_id', 'earliest_publn_year',
'earliest_pat_publn_id', 'granted', 'inpadoc_family_id', 'docdb_family_size',
'nb_citing_docdb_fam', 'nb_applicants', 'nb_inventors', 'receiving_office']
df_applications0 = create_df_with_unused_columns({
'appln_id': ['99', '09', '01'],
'appln_nr_original': ['apporig_99', 'apporig_09', 'apporig_01'],
'appln_filing_date': ['1999-01-01', '2010-10-01', '1999-07-12'],
'earliest_publn_date': ['2001-07-21', '2011-02-12', '1999-11-23'],
'appln_filing_year': ['1999', '2010', '1999'],
'appln_auth': ['DE', 'US', 'AU'],
'docdb_family_id': ['family45', 'family123', 'family45']},
applications_unused_columns)
df_applications1 = create_df_with_unused_columns({
'appln_id': ['02', '0'],
'appln_nr_original': ['app_orig02', 'app_orig0'],
'appln_filing_date': ['2007-02-28', '1999-10-17'],
'earliest_publn_date': ['2007-07-08', '1999-11-21'],
'appln_filing_year': ['2007', '1999'],
'appln_auth': ['GB', 'RU'],
'docdb_family_id': ['family45', 'family0']},
applications_unused_columns)
cpc_unused_columns = ['cpc_scheme', 'cpc_version', 'cpc_value', 'cpc_position', 'cpc_gener_auth']
df_cpc0 = create_df_with_unused_columns({
'appln_id': ['09', '01'],
'cpc_class_symbol': ['Q99Q 123/456', 'Y02L 238/7209']},
cpc_unused_columns)
df_cpc1 = create_df_with_unused_columns({
'appln_id': ['02', '02'],
'cpc_class_symbol': ['H01L 24/03', 'Y02L2224/85203']},
cpc_unused_columns)
# APPLT_SEQ_NR >0 => applicant
# INVT_SEQ_NR >0 => inventor
personapp_unused_columns = []
df_personapp0 = create_df_with_unused_columns({
'person_id': ['1', '2', '76', '12'],
'appln_id': ['09', '09', '02', '01'],
'applt_seq_nr': ['0', '1', '0', '0'],
'invt_seq_nr': ['1', '0', '1', '1']},
personapp_unused_columns)
df_personapp1 = create_df_with_unused_columns({
'person_id': ['8', '1', '20', '21'],
'appln_id': ['02', '02', '01', '01'],
'applt_seq_nr': ['1', '0', '1', '0'],
'invt_seq_nr': ['0', '2', '0', '2']},
personapp_unused_columns)
person_unused_columns = ['person_name', 'doc_std_name_id', 'doc_std_name', 'psn_id', 'psn_level']
df_person0 = create_df_with_unused_columns({
'person_id': ['1', '2', '12'],
'person_address': ['73527 Schwäbisch Gmünd', 'somewhere else', np.NaN],
'person_ctry_code': ['DE', 'AU', 'US'],
'psn_name': ['<NAME>', 'SMITH INDUSTRIES', 'A N OTHER'],
'psn_sector': ['INDIVIDUAL', 'COMPANY', 'INDIVIDUAL']},
person_unused_columns)
df_person1 = create_df_with_unused_columns({
'person_id': ['76', '8'],
'person_address': ['Ontario N2L 3W8', 'City Road, Newport NP20 1XJ'],
'person_ctry_code': ['US', 'GB'],
'psn_name': ['A N OTHER', 'FISH I AM'],
'psn_sector': ['INDIVIDUAL', 'COMPANY']},
person_unused_columns)
df_person2 = create_df_with_unused_columns({
'person_id': ['20', '21'],
'person_address': ['home', 'Richard-Bullinger-Strasse 77, 73527 Schwäbisch Gmünd'],
'person_ctry_code': ['GB', 'DE'],
'psn_name': ['<NAME>', '<NAME>'],
'psn_sector': ['COMPANY', 'INDIVIDUAL']},
person_unused_columns)
# duplicates added
df_person3 = create_df_with_unused_columns({
'person_id': ['76', '8'],
'person_address': ['Ontario N2L 3W8', 'home'],
'person_ctry_code': ['US', 'NZ'],
'psn_name': ['<NAME>', '<NAME>'],
'psn_sector': ['INDIVIDUAL', 'COMPANY']},
person_unused_columns)
zip_file_extension = '.zip'
file_prefix = 'tls'
input_folder_name = os.path.join('tests', 'data')
output_folder_name = os.path.join('tests', 'output')
patstat_tables_file_base_name = os.path.join(input_folder_name, file_prefix)
zip_file_names = [
patstat_tables_file_base_name + '203_part01' + zip_file_extension,
patstat_tables_file_base_name + '203_part02' + zip_file_extension,
patstat_tables_file_base_name + '201_part01' + zip_file_extension,
patstat_tables_file_base_name + '201_part02' + zip_file_extension,
patstat_tables_file_base_name + '224_part01' + zip_file_extension,
patstat_tables_file_base_name + '224_part02' + zip_file_extension,
patstat_tables_file_base_name + '207_part01' + zip_file_extension,
patstat_tables_file_base_name + '207_part02' + zip_file_extension,
patstat_tables_file_base_name + '206_part01' + zip_file_extension,
patstat_tables_file_base_name + '206_part02' + zip_file_extension,
patstat_tables_file_base_name + '206_part03' + zip_file_extension,
patstat_tables_file_base_name + '206_part04' + zip_file_extension,
patstat_tables_file_base_name + '202_part01' + zip_file_extension,
patstat_tables_file_base_name + '202_part02' + zip_file_extension
]
text_file_extension = '.txt'
file_names = {
file_prefix + '203_part01' + text_file_extension: df_abstract0,
file_prefix + '203_part02' + text_file_extension: df_abstract1,
file_prefix + '201_part01' + text_file_extension: df_applications0,
file_prefix + '201_part02' + text_file_extension: df_applications1,
file_prefix + '224_part01' + text_file_extension: df_cpc0,
file_prefix + '224_part02' + text_file_extension: df_cpc1,
file_prefix + '207_part01' + text_file_extension: df_personapp0,
file_prefix + '207_part02' + text_file_extension: df_personapp1,
file_prefix + '206_part01' + text_file_extension: df_person0,
file_prefix + '206_part02' + text_file_extension: df_person1,
file_prefix + '206_part03' + text_file_extension: df_person2,
file_prefix + '206_part04' + text_file_extension: df_person3,
file_prefix + '202_part01' + text_file_extension: df_title0,
file_prefix + '202_part02' + text_file_extension: df_title1,
}
pickled_dfs = {}
def to_pickle(df, pickle_file_name):
pickled_dfs[pickle_file_name] = df.copy(deep=True)
def read_pickle(pickle_file_name):
return pickled_dfs[pickle_file_name]
def is_file(file_name):
return file_name in zip_file_names
class StubbedFile(object):
def __init__(self, file_name):
self.__file_name = file_name
self.__df = file_names[file_name]
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def df(self):
return self.__df
def read_csv(stubbed_file):
return stubbed_file.df()
class StubbedZipFile(object):
def __init__(self, file_name):
if file_name not in zip_file_names:
raise ValueError(f'{file_name} not found')
self.file_name = file_name
self.zipped_text_file_name = os.path.splitext(os.path.basename(self.file_name))[0] + '.txt'
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
pass
def namelist(self):
return [self.zipped_text_file_name]
def open(self, file_name_in_archive):
if file_name_in_archive != self.zipped_text_file_name:
raise ValueError(f'{file_name_in_archive} not in zip - expected {self.zipped_text_file_name}')
return StubbedFile(file_name_in_archive)
return input_folder_name, output_folder_name, is_file, StubbedZipFile, read_csv, read_pickle, to_pickle
def extractFullPatstat(self):
        input_folder_name, output_folder_name, is_file, StubbedZipFile, read_csv, read_pickle, to_pickle = self.make_stubs()
return convert_patstat_data_to_data_frame(input_folder_name, output_folder_name, False, None, 1.0, None,
is_file, StubbedZipFile,
read_csv, read_pickle, to_pickle)
def extractLitePatstat(self, date_range=None):
        input_folder_name, output_folder_name, is_file, StubbedZipFile, read_csv, read_pickle, to_pickle = self.make_stubs()
return convert_patstat_data_to_data_frame(input_folder_name, output_folder_name, True, None, 1.0, date_range,
is_file, StubbedZipFile,
read_csv, read_pickle, to_pickle)
def test_reads_patent_grouped_by_family(self):
df = self.extractFullPatstat()
self.assertEqual(3, df.shape[0])
def test_reads_patent_abstract_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertEqual("abstract for patent appln_id 0", df.loc['family0'].abstract)
self.assertEqual("abstract for patent appln_id 02 longest blah", df.loc['family45'].abstract)
self.assertEqual("abstract for patent appln_id 09", df.loc['family123'].abstract)
def test_reads_one_per_family_country_priority(self):
df = self.extractFullPatstat()
self.assertEqual(['family0', 'family45', 'family123'], df.patent_id.tolist())
def test_reads_application_id_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertEqual('app_orig0', df.loc['family0'].application_id)
self.assertEqual('app_orig02', df.loc['family45'].application_id)
self.assertEqual('apporig_09', df.loc['family123'].application_id)
def test_reads_application_date_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertEqual(pd.Timestamp('1999-10-17 00:00:00'), df.loc['family0'].application_date)
self.assertEqual(pd.Timestamp('2007-02-28 00:00:00'), df.loc['family45'].application_date)
self.assertEqual(pd.Timestamp('2010-10-01 00:00:00'), df.loc['family123'].application_date)
def test_reads_publication_date_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertEqual(pd.Timestamp('1999-11-21 00:00:00'), df.loc['family0'].publication_date)
self.assertEqual(pd.Timestamp('2007-07-08 00:00:00'), df.loc['family45'].publication_date)
self.assertEqual(pd.Timestamp('2011-02-12 00:00:00'), df.loc['family123'].publication_date)
def test_reads_application_title_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].invention_title)
self.assertEqual('Title 2', df.loc['family45'].invention_title)
self.assertEqual('Title of patent #09', df.loc['family123'].invention_title)
def test_reads_cpc_codes_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].classifications_cpc)
self.assertEqual(['H01L 24/03', 'Y02L2224/85203'], df.loc['family45'].classifications_cpc)
self.assertEqual(['Q99Q 123/456'], df.loc['family123'].classifications_cpc)
def test_reads_inventor_names_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].inventor_names)
self.assertEqual(['A N OTHER', '<NAME>'], df.loc['family45'].inventor_names)
self.assertEqual(['<NAME>'], df.loc['family123'].inventor_names)
def test_reads_inventor_countries_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].inventor_countries)
self.assertEqual(['US', 'DE'], df.loc['family45'].inventor_countries)
self.assertEqual(['DE'], df.loc['family123'].inventor_countries)
def test_reads_inventor_cities_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].inventor_cities)
self.assertEqual(['Ontario N2L 3W8', '73527 Schwäbisch Gmünd'], df.loc['family45'].inventor_cities)
self.assertEqual(['73527 Schwäbisch Gmünd'], df.loc['family123'].inventor_cities)
def test_reads_applicant_names_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].applicant_organisation)
self.assertEqual(['FISH I AM'], df.loc['family45'].applicant_organisation)
self.assertEqual(['SMITH INDUSTRIES'], df.loc['family123'].applicant_organisation)
def test_reads_applicant_countries_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].applicant_countries)
self.assertEqual(['GB'], df.loc['family45'].applicant_countries)
self.assertEqual(['AU'], df.loc['family123'].applicant_countries)
def test_reads_applicant_cities_preferred_country_in_family(self):
df = self.extractFullPatstat()
self.assertIsNaN(df.loc['family0'].applicant_cities)
self.assertEqual(['Newport NP20 1XJ'], df.loc['family45'].applicant_cities)
self.assertEqual(['somewhere else'], df.loc['family123'].applicant_cities)
    # def test_reads_first_citations(self):
    #     df = self.extractFullPatstat().iloc[0]
    #     self.assertEqual(2, df.appln_id.item())
    #     self.assertEqual(['C11B 3/12', 'C11B 11/005', 'C11B 13/00', 'Y02W 30/74'],
    #                      df.classifications_cpc)
def test_lite_reads_patent_abstract_without_filtering(self):
df = self.extractLitePatstat()
self.assertEqual("abstract for patent appln_id 0", df.loc['family0'].abstract)
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual('abstract for patent appln_id 09', df.loc['family123'].abstract)
def test_lite_reads_patent_publication_date(self):
df = self.extractLitePatstat()
self.assertEqual(pd.Timestamp('1999-11-21 00:00:00'), df.loc['family0'].publication_date)
self.assertEqual(pd.Timestamp('1999-11-23 00:00:00'), df.loc['family45'].publication_date)
self.assertEqual(pd.Timestamp('2011-02-12 00:00:00'), df.loc['family123'].publication_date)
self.assertEqual(3, df.shape[0])
def test_lite_reads_patent_abstract_with_from_date_filtering(self):
df = self.extractLitePatstat(date_range=[pd.to_datetime('1999-11-22'), pd.to_datetime('today')])
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual('abstract for patent appln_id 09', df.loc['family123'].abstract)
self.assertEqual(2, df.shape[0])
def test_lite_reads_patent_abstract_with_from_date_filtering_inclusive_range(self):
df = self.extractLitePatstat(date_range=[pd.to_datetime('1999-11-21'), pd.to_datetime('today')])
self.assertEqual("abstract for patent appln_id 0", df.loc['family0'].abstract)
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual('abstract for patent appln_id 09', df.loc['family123'].abstract)
self.assertEqual(3, df.shape[0])
def test_lite_reads_patent_abstract_with_to_date_filtering(self):
df = self.extractLitePatstat(date_range=[pd.to_datetime('1900-01-01'), pd.to_datetime('2011-02-11')])
self.assertEqual("abstract for patent appln_id 0", df.loc['family0'].abstract)
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual(2, df.shape[0])
def test_lite_reads_patent_abstract_with_from_to_filtering_inclusive_range(self):
df = self.extractLitePatstat(date_range=[pd.to_datetime('1900-01-01'), pd.to_datetime('2011-02-12')])
self.assertEqual("abstract for patent appln_id 0", df.loc['family0'].abstract)
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual('abstract for patent appln_id 09', df.loc['family123'].abstract)
self.assertEqual(3, df.shape[0])
def test_lite_reads_patent_abstract_with_from_and_to_date_filtering(self):
df = self.extractLitePatstat(date_range=[pd.to_datetime('1999-11-22'), pd.to_datetime('2011-02-11')])
self.assertEqual('abstract for patent appln_id 01 longest here', df.loc['family45'].abstract)
self.assertEqual(1, df.shape[0])
def test_lite_stores_specific_columns(self):
df = self.extractLitePatstat()
self.assertListEqual(['publication_date', 'patent_id', 'abstract', 'classifications_cpc'], df.columns.tolist())
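# Allow running this module directly; otherwise the tests are assumed to be
# collected by a discovery-based runner.
if __name__ == '__main__':
    unittest.main()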
|
11528614
|
from __future__ import print_function
import gdbremote_testcase
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestGdbRemoteExpeditedRegisters(
gdbremote_testcase.GdbRemoteTestCaseBase):
mydir = TestBase.compute_mydir(__file__)
@skipIfDarwinEmbedded # <rdar://problem/34539270> lldb-server tests not updated to work on ios etc yet
def gather_expedited_registers(self):
# Setup the stub and set the gdb remote command stream.
procs = self.prep_debug_monitor_and_inferior(inferior_args=["sleep:2"])
self.test_sequence.add_log_lines([
# Start up the inferior.
"read packet: $c#63",
# Immediately tell it to stop. We want to see what it reports.
"read packet: {}".format(chr(3)),
{"direction": "send",
"regex": r"^\$T([0-9a-fA-F]+)([^#]+)#[0-9a-fA-F]{2}$",
"capture": {1: "stop_result",
2: "key_vals_text"}},
], True)
# Run the gdb remote command stream.
context = self.expect_gdbremote_sequence()
self.assertIsNotNone(context)
# Pull out expedited registers.
key_vals_text = context.get("key_vals_text")
self.assertIsNotNone(key_vals_text)
expedited_registers = self.extract_registers_from_stop_notification(
key_vals_text)
self.assertIsNotNone(expedited_registers)
return expedited_registers
def stop_notification_contains_generic_register(
self, generic_register_name):
# Generate a stop reply, parse out expedited registers from stop
# notification.
expedited_registers = self.gather_expedited_registers()
self.assertIsNotNone(expedited_registers)
self.assertTrue(len(expedited_registers) > 0)
# Gather target register infos.
reg_infos = self.gather_register_infos()
# Find the generic register.
reg_info = self.find_generic_register_with_name(
reg_infos, generic_register_name)
self.assertIsNotNone(reg_info)
# Ensure the expedited registers contained it.
self.assertTrue(reg_info["lldb_register_index"] in expedited_registers)
# print("{} reg_info:{}".format(generic_register_name, reg_info))
def stop_notification_contains_any_registers(self):
# Generate a stop reply, parse out expedited registers from stop
# notification.
expedited_registers = self.gather_expedited_registers()
# Verify we have at least one expedited register.
self.assertTrue(len(expedited_registers) > 0)
@debugserver_test
def test_stop_notification_contains_any_registers_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_any_registers()
@llgs_test
def test_stop_notification_contains_any_registers_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_any_registers()
def stop_notification_contains_no_duplicate_registers(self):
# Generate a stop reply, parse out expedited registers from stop
# notification.
expedited_registers = self.gather_expedited_registers()
# Verify no expedited register was specified multiple times.
for (reg_num, value) in list(expedited_registers.items()):
if (isinstance(value, list)) and (len(value) > 0):
self.fail(
"expedited register number {} specified more than once ({} times)".format(
reg_num, len(value)))
@debugserver_test
def test_stop_notification_contains_no_duplicate_registers_debugserver(
self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_no_duplicate_registers()
@llgs_test
def test_stop_notification_contains_no_duplicate_registers_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_no_duplicate_registers()
def stop_notification_contains_pc_register(self):
self.stop_notification_contains_generic_register("pc")
@debugserver_test
def test_stop_notification_contains_pc_register_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_pc_register()
@llgs_test
def test_stop_notification_contains_pc_register_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_pc_register()
# powerpc64 has no FP register
@skipIf(triple='^powerpc64')
def stop_notification_contains_fp_register(self):
self.stop_notification_contains_generic_register("fp")
@debugserver_test
def test_stop_notification_contains_fp_register_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_fp_register()
@llgs_test
def test_stop_notification_contains_fp_register_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_fp_register()
def stop_notification_contains_sp_register(self):
self.stop_notification_contains_generic_register("sp")
@debugserver_test
def test_stop_notification_contains_sp_register_debugserver(self):
self.init_debugserver_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_sp_register()
@llgs_test
def test_stop_notification_contains_sp_register_llgs(self):
self.init_llgs_test()
self.build()
self.set_inferior_startup_launch()
self.stop_notification_contains_sp_register()
|
11528635
|
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
from layers import *
from models.resnet import resnet50
from utils.calc_acc import calc_acc
class Model(nn.Module):
def __init__(self, num_classes=None, drop_last_stride=False, joint_training=False, mix=False, neighbor_mode=1,
**kwargs):
super(Model, self).__init__()
self.drop_last_stride = drop_last_stride
self.joint_training = joint_training
self.mix = mix
self.neighbor_mode = neighbor_mode
self.backbone = resnet50(pretrained=True, drop_last_stride=drop_last_stride)
self.bn_neck = nn.BatchNorm1d(2048)
self.bn_neck.bias.requires_grad_(False)
if kwargs.get('eval'):
return
self.scale = kwargs.get('scale')
# ----------- tasks for source domain --------------
if num_classes is not None:
self.classifier = nn.Linear(2048, num_classes, bias=False)
self.id_loss = nn.CrossEntropyLoss(ignore_index=-1)
# ----------- tasks for target domain --------------
if self.joint_training:
cam_ids = kwargs.get('cam_ids')
num_instances = kwargs.get('num_instances', None)
self.neighbor_eps = kwargs.get('neighbor_eps')
# identities captured by each camera
uid2cam = zip(range(num_instances), cam_ids)
self.cam2uid = defaultdict(list)
for uid, cam in uid2cam:
self.cam2uid[cam].append(uid)
# components for neighborhood consistency
self.exemplar_linear = ExemplarLinear(num_instances, 2048)
self.nn_loss = NNLoss(dim=1)
alpha = kwargs.get('alpha')
self.beta_dist = torch.distributions.beta.Beta(alpha, alpha)
        self.lambda_st = None  # mixup coefficient, set in forward()
@staticmethod
def mix_source_target(inputs, beta_dist):
half_batch_size = inputs.size(0) // 2
source_input = inputs[:half_batch_size]
target_input = inputs[half_batch_size:]
lambd = beta_dist.sample().item()
mixed_input = lambd * source_input + (1 - lambd) * target_input
return mixed_input, lambd
def forward(self, inputs, labels=None, **kwargs):
if not self.training:
global_feat = self.backbone(inputs)
global_feat = self.bn_neck(global_feat)
return global_feat
else:
batch_size = inputs.size(0)
epoch = kwargs.get('epoch')
if self.joint_training and self.mix and epoch > 1:
mixed_st, self.lambda_st = self.mix_source_target(inputs, self.beta_dist)
inputs = torch.cat([mixed_st, inputs[batch_size // 2:]], dim=0)
return self.train_forward(inputs, labels, batch_size, **kwargs)
def train_forward(self, inputs, labels, batch_size, **kwargs):
epoch = kwargs.get('epoch')
inputs = self.backbone(inputs)
if not self.joint_training: # single domain
inputs = self.bn_neck(inputs)
return self.source_train_forward(inputs, labels)
else: # cross domain
half_batch_size = batch_size // 2
label_s = labels[:half_batch_size]
input_t = inputs[-half_batch_size:]
# source task or mixed task
input_s = inputs[:half_batch_size]
feat_s = F.batch_norm(input_s, None, None, self.bn_neck.weight, self.bn_neck.bias, True)
if not self.mix or epoch <= 1:
loss, metric = self.source_train_forward(feat_s, label_s)
else:
loss, metric = self.mixed_st_forward(feat_s, label_s, **kwargs)
# target task
feat_t = self.bn_neck(input_t)
target_loss, target_metric = self.target_train_forward(feat_t, **kwargs)
# summarize loss and metric
loss += target_loss
metric.update(target_metric)
return loss, metric
# Tasks for source domain
def source_train_forward(self, inputs, labels):
metric_dict = {}
cls_score = self.classifier(inputs)
loss = self.id_loss(cls_score.float(), labels)
metric_dict.update({'id_ce': loss.data,
'id_acc': calc_acc(cls_score.data, labels.data, ignore_index=-1)})
return loss, metric_dict
# Tasks for target domain
def target_train_forward(self, inputs, **kwargs):
metric_dict = {}
target_batch_size = inputs.size(0)
epoch = kwargs.get('epoch')
img_ids = kwargs.get('img_ids')[-target_batch_size:]
cam_ids = kwargs.get('cam_ids')[-target_batch_size:]
# inputs = self.dropout(inputs)
feat = F.normalize(inputs)
# Set updating momentum of the exemplar memory.
# Note the momentum must be 0 at the first iteration.
mom = 0.6
self.exemplar_linear.set_momentum(mom if epoch > 1 else 0)
sim = self.exemplar_linear(feat, img_ids).float()
# ----------------------Neighborhood Constraint------------------------- #
# Camera-agnostic neighborhood loss
if self.neighbor_mode == 0:
loss = self.cam_agnostic_eps_nn_loss(sim, img_ids)
metric_dict.update({'neighbor': loss.data})
weight = 0.1 if epoch > 10 else 0
loss = weight * loss
# Camera-aware neighborhood loss (intra_loss and inter_loss)
elif self.neighbor_mode == 1:
intra_loss, inter_loss = self.cam_aware_eps_nn_loss(sim, cam_ids, img_ids=img_ids, epoch=epoch)
metric_dict.update({'intra': intra_loss.data, 'inter': inter_loss.data})
intra_weight = 1.0 if epoch > 10 else 0
inter_weight = 0.5 if epoch > 30 else 0
            loss = intra_weight * intra_loss + inter_weight * inter_loss
        else:
            raise ValueError(f'unsupported neighbor_mode: {self.neighbor_mode}')
        return loss, metric_dict
def mixed_st_forward(self, inputs, labels, **kwargs):
img_ids = kwargs.get('img_ids')[-inputs.size(0):]
agent = self.exemplar_linear.memory[img_ids]
cls_score = F.linear(inputs, self.classifier.weight)
sim_agent = inputs.mul(agent).sum(dim=1, keepdim=True)
sim_agent = sim_agent.mul(self.classifier.weight.data[labels].norm(dim=1, keepdim=True))
cls_score = torch.cat([cls_score, sim_agent], dim=1).float()
virtual_label = labels.clone().fill_(cls_score.size(1) - 1)
loss = self.lambda_st * self.id_loss(cls_score, labels)
loss += (1 - self.lambda_st) * self.id_loss(cls_score, virtual_label)
metric = {'mix_st': loss.data}
return loss, metric
def cam_aware_eps_nn_loss(self, sim, cam_ids, **kwargs):
img_ids = kwargs.get('img_ids')
sim_exp = torch.exp(sim * self.scale)
# calculate mask for intra-camera matching and inter-camera matching
mask_instance, mask_intra, mask_inter = self.compute_mask(sim.size(), img_ids, cam_ids, sim.device)
# intra-camera neighborhood loss
sim_intra = (sim.data + 1) * mask_intra * (1 - mask_instance) - 1
nearest_intra = sim_intra.max(dim=1, keepdim=True)[0]
neighbor_mask_intra = torch.gt(sim_intra, nearest_intra * self.neighbor_eps)
num_neighbor_intra = neighbor_mask_intra.sum(dim=1)
sim_exp_intra = sim_exp * mask_intra
score_intra = sim_exp_intra / sim_exp_intra.sum(dim=1, keepdim=True)
score_intra = score_intra.clamp_min(1e-5)
intra_loss = -score_intra.log().mul(neighbor_mask_intra).sum(dim=1).div(num_neighbor_intra).mean()
intra_loss -= score_intra.masked_select(mask_instance.bool()).log().mean()
# inter-camera neighborhood loss
sim_inter = (sim.data + 1) * mask_inter - 1
nearest_inter = sim_inter.max(dim=1, keepdim=True)[0]
neighbor_mask_inter = torch.gt(sim_inter, nearest_inter * self.neighbor_eps)
num_neighbor_inter = neighbor_mask_inter.sum(dim=1)
sim_exp_inter = mask_inter * sim_exp
score_inter = sim_exp_inter / sim_exp_inter.sum(dim=1, keepdim=True)
score_inter = score_inter.clamp_min(1e-5)
inter_loss = -score_inter.log().mul(neighbor_mask_inter).sum(dim=1).div(num_neighbor_inter).mean()
return intra_loss, inter_loss
def cam_agnostic_eps_nn_loss(self, sim, img_ids):
mask_instance = torch.zeros_like(sim)
mask_instance[torch.arange(sim.size(0)), img_ids] = 1
sim_neighbor = (sim.data + 1) * (1 - mask_instance) - 1
nearest = sim_neighbor.max(dim=1, keepdim=True)[0]
neighbor_mask = torch.gt(sim_neighbor, nearest * self.neighbor_eps)
num_neighbor = neighbor_mask.sum(dim=1)
score = F.log_softmax(sim * self.scale, dim=1)
loss = -score.mul(neighbor_mask).sum(dim=1).div(num_neighbor).mean()
loss -= score.masked_select(mask_instance.bool()).mean()
return loss
def compute_mask(self, size, img_ids, cam_ids, device):
mask_inter = torch.ones(size, device=device)
for i, cam in enumerate(cam_ids.tolist()):
intra_cam_ids = self.cam2uid[cam]
mask_inter[i, intra_cam_ids] = 0
mask_intra = 1 - mask_inter
mask_instance = torch.zeros(size, device=device)
mask_instance[torch.arange(size[0]), img_ids] = 1
return mask_instance, mask_intra, mask_inter
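# Construction sketch (hyper-parameter values are illustrative, not from the
# original training config); joint training consumes the extra kwargs read in
# __init__ above:
# model = Model(num_classes=751, joint_training=True, neighbor_mode=1,
#               cam_ids=[0] * 12936, num_instances=12936, neighbor_eps=0.8,
#               scale=20.0, alpha=0.6)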
|
11528643
|
def binary_to_string(binary):
    """Decode a string of bits into text, eight bits per character."""
    return ''.join(chr(int(binary[a:a + 8], 2))
                   for a in range(0, len(binary), 8))
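# Round-trip sanity check (illustrative, not part of the original file):
if __name__ == '__main__':
    bits = ''.join(format(ord(c), '08b') for c in 'Hi')  # '0100100001101001'
    assert binary_to_string(bits) == 'Hi'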
|
11528663
|
import ctypes
import numpy as np
import matplotlib.pyplot as plt
import time
import os.path
import cv2
rows = 800
cols = 700
height = 36
path = '/mnt/ssd2/od/KITTI/training/velodyne'
print('LiDAR data pre-processing starting...')
# initialize a rows x cols x height array of zeros; the C code fills in the top view maps
indata = np.zeros((rows, cols, height), dtype=np.float32)
# IMPORTANT: CHANGE THE FILE PATH TO THE .so FILE
# create a handle to LidarPreprocess.c
SharedLib = ctypes.cdll.LoadLibrary('./LidarPreprocess.so')
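# Optional hardening (assumed C signature: createTopViewMaps(float *grid, const char *path)):
# declaring argtypes/restype lets ctypes type-check the call below instead of guessing.
SharedLib.createTopViewMaps.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
SharedLib.createTopViewMaps.restype = None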
for frameNum in range(1):
# call the C function to create top view maps
# The np array indata will be edited by createTopViewMaps to populate it with the 8 top view maps
cdata = ctypes.c_void_p(indata.ctypes.data)
apath = bytes(os.path.join(path, '000000.bin'), 'utf-8')
tic = time.time()
SharedLib.createTopViewMaps(cdata, apath)
print("Time", time.time()-tic)
check = np.load(str(os.path.join(path, '000000.npy')))
print("diff", (check-indata).sum())
# At this point, the pre-processed current frame is stored in indata, a rows x cols x height (800x700x36) array.
# Pass indata to the rest of the MV3D pipeline.
# Code to visualize the first 8 top view maps (optional)
np.save('1', indata)
#cv2.imwrite('gt.png', check[:, :, -1])
#cv2.imwrite('test.png', indata[:, :, -1])
# for i in range(8):
# plt.subplot(2, 4, i+1)
# plt.imshow(indata[:,:,i])
# plt.gray()
# plt.show()
print('LiDAR data pre-processing complete for', frameNum + 1, 'frames')
|
11528671
|
import tensorflow as tf
import numpy as np
def smoothed_metric_loss(input_tensor, name='smoothed_triplet_loss', margin=1):
'''
input_tensor: require a tensor with predefined dimensions (No None dimension)
Every two consecutive vectors must be a positive pair. There
should not be more than one pair from each class.
'''
with tf.variable_scope(name):
# Song et al., Deep Metric Learning via Lifted Structured Feature Embedding
# Define feature X \in \mathbb{R}^{N \times C}
X = input_tensor
m = margin
# Compute the pairwise distance
Xe = tf.expand_dims(X, 1)
Dsq = tf.reduce_sum(tf.square(Xe - tf.transpose(Xe, (1, 0, 2))), 2, keep_dims=False)
D = tf.sqrt(Dsq + 1e-8)
expmD = tf.exp(m - D)
# Compute the loss
# Assume that the input data is aligned in a way that two consecutive data form a pair
batch_size, _ = X.get_shape().as_list()
# L_{ij} = \log (\sum_{i, k} exp\{m - D_{ik}\} + \sum_{j, l} exp\{m - D_{jl}\}) + D_{ij}
# L = \frac{1}{2|P|}\sum_{(i,j)\in P} \max(0, J_{i,j})^2
J_all = []
for pair_ind in range(batch_size // 2):
i = pair_ind * 2
j = i + 1
ind_rest = np.hstack([np.arange(0, pair_ind * 2),
np.arange(pair_ind * 2 + 2, batch_size)])
inds = [[i, k] for k in ind_rest]
inds.extend([[j, l] for l in ind_rest])
J_ij = tf.log(tf.reduce_sum(tf.gather_nd(expmD, inds))) + tf.gather_nd(D, [[i, j]])
J_all.append(J_ij)
J_all = tf.convert_to_tensor(J_all)
loss = tf.divide(tf.reduce_mean(tf.square(tf.maximum(J_all, 0))), 2.0, name='metric_loss')
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
return loss
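# Hypothetical usage sketch (shape and margin are illustrative): rows 2k and
# 2k+1 of the batch must form positive pairs, and the batch dimension must be
# statically known because the pair loop above unrolls over it.
if __name__ == '__main__':
    embeddings = tf.placeholder(tf.float32, shape=(8, 128))  # 4 positive pairs
    loss = smoothed_metric_loss(embeddings, margin=1.0)  # scalar loss tensor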
|