hexsha stringlengths 40 40 | size int64 4 1.02M | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 209 | max_stars_repo_name stringlengths 5 121 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 209 | max_issues_repo_name stringlengths 5 121 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 209 | max_forks_repo_name stringlengths 5 121 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 1.02M | avg_line_length float64 1.07 66.1k | max_line_length int64 4 266k | alphanum_fraction float64 0.01 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
682d918e5754cd3360545570fca72b7de87ed1cd | 4,646 | py | Python | bert.py | yifanhunter/BERT-E2E-ABSA | ea6b0ee10891df03956012774671077db00dcc59 | [
"Apache-2.0"
] | null | null | null | bert.py | yifanhunter/BERT-E2E-ABSA | ea6b0ee10891df03956012774671077db00dcc59 | [
"Apache-2.0"
] | 1 | 2020-11-19T12:18:06.000Z | 2020-11-19T12:18:06.000Z | bert.py | yifanhunter/BERT-E2E-ABSA | ea6b0ee10891df03956012774671077db00dcc59 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018 Google AI Language, Google Brain and Carnegie Mellon University Authors and the HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from transformers import PreTrainedModel, BertModel, BertConfig, XLNetModel, XLNetConfig
# model map for BERT
from transformers import BERT_PRETRAINED_MODEL_ARCHIVE_MAP, BERT_PRETRAINED_CONFIG_ARCHIVE_MAP
# model map for XLNet
from transformers import XLNET_PRETRAINED_MODEL_ARCHIVE_MAP, XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP
from transformers.modeling_bert import BertEncoder, BertEmbeddings, BertPooler
import torch.nn as nn
from bert_utils import *
class BertLayerNorm(nn.Module):
    """Layer normalization in the TensorFlow style (epsilon inside the
    square root), applied over the last dimension of the input.
    """

    def __init__(self, hidden_size, eps=1e-12):
        """Create learnable scale (``weight``) and shift (``bias``) vectors
        of length ``hidden_size``; ``eps`` guards against division by zero.
        """
        super(BertLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        """Normalize ``x`` over its last dimension, then apply the affine map."""
        mean = x.mean(-1, keepdim=True)
        # Biased variance (mean of squared deviations), as in the TF original.
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normalized + self.bias
class XLNetLayerNorm(nn.Module):
    """Layer normalization in the TensorFlow style (epsilon inside the
    square root), applied over the last dimension of the input.
    """

    def __init__(self, d_model, eps=1e-12):
        """Create learnable scale (``weight``) and shift (``bias``) vectors
        of length ``d_model``; ``eps`` guards against division by zero.
        """
        super(XLNetLayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(d_model))
        self.bias = nn.Parameter(torch.zeros(d_model))
        self.variance_epsilon = eps

    def forward(self, x):
        """Normalize ``x`` over its last dimension, then apply the affine map."""
        mean = x.mean(-1, keepdim=True)
        # Biased variance (mean of squared deviations), as in the TF original.
        var = (x - mean).pow(2).mean(-1, keepdim=True)
        normalized = (x - mean) / torch.sqrt(var + self.variance_epsilon)
        return self.weight * normalized + self.bias
class BertPreTrainedModel(PreTrainedModel):
    """Abstract base class wiring the BERT config class, pretrained-weight
    archive map, and TF-checkpoint loader into :class:`PreTrainedModel`,
    and providing the BERT weight-initialization scheme.
    """
    config_class = BertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_bert
    base_model_prefix = "bert"

    def __init__(self, *inputs, **kwargs):
        super(BertPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """Initialize the weights of ``module`` in place."""
        if isinstance(module, BertLayerNorm):
            # Layer norm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses
            # truncated_normal; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()
class XLNetPreTrainedModel(PreTrainedModel):
    """Abstract base class wiring the XLNet config class, pretrained-weight
    archive map, and TF-checkpoint loader into :class:`PreTrainedModel`,
    and providing the XLNet weight-initialization scheme.
    """
    config_class = XLNetConfig
    pretrained_model_archive_map = XLNET_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = load_tf_weights_in_xlnet
    base_model_prefix = 'transformer'

    def __init__(self, *inputs, **kwargs):
        super(XLNetPreTrainedModel, self).__init__(*inputs, **kwargs)

    def init_weights(self, module):
        """Initialize the weights of ``module`` in place."""
        if isinstance(module, XLNetLayerNorm):
            # Layer norm starts out as the identity transform.
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
            return
        if isinstance(module, (nn.Linear, nn.Embedding)):
            # Slightly different from the TF version, which uses
            # truncated_normal; cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if isinstance(module, nn.Linear) and module.bias is not None:
                module.bias.data.zero_()
            return
        if isinstance(module, XLNetModel):
            module.mask_emb.data.normal_(mean=0.0, std=self.config.initializer_range)
| 43.420561 | 119 | 0.69393 |
77db169a17ce5a997f9eae677efd74828a39a19d | 269 | py | Python | Curso_em_Video_Exercicios/ex086.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | [
"MIT"
] | null | null | null | Curso_em_Video_Exercicios/ex086.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | [
"MIT"
] | null | null | null | Curso_em_Video_Exercicios/ex086.py | Cohuzer/Exercicios-do-Curso-em-Video | 879cbb53c54ba226e12d9972bc28eadcd521fc10 | [
"MIT"
] | null | null | null | matriz = [[], [], []]
# NOTE(review): `pares` and `soma_3` are initialized but never used below --
# the exercise looks unfinished.
pares = soma_3 = 0
# Read a 3x3 matrix of integers from stdin (prompts are in Portuguese:
# "enter the value for coordinate (i, j)").
for i in range(3):
    for j in range(3):
        matriz[i].append(int(input(f'Insira o valor da coordenada ({i}, {j}): ')))
# Print the matrix row by row, each value centered in a 5-char field.
for i in range(3):
    for j in range(3):
        print(f'[ {matriz[i][j]:^5}]', end=' ')
    print()
| 24.454545 | 82 | 0.509294 |
2484f6f58e821979812dd117ffaacbbbb03caa09 | 1,387 | py | Python | team-violet-code/storage-provider/src/main.py | shaunkane21/omh-dsu-ri | c00e1bccaa29c4efafbb270e5660d062f591c98b | [
"Apache-2.0"
] | null | null | null | team-violet-code/storage-provider/src/main.py | shaunkane21/omh-dsu-ri | c00e1bccaa29c4efafbb270e5660d062f591c98b | [
"Apache-2.0"
] | null | null | null | team-violet-code/storage-provider/src/main.py | shaunkane21/omh-dsu-ri | c00e1bccaa29c4efafbb270e5660d062f591c98b | [
"Apache-2.0"
] | null | null | null | from flask import Flask
from flask_pymongo import PyMongo
from flask_restx import Api, Resource
from flask_cors import CORS
from bson import ObjectId
import json
import datetime
import os
import logging
import sys
class JSONEncoder(json.JSONEncoder):
    """JSON encoder extended to handle Mongo ``ObjectId`` and ``datetime``
    values, both serialized via ``str``, so PyMongo documents can be dumped
    directly.
    """

    def default(self, o):
        # Stringify the two extra types; defer everything else to the base
        # class (which raises TypeError for unknown types).
        if isinstance(o, (ObjectId, datetime.datetime)):
            return str(o)
        return json.JSONEncoder.default(self, o)
# --- Flask application wiring (runs at import time) ---
app = Flask(__name__)
CORS(app)  # allow cross-origin requests to every endpoint
# "mongodb" is the hostname of the database (presumably a docker-compose
# service name -- TODO confirm); "sieve" is the database name.
app.config["MONGO_URI"] = "mongodb://mongodb:27017/sieve"
# app.config['MONGO3_PORT'] = 27019
mongo = PyMongo(app)
app.json_encoder = JSONEncoder  # serialize ObjectId/datetime via str()
api = Api(app)
# NOTE(review): imported after `api`/`mongo` exist, presumably because the
# module registers resources against them at import time -- TODO confirm.
# (The original comment here referenced unrelated PathPlanning/Communications
# objects and looked like a stale copy-paste.)
from .storage_provider import StorageProvider
# Debug output exercising the container's stderr/stdout/log plumbing.
print('Hello world!', file=sys.stderr)
print('This is error output', file=sys.stderr)
print('This is standard output', file=sys.stdout)
app.logger.info('testing info log')
if __name__ == '__main__':
    """
    AS OF RIGHT NOW IT DOES NOT LOOK LIKE THIS MAIN FUNCTION EVER GETS HIT
    """
    # Dev-server entry point; not reached when served by a WSGI container.
    print('__main__ Hello world!', file=sys.stderr)
    print('__main__ This is error output', file=sys.stderr)
    print('__main__ This is standard output', file=sys.stdout)
    app.run(debug=True)
app.logger.info('testing info log') | 25.218182 | 103 | 0.710887 |
e0c2e47d8d1640511d80d638c51a181e87bc7f78 | 12,201 | py | Python | openprocurement/tender/esco/tests/auction.py | raccoongang/openprocurement.tender.esco | 1d86442599d60447a8fd2d79f8d9c2baecd7cb8c | [
"Apache-2.0"
] | null | null | null | openprocurement/tender/esco/tests/auction.py | raccoongang/openprocurement.tender.esco | 1d86442599d60447a8fd2d79f8d9c2baecd7cb8c | [
"Apache-2.0"
] | null | null | null | openprocurement/tender/esco/tests/auction.py | raccoongang/openprocurement.tender.esco | 1d86442599d60447a8fd2d79f8d9c2baecd7cb8c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
from copy import deepcopy
from openprocurement.api.tests.base import snitch
from openprocurement.tender.belowthreshold.tests.auction import (
TenderAuctionResourceTestMixin,
TenderLotAuctionResourceTestMixin,
TenderMultipleLotAuctionResourceTestMixin
)
from openprocurement.tender.openeu.tests.auction_blanks import (
# TenderMultipleLotAuctionResourceTest
patch_tender_2lot_auction,
)
from openprocurement.tender.esco.tests.base import (
BaseESCOContentWebTest,
test_features_tender_data,
test_bids,
test_lots,
)
from openprocurement.tender.esco.tests.auction_blanks import (
# TenderAuctionResourceTest
get_tender_auction,
post_tender_auction,
# TenderLotAuctionResourceTest
get_tender_lot_auction,
post_tender_lot_auction,
# TenderMultipleLotAuctionResourceTest
get_tender_lots_auction,
post_tender_lots_auction,
# TenderAuctionFieldsTest
auction_check_NBUdiscountRate,
auction_check_noticePublicationDate,
# TenderFeaturesAuctionResourceTest
get_tender_auction_feature,
post_tender_auction_feature,
# TenderFeaturesLotAuctionResourceTest
get_tender_lot_auction_feature,
post_tender_lot_auction_feature,
# TenderFeaturesMultipleLotAuctionResourceTest,
get_tender_lots_auction_feature,
post_tender_lots_auction_feature,
# TenderSameValueAuctionResourceTest
post_tender_auction_not_changed,
post_tender_auction_reversed,
)
class TenderAuctionResourceTest(BaseESCOContentWebTest, TenderAuctionResourceTestMixin):
    """Auction endpoint tests for an ESCO tender without lots.

    The second bid is given an explicit ESCO value so the two bids differ.
    """
    #initial_data = tender_data
    initial_auth = ('Basic', ('broker', ''))
    # Deep-copy the shared fixture before editing it: the original assigned
    # into test_bids[1] directly at class-definition time, mutating the
    # module-level fixture imported by every other test class in this file.
    initial_bids = deepcopy(test_bids)
    initial_bids[1]['value'] = {'yearlyPaymentsPercentage': 0.9,
                                'annualCostsReduction': [100] * 21,
                                'contractDuration': {'years': 10, 'days': 10}}

    def setUp(self):
        """Drive the tender through pre-qualification into the stand-still period."""
        super(TenderAuctionResourceTest, self).setUp()
        # switch to active.pre-qualification
        self.time_shift('active.pre-qualification')
        self.app.authorization = ('Basic', ('chronograph', ''))
        response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.pre-qualification")

        # approve every pending qualification as the broker
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.get('/tenders/{}/qualifications?acc_token={}'.format(self.tender_id, self.tender_token))
        for qualific in response.json['data']:
            response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
                self.tender_id, qualific['id'], self.tender_token), {'data': {"status": "active", "qualified": True, "eligible": True}})
            self.assertEqual(response.status, '200 OK')

        # switch to active.pre-qualification.stand-still
        response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
                                       {"data": {"status": "active.pre-qualification.stand-still"}})
        self.assertEqual(response.status, "200 OK")

    test_get_tender_auction = snitch(get_tender_auction)
    test_post_tender_auction = snitch(post_tender_auction)
class TenderSameValueAuctionResourceTest(BaseESCOContentWebTest):
    """Auction tests for a tender where all bids carry identical values,
    exercising ordering of tied bids (see the snitched blanks below).
    """
    initial_status = 'active.auction'
    tenderer_info = deepcopy(test_bids[0]['tenderers'])
    # Three bids that are exact copies of each other (same value dict).
    initial_bids = [
        {
            "tenderers": tenderer_info,
            "value": {
                'yearlyPaymentsPercentage': 0.9,
                'annualCostsReduction': [751.5] * 21,
                'contractDuration': {'years': 10, 'days': 10}
            },
            'selfQualified': True,
            'selfEligible': True
        }
        for i in range(3)
    ]
    def setUp(self):
        """Advance the tender all the way to active.auction."""
        super(TenderSameValueAuctionResourceTest, self).setUp()
        # switch to active.pre-qualification
        self.set_status('active.pre-qualification', {'status': 'active.tendering'})
        self.app.authorization = ('Basic', ('chronograph', ''))
        response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.pre-qualification")
        # approve every qualification using the admin token
        self.app.authorization = ('Basic', ('token', ''))
        response = self.app.get('/tenders/{}/qualifications'.format(self.tender_id))
        for qualific in response.json['data']:
            response = self.app.patch_json('/tenders/{}/qualifications/{}'.format(
                self.tender_id, qualific['id']), {'data': {"status": "active", "qualified": True, "eligible": True}})
            self.assertEqual(response.status, '200 OK')
        # switch to active.pre-qualification.stand-still
        response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
                                       {"data": {"status": "active.pre-qualification.stand-still"}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.pre-qualification.stand-still")
        # switch to active.auction
        self.set_status('active.auction', {'status': 'active.pre-qualification.stand-still'})
        self.app.authorization = ('Basic', ('chronograph', ''))
        response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.auction")
        # self.app.authorization = ('Basic', ('token', ''))
    test_post_tender_auction_not_changed = snitch(post_tender_auction_not_changed)
    test_post_tender_auction_reversed = snitch(post_tender_auction_reversed)
class TenderAuctionFieldsTest(BaseESCOContentWebTest):
    """Checks ESCO-specific fields (NBUdiscountRate, noticePublicationDate)
    exposed on the auction view.
    """
    #initial_data = tender_data
    initial_auth = ('Basic', ('broker', ''))
    # Use an isolated copy of the shared fixture: the original aliased
    # test_bids directly, so this class silently depended on in-place edits
    # made to test_bids by other classes in this module.
    initial_bids = deepcopy(test_bids)

    def setUp(self):
        """Drive the tender through pre-qualification into the stand-still period."""
        super(TenderAuctionFieldsTest, self).setUp()
        # switch to active.pre-qualification
        self.time_shift('active.pre-qualification')
        self.app.authorization = ('Basic', ('chronograph', ''))
        response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.pre-qualification")

        # approve every pending qualification as the broker
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.get('/tenders/{}/qualifications?acc_token={}'.format(self.tender_id, self.tender_token))
        for qualific in response.json['data']:
            response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
                self.tender_id, qualific['id'], self.tender_token), {'data': {"status": "active", "qualified": True, "eligible": True}})
            self.assertEqual(response.status, '200 OK')

        # switch to active.pre-qualification.stand-still
        response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
                                       {"data": {"status": "active.pre-qualification.stand-still"}})
        self.assertEqual(response.status, "200 OK")

    test_auction_check_NBUdiscountRate = snitch(auction_check_NBUdiscountRate)
    test_auction_check_noticePublicationDate = snitch(auction_check_noticePublicationDate)
class TenderLotAuctionResourceTest(TenderLotAuctionResourceTestMixin, TenderAuctionResourceTest):
    """Single-lot variant of the auction tests; inherits the fixture setUp
    from TenderAuctionResourceTest and swaps in lot-aware test blanks.
    """
    initial_lots = test_lots
    # initial_data = test_tender_data

    test_get_tender_auction = snitch(get_tender_lot_auction)
    test_post_tender_auction = snitch(post_tender_lot_auction)
class TenderMultipleLotAuctionResourceTest(TenderMultipleLotAuctionResourceTestMixin, TenderAuctionResourceTest):
    """Two-lot variant of the auction tests; inherits the fixture setUp
    from TenderAuctionResourceTest.
    """
    initial_lots = 2 * test_lots

    test_get_tender_auction = snitch(get_tender_lots_auction)
    test_patch_tender_auction = snitch(patch_tender_2lot_auction)
    test_post_tender_auction = snitch(post_tender_lots_auction)
class TenderFeaturesAuctionResourceTest(BaseESCOContentWebTest):
    """Auction tests for a tender with features: two bids that share the same
    ESCO value but differ in their feature parameter scores (0.03 vs 0.07).
    """
    initial_data = test_features_tender_data
    tenderer_info = deepcopy(test_bids[0]['tenderers'])
    initial_bids = [
        {
            # First bid: lower feature score on every feature.
            "parameters": [
                {
                    "code": i["code"],
                    "value": 0.03,
                }
                for i in test_features_tender_data['features']
            ],
            "tenderers": tenderer_info,
            "value": {
                'yearlyPaymentsPercentage': 0.9,
                'annualCostsReduction': [100] * 21,
                'contractDuration': {'years': 10}
            },
            'selfQualified': True,
            'selfEligible': True
        },
        {
            # Second bid: higher feature score, identical value.
            "parameters": [
                {
                    "code": i["code"],
                    "value": 0.07,
                }
                for i in test_features_tender_data['features']
            ],
            "tenderers": tenderer_info,
            "value": {
                'yearlyPaymentsPercentage': 0.9,
                'annualCostsReduction': [100] * 21,
                'contractDuration': {'years': 10}
            },
            'selfQualified': True,
            'selfEligible': True
        }
    ]
    def setUp(self):
        """Drive the tender through pre-qualification into the stand-still period."""
        super(TenderFeaturesAuctionResourceTest, self).setUp()
        # switch to active.pre-qualification
        self.time_shift('active.pre-qualification')
        self.app.authorization = ('Basic', ('chronograph', ''))
        response = self.app.patch_json('/tenders/{}'.format(self.tender_id), {"data": {"id": self.tender_id}})
        self.assertEqual(response.status, "200 OK")
        self.assertEqual(response.json['data']['status'], "active.pre-qualification")
        # approve every pending qualification as the broker
        self.app.authorization = ('Basic', ('broker', ''))
        response = self.app.get('/tenders/{}/qualifications?acc_token={}'.format(self.tender_id, self.tender_token))
        for qualific in response.json['data']:
            response = self.app.patch_json('/tenders/{}/qualifications/{}?acc_token={}'.format(
                self.tender_id, qualific['id'], self.tender_token), {'data': {"status": "active", "qualified": True, "eligible": True}})
            self.assertEqual(response.status, '200 OK')
        response = self.app.patch_json('/tenders/{}?acc_token={}'.format(self.tender_id, self.tender_token),
                                       {"data": {"status": "active.pre-qualification.stand-still"}})
        self.assertEqual(response.status, "200 OK")
        # # switch to active.pre-qualification.stand-still
    test_get_tender_auction = snitch(get_tender_auction_feature)
    test_post_tender_auction = snitch(post_tender_auction_feature)
class TenderFeaturesLotAuctionResourceTest(TenderLotAuctionResourceTestMixin, TenderFeaturesAuctionResourceTest):
    """Single-lot variant of the feature-weighted auction tests."""
    initial_data = test_features_tender_data
    initial_lots = test_lots

    test_get_tender_auction = snitch(get_tender_lot_auction_feature)
    test_post_tender_auction = snitch(post_tender_lot_auction_feature)
class TenderFeaturesMultipleLotAuctionResourceTest(TenderMultipleLotAuctionResourceTestMixin, TenderFeaturesAuctionResourceTest):
    """Two-lot variant of the feature-weighted auction tests."""
    initial_data = test_features_tender_data
    initial_lots = 2 * test_lots

    test_get_tender_auction = snitch(get_tender_lots_auction_feature)
    test_post_tender_auction = snitch(post_tender_lots_auction_feature)
def suite():
    """Aggregate every test class defined in this module into one suite.

    Bug fixed: the original referenced ``TenderAuctionNBUdiscountRateTest``,
    which is not defined anywhere in this module (NameError on every run);
    the class actually defined here is ``TenderAuctionFieldsTest``.  The two
    features lot classes, previously missing, are also added.
    """
    # Avoid shadowing the function name with the local accumulator.
    test_suite = unittest.TestSuite()
    test_suite.addTest(unittest.makeSuite(TenderAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderSameValueAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderAuctionFieldsTest))
    test_suite.addTest(unittest.makeSuite(TenderLotAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderMultipleLotAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderFeaturesAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderFeaturesLotAuctionResourceTest))
    test_suite.addTest(unittest.makeSuite(TenderFeaturesMultipleLotAuctionResourceTest))
    return test_suite


if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| 44.046931 | 136 | 0.668716 |
f1d565ae2a6fdc1d676ed32d10abd155e43a1a8c | 13,750 | py | Python | tests/test_prokaryote_schema.py | KarrLab/wc_kb | 5eed01810c9f7ccb44256307cdbdfc57663fcb98 | [
"MIT"
] | 3 | 2018-12-24T16:20:05.000Z | 2020-12-22T16:48:18.000Z | tests/test_prokaryote_schema.py | KarrLab/wc_kb | 5eed01810c9f7ccb44256307cdbdfc57663fcb98 | [
"MIT"
] | 31 | 2018-03-23T06:51:44.000Z | 2019-09-29T01:11:31.000Z | tests/test_prokaryote_schema.py | KarrLab/wc_kb | 5eed01810c9f7ccb44256307cdbdfc57663fcb98 | [
"MIT"
] | 5 | 2018-12-15T00:52:14.000Z | 2020-04-29T14:15:08.000Z | """ Tests of the knowledge base schema for prokaryotes
:Author: Balazs Szigeti <balazs.szigeti@mssm.edu>
:Author: Jonathan Karr <jonrkarr@gmail.com>
:Author: Bilal Shaikh <bilal.shaikh@columbia.edu>
:Author: Arthur Goldberg <Arthur.Goldberg@mssm.edu>
:Author: Yin Hoon Chew <yinhoon.chew@mssm.edu>
:Date: 2018-02-07
:Copyright: 2018, Karr Lab
:License: MIT
"""
from wc_kb import core, prokaryote
from wc_onto import onto as kbOnt
from wc_utils.util import chem
import Bio.Alphabet
import Bio.Seq
import Bio.SeqUtils
import mendeleev
import os
import shutil
import tempfile
import unittest
class RnaSpeciesTypeTestCase(unittest.TestCase):
    """Tests for prokaryote.RnaSpeciesType built on small synthetic chromosomes.

    Refactored to remove heavy copy-paste duplication: the repeated
    DNA -> transcription unit -> RNA construction is centralized in
    :meth:`_make_rna` and repeated assertions run under ``subTest``.
    """

    def setUp(self):
        """Write a temporary FASTA file holding the test chromosomes."""
        self.tmp_dirname = tempfile.mkdtemp()
        self.sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
        with open(self.sequence_path, 'w') as f:
            f.write('>dna1\nACGTACGTACGTACG\n'
                    '>dna2\nA\n'
                    '>dna3\nC\n'
                    '>dna4\nG\n'
                    '>dna5\nT\n'
                    '>dna6\nAAAA\n'
                    '>dna7\nAACCGGTT\n')

    def tearDown(self):
        shutil.rmtree(self.tmp_dirname)

    def _make_rna(self, dna_id, start, end):
        """Return an RNA transcribed from positions ``start``..``end`` (1-based,
        inclusive) of the chromosome named ``dna_id`` in the test FASTA file.
        """
        dna = core.DnaSpeciesType(id=dna_id, sequence_path=self.sequence_path)
        tu = prokaryote.TranscriptionUnitLocus(
            id='tu1', polymer=dna, start=start, end=end)
        return prokaryote.RnaSpeciesType(
            id='rna1', name='rna1', transcription_units=[tu])

    def test_constructor(self):
        dna1 = core.DnaSpeciesType(id='dna1', sequence_path=self.sequence_path)
        tu1 = prokaryote.TranscriptionUnitLocus(
            id='tu1', polymer=dna1, start=1, end=15)
        rna1 = prokaryote.RnaSpeciesType(
            id='rna1', name='rna1', transcription_units=[tu1], type=1)
        self.assertEqual(rna1.id, 'rna1')
        self.assertEqual(rna1.name, 'rna1')
        self.assertEqual(rna1.transcription_units, [tu1])
        self.assertEqual(rna1.type, 1)

    def test_get_seq(self):
        # Each TU spans its whole chromosome, so the RNA sequence must equal
        # the transcription of the full DNA sequence.
        for dna_id, start, end in [('dna1', 1, 15), ('dna2', 1, 1), ('dna7', 1, 8)]:
            with self.subTest(dna_id=dna_id):
                rna = self._make_rna(dna_id, start, end)
                dna = rna.transcription_units[0].polymer
                self.assertEqual(rna.get_seq(), dna.get_seq().transcribe())

    def test_get_empirical_formula(self):
        cases = [
            ('dna2', 1, 1, 'C10H12N5O7P'),     # single 'A' residue
            ('dna3', 1, 1, 'C9H12N3O8P'),      # single 'C' residue
            ('dna4', 1, 1, 'C10H12N5O8P'),     # single 'G' residue
            ('dna5', 1, 1, 'C9H11N2O9P'),      # 'T' transcribed to 'U'
            ('dna6', 1, 2, 'C20H23N10O13P2'),  # 'AA' dinucleotide
        ]
        for dna_id, start, end, formula in cases:
            with self.subTest(dna_id=dna_id, start=start, end=end):
                rna = self._make_rna(dna_id, start, end)
                self.assertEqual(rna.get_empirical_formula(),
                                 chem.EmpiricalFormula(formula))

    def test_get_charge(self):
        # One extra negative charge per additional phosphate.
        self.assertEqual(self._make_rna('dna6', 1, 1).get_charge(), -2)
        self.assertEqual(self._make_rna('dna6', 1, 2).get_charge(), -3)

    def test_get_mol_wt(self):
        # Expected weight: neutral RNA weight minus one hydrogen per
        # deprotonated site (sequence length + 1).
        for dna_id, start, end in [('dna7', 1, 1), ('dna7', 3, 3), ('dna7', 5, 5),
                                   ('dna7', 1, 8), ('dna1', 1, 15)]:
            with self.subTest(dna_id=dna_id, start=start, end=end):
                rna = self._make_rna(dna_id, start, end)
                exp_mol_wt = \
                    + Bio.SeqUtils.molecular_weight(rna.get_seq()) \
                    - (rna.get_len() + 1) * mendeleev.element('H').atomic_weight
                self.assertAlmostEqual(rna.get_mol_wt(), exp_mol_wt, places=1)
class ProteinSpeciesTypeTestCase(unittest.TestCase):
    """Tests for prokaryote.ProteinSpeciesType against two real genes
    (MPN001, MPN011) from the Mycoplasma genitalium genome fixture.
    """
    def setUp(self):
        # Mycoplasma genitalium genome fixture
        dna1 = core.DnaSpeciesType(id='chromosome', sequence_path='tests/fixtures/prokaryote_seq.fna')
        cell1 = dna1.cell = core.Cell()
        cell1.knowledge_base = core.KnowledgeBase(
            translation_table=4)  # Table 4 is for mycoplasma
        # MPN001, forward strand
        self.prot1 = prokaryote.ProteinSpeciesType(id='prot1', cell=cell1)
        gene1 = prokaryote.GeneLocus(id='gene1', cell=cell1, proteins=self.prot1, polymer=dna1, start=692, end=1834)
        tu1 = prokaryote.TranscriptionUnitLocus(id='tu1', genes=[gene1], polymer=dna1)
        # MPN011, reverse strand
        self.prot2 = prokaryote.ProteinSpeciesType(id='prot2', cell=cell1)
        gene2 = prokaryote.GeneLocus(id='gene2', cell=cell1, proteins=self.prot2, polymer=dna1, start=12838, end=13533, strand=core.PolymerStrand.negative)
        tu2 = prokaryote.TranscriptionUnitLocus(id='tu2', genes=[gene2], polymer=dna1)
    def test_get_seq(self):
        # Uses translation table 4 since the example genes are from
        # Mycoplasma genitalium; check first/last 10 residues of each protein.
        # MPN001
        self.assertEqual(self.prot1.get_seq()[0:10], 'MKVLINKNEL')
        self.assertEqual(self.prot1.get_seq()[-10:], 'ELKEILVPSK')
        # MPN011
        self.assertEqual(self.prot2.get_seq()[0:10], 'MKFKFLLTPL')
        self.assertEqual(self.prot2.get_seq()[-10:], 'LFRYLVYLIE')
    def test_get_empirical_formula(self):
        # MPN001
        self.assertEqual(self.prot1.get_empirical_formula(),
                         chem.EmpiricalFormula('C1980H3146N510O596S7'))
        # MPN011
        self.assertEqual(self.prot2.get_empirical_formula(),
                         chem.EmpiricalFormula('C1246H1928N306O352S3'))
    def test_get_mol_wt(self):
        # MPN001
        self.assertAlmostEqual(self.prot1.get_mol_wt(), 43856.342, delta=0.3)
        # MPN011
        self.assertAlmostEqual(self.prot2.get_mol_wt(), 26923.100, delta=0.3)
    def test_get_charge(self):
        self.assertEqual(self.prot1.get_charge(), 1)
        self.assertEqual(self.prot2.get_charge(), 12)
class TranscriptionUnitLocusTestCase(unittest.TestCase):
    """Tests for the 5'/3' end coordinates of a TranscriptionUnitLocus on
    both strands of a 15 bp test chromosome.
    """

    def setUp(self):
        self.tmp_dirname = tempfile.mkdtemp()
        self.sequence_path = os.path.join(self.tmp_dirname, 'test_seq.fasta')
        with open(self.sequence_path, 'w') as f:
            f.write('>dna1\nACGTACGTACGTACG\n')

    def tearDown(self):
        shutil.rmtree(self.tmp_dirname)

    def _build_tu(self, strand):
        """Create a TU spanning positions 1..15 of the test chromosome on
        ``strand``, with an RNA attached, and return the TU.
        """
        chromosome = core.DnaSpeciesType(id='dna1', sequence_path=self.sequence_path)
        tu = prokaryote.TranscriptionUnitLocus(
            id='tu1', polymer=chromosome, start=1, end=15, strand=strand)
        prokaryote.RnaSpeciesType(id='rna1', name='rna1', transcription_units=[tu])
        return tu

    def test_get_3_prime(self):
        # Forward strand: the 3' end is the end coordinate; on the reverse
        # strand it is the start coordinate.
        self.assertEqual(self._build_tu(core.PolymerStrand.positive).get_3_prime(), 15)
        self.assertEqual(self._build_tu(core.PolymerStrand.negative).get_3_prime(), 1)

    def test_get_5_prime(self):
        # Mirror image of the 3' case.
        self.assertEqual(self._build_tu(core.PolymerStrand.positive).get_5_prime(), 1)
        self.assertEqual(self._build_tu(core.PolymerStrand.negative).get_5_prime(), 15)
class GeneLocusTestCase(unittest.TestCase):
    """Tests for GeneLocus.get_direction on both strands."""
    def test_get_direction(self):
        gene1 = prokaryote.GeneLocus(id='gene1', name='gene1', symbol='gene_1',
                                     strand=core.PolymerStrand.positive, start=1, end=2)
        # start > end on the positive strand reads in the reverse direction.
        gene2 = prokaryote.GeneLocus(id='gene2', name='gene2',
                                     strand=core.PolymerStrand.positive, start=10, end=5)
        self.assertEqual(gene1.id, 'gene1')
        self.assertEqual(gene1.name, 'gene1')
        self.assertEqual(gene1.symbol, 'gene_1')
        self.assertEqual(gene1.start, 1)
        self.assertEqual(gene1.end, 2)
        self.assertEqual(gene1.get_direction(), core.PolymerDirection.forward)
        self.assertEqual(gene2.get_direction(), core.PolymerDirection.reverse)
        # Flipping the strand flips the reported direction.
        gene1.strand = core.PolymerStrand.negative
        gene2.strand = core.PolymerStrand.negative
        self.assertEqual(gene1.get_direction(), core.PolymerDirection.reverse)
        self.assertEqual(gene2.get_direction(), core.PolymerDirection.forward)
        # A zero-length locus (start == end) has no defined direction.
        gene1.start = 15
        gene1.end = 15
        with self.assertRaises(ValueError):
            gene1.get_direction()
| 43.512658 | 155 | 0.645818 |
b23bc9fefd6e31495b214566b86dc4b9dc3b4bfd | 2,008 | py | Python | desktop/main.py | Embedded-AMS/EmbeddedProto_Example_MSP430_SD_Logger | f20df5d45759e5a118790821508063a698f47f34 | [
"CECILL-B"
] | 1 | 2021-11-06T23:37:43.000Z | 2021-11-06T23:37:43.000Z | desktop/main.py | Embedded-AMS/EmbeddedProto_Example_MSP430_SD_Logger | f20df5d45759e5a118790821508063a698f47f34 | [
"CECILL-B"
] | null | null | null | desktop/main.py | Embedded-AMS/EmbeddedProto_Example_MSP430_SD_Logger | f20df5d45759e5a118790821508063a698f47f34 | [
"CECILL-B"
] | null | null | null | #
# Copyright (C) 2020-2021 Embedded AMS B.V. - All Rights Reserved
#
# This file is part of Embedded Proto.
#
# Embedded Proto is open source software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License as published
# by the Free Software Foundation, version 3 of the license.
#
# Embedded Proto is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Embedded Proto. If not, see <https://www.gnu.org/licenses/>.
#
# For commercial and closed source application please visit:
# <https://EmbeddedProto.com/license/>.
#
# Embedded AMS B.V.
# Info:
# info at EmbeddedProto dot com
#
# Postal address:
# Johan Huizingalaan 763a
# 1066 VH, Amsterdam
# the Netherlands
#
import argparse
from generated import sd_messages_pb2
if __name__ == "__main__":
    # Parse the location of the SD-card log file from the command line.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-p', '--path', default="",
                            help='The path to the logged embeddedproto messages')
    cli_args = arg_parser.parse_args()

    with open(cli_args.path, 'rb') as log_file:
        raw_bytes = log_file.read()

    # Messages are delimited by an ETB byte (0x17) followed by a newline.
    chunks = raw_bytes.split(b'\x17\n')
    print(chunks)

    # Walk the log and stop as soon as the message counter is not contiguous.
    expected_count = 0
    for chunk in chunks:
        log_msg = sd_messages_pb2.Log()
        log_msg.ParseFromString(chunk)
        print("count: " + str(log_msg.count))
        print("range: " + str(log_msg.range))
        print("active: " + str(log_msg.active))
        print("temperature: " + str(log_msg.temperature))
        print("speed: " + str(log_msg.speed))
        print("\n")
        if log_msg.count != expected_count:
            break
        expected_count = log_msg.count + 1
| 30.892308 | 105 | 0.63994 |
80ff75dc2c838958ab76abdb07e6ece549cb3896 | 9,819 | py | Python | docs/conf.py | 4383/Botanick | 793908f8e2ea72a8fe9c5bf47a4482359564a8c0 | [
"MIT"
] | null | null | null | docs/conf.py | 4383/Botanick | 793908f8e2ea72a8fe9c5bf47a4482359564a8c0 | [
"MIT"
] | 3 | 2016-11-11T15:05:07.000Z | 2021-11-15T17:46:52.000Z | docs/conf.py | 4383/Botanick | 793908f8e2ea72a8fe9c5bf47a4482359564a8c0 | [
"MIT"
] | 1 | 2016-11-10T18:31:47.000Z | 2016-11-10T18:31:47.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Botanick documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 10 17:24:38 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
"""Sphinx build configuration for the Botanick documentation.

Generated by sphinx-quickstart; most options are left at their commented
defaults and only the project metadata is customised.
"""
import os
import sys

# Make the package importable so sphinx.ext.autodoc can locate the modules.
sys.path.insert(0, os.path.abspath('../botanick/'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'

# The encoding of source files.
#
# source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = 'Botanick'
copyright = '2016, Adrien VIDOT'
author = 'Adrien VIDOT'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False

# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False

# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []

# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = 'Botanick v0.1.0'

# A shorter title for the navigation bar.  Default is the same as html_title.
#
# html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None

# The name of an image file (relative to this directory) to use as a favicon of
# the docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []

# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}

# If false, no module index is generated.
#
# html_domain_indices = True

# If false, no index is generated.
#
# html_use_index = True

# If true, the index is split into individual pages for each letter.
#
# html_split_index = False

# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None

# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
#   'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
#   'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'

# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}

# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'

# Output file base name for HTML help builder.
htmlhelp_basename = 'Botanickdoc'

# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',

    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'Botanick.tex', 'Botanick Documentation',
     'Adrien VIDOT', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False

# If true, show page references after internal links.
#
# latex_show_pagerefs = False

# If true, show URL addresses after external links.
#
# latex_show_urls = False

# Documents to append as an appendix to all manuals.
#
# latex_appendices = []

# It false, will not define \strong, \code, 	itleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True

# If false, no module index is generated.
#
# latex_domain_indices = True

# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'botanick', 'Botanick Documentation',
     [author], 1)
]

# If true, show URL addresses after external links.
#
# man_show_urls = False

# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'Botanick', 'Botanick Documentation',
     author, 'Botanick', 'One line description of project.',
     'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []

# If false, no module index is generated.
#
# texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
| 28.879412 | 80 | 0.702923 |
3c9b727f347ad09f6f5aeeb9ca0de8bb860f26ee | 11,917 | py | Python | huntserver/migrations/0021_auto_20180402_2224.py | adenylyl/pi_day_puzzle_hunt | aa01cef427bc5f524e89558da72a2f79b0c78514 | [
"MIT"
] | 18 | 2017-03-07T19:53:03.000Z | 2022-02-24T04:58:47.000Z | huntserver/migrations/0021_auto_20180402_2224.py | adenylyl/pi_day_puzzle_hunt | aa01cef427bc5f524e89558da72a2f79b0c78514 | [
"MIT"
] | 161 | 2016-11-14T00:04:42.000Z | 2021-06-10T17:25:17.000Z | huntserver/migrations/0021_auto_20180402_2224.py | adenylyl/pi_day_puzzle_hunt | aa01cef427bc5f524e89558da72a2f79b0c78514 | [
"MIT"
] | 22 | 2016-09-27T18:00:10.000Z | 2022-03-13T17:51:44.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Auto-generated Django migration (0021).

    Adds `help_text` (and, for relations, explicit `on_delete`) to the
    fields of the huntserver models; it does not alter the database schema
    beyond field metadata. Do not hand-edit the operations.
    """

    dependencies = [
        ('huntserver', '0020_auto_20171008_2203'),
    ]

    # One AlterField per (model, field); all purely metadata changes.
    operations = [
        migrations.AlterField(
            model_name='hunt',
            name='end_date',
            field=models.DateTimeField(help_text=b'The date/time at which a hunt will be archived and available to the public'),
        ),
        migrations.AlterField(
            model_name='hunt',
            name='hunt_name',
            field=models.CharField(help_text=b'The name of the hunt as the public will see it', max_length=200),
        ),
        migrations.AlterField(
            model_name='hunt',
            name='hunt_number',
            field=models.IntegerField(help_text=b'A number used internally for hunt sorting, must be unique', unique=True),
        ),
        migrations.AlterField(
            model_name='hunt',
            name='location',
            field=models.CharField(help_text=b'Starting location of the puzzlehunt', max_length=100),
        ),
        migrations.AlterField(
            model_name='hunt',
            name='start_date',
            field=models.DateTimeField(help_text=b'The date/time at which a hunt will become visible to registered users'),
        ),
        migrations.AlterField(
            model_name='hunt',
            name='template',
            field=models.TextField(default=b'', help_text=b'The template string to be rendered to HTML on the hunt page'),
        ),
        migrations.AlterField(
            model_name='message',
            name='is_response',
            field=models.BooleanField(help_text=b'A boolean representing whether or not the message is from the staff'),
        ),
        migrations.AlterField(
            model_name='message',
            name='team',
            field=models.ForeignKey(help_text=b'The team that this message is being sent to/from', to='huntserver.Team', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='message',
            name='text',
            field=models.CharField(help_text=b'Message text', max_length=400),
        ),
        migrations.AlterField(
            model_name='message',
            name='time',
            field=models.DateTimeField(help_text=b'Message send time'),
        ),
        migrations.AlterField(
            model_name='person',
            name='allergies',
            field=models.CharField(help_text=b'Allergy information for the person', max_length=400, blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='comments',
            field=models.CharField(help_text=b'Comments or other notes about the person', max_length=400, blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='is_shib_acct',
            field=models.BooleanField(help_text=b'A boolean to indicate if the person uses shibboleth authentication for login'),
        ),
        migrations.AlterField(
            model_name='person',
            name='phone',
            field=models.CharField(help_text=b"Person's phone number, no particular formatting", max_length=20, blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='teams',
            field=models.ManyToManyField(help_text=b'Teams that the person is on', to='huntserver.Team', blank=True),
        ),
        migrations.AlterField(
            model_name='person',
            name='user',
            field=models.OneToOneField(to=settings.AUTH_USER_MODEL, help_text=b'The corresponding user to this person', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='answer',
            field=models.CharField(help_text=b'The answer to the puzzle, not case sensitive', max_length=100),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='hunt',
            field=models.ForeignKey(help_text=b'The hunt that this puzzle is a part of', to='huntserver.Hunt', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='link',
            field=models.URLField(help_text=b'The full link (needs http://) to a publicly accessible PDF of the puzzle'),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='num_pages',
            field=models.IntegerField(help_text=b'Number of pages in the PDF for this puzzle. Set automatically upon download'),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='num_required_to_unlock',
            field=models.IntegerField(default=1, help_text=b'Number of prerequisite puzzles that need to be solved to unlock this puzzle'),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='puzzle_id',
            field=models.CharField(help_text=b'A 3 character hex string that uniquely identifies the puzzle', unique=True, max_length=8),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='puzzle_name',
            field=models.CharField(help_text=b'The name of the puzzle as it will be seen by hunt participants', max_length=200),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='puzzle_number',
            field=models.IntegerField(help_text=b'The number of the puzzle within the hunt, for sorting purposes'),
        ),
        migrations.AlterField(
            model_name='puzzle',
            name='unlocks',
            field=models.ManyToManyField(help_text=b'Puzzles that this puzzle is a possible prerequisite for', to='huntserver.Puzzle', blank=True),
        ),
        migrations.AlterField(
            model_name='response',
            name='puzzle',
            field=models.ForeignKey(help_text=b'The puzzle that this automated response is related to', to='huntserver.Puzzle', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='response',
            name='regex',
            field=models.CharField(help_text=b"The python-style regex that will be checked against the user's response", max_length=400),
        ),
        migrations.AlterField(
            model_name='response',
            name='text',
            field=models.CharField(help_text=b'The text to use in the submission response if the regex matched', max_length=400),
        ),
        migrations.AlterField(
            model_name='solve',
            name='puzzle',
            field=models.ForeignKey(help_text=b'The puzzle that this is a solve for', to='huntserver.Puzzle', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='solve',
            name='submission',
            field=models.ForeignKey(blank=True, to='huntserver.Submission', help_text=b'The submission object that the team submitted to solve the puzzle', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='solve',
            name='team',
            field=models.ForeignKey(help_text=b'The team that this solve is from', to='huntserver.Team', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='submission',
            name='modified_date',
            field=models.DateTimeField(help_text=b'Last date/time of response modification'),
        ),
        migrations.AlterField(
            model_name='submission',
            name='puzzle',
            field=models.ForeignKey(help_text=b'The puzzle that this submission is in response to', to='huntserver.Puzzle', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='submission',
            name='response_text',
            field=models.CharField(help_text=b'Response to the given answer. Empty string indicates human response needed', max_length=400, blank=True),
        ),
        migrations.AlterField(
            model_name='submission',
            name='team',
            field=models.ForeignKey(help_text=b'The team that made the submission', to='huntserver.Team', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='team',
            name='hunt',
            field=models.ForeignKey(help_text=b'The hunt that the team is a part of', to='huntserver.Hunt', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='team',
            name='join_code',
            field=models.CharField(help_text=b'The 5 character random alphanumeric password needed for a user to join a team', max_length=5),
        ),
        migrations.AlterField(
            model_name='team',
            name='location',
            field=models.CharField(help_text=b'The physical location that the team is solving at', max_length=80, blank=True),
        ),
        migrations.AlterField(
            model_name='team',
            name='playtester',
            field=models.BooleanField(default=False, help_text=b'A boolean to indicate if the team is a playtest team and will get early access'),
        ),
        migrations.AlterField(
            model_name='team',
            name='solved',
            field=models.ManyToManyField(help_text=b'The puzzles the team has solved', related_name='solved_for', through='huntserver.Solve', to='huntserver.Puzzle', blank=True),
        ),
        migrations.AlterField(
            model_name='team',
            name='team_name',
            field=models.CharField(help_text=b'The team name as it will be shown to hunt participants', max_length=200),
        ),
        migrations.AlterField(
            model_name='team',
            name='unlockables',
            field=models.ManyToManyField(help_text=b'The unlockables the team has earned', to='huntserver.Unlockable', blank=True),
        ),
        migrations.AlterField(
            model_name='team',
            name='unlocked',
            field=models.ManyToManyField(help_text=b'The puzzles the team has unlocked', related_name='unlocked_for', through='huntserver.Unlock', to='huntserver.Puzzle', blank=True),
        ),
        migrations.AlterField(
            model_name='unlock',
            name='puzzle',
            field=models.ForeignKey(help_text=b'The puzzle that this is an unlock for', to='huntserver.Puzzle', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='unlock',
            name='team',
            field=models.ForeignKey(help_text=b'The team that this unlocked puzzle is for', to='huntserver.Team', on_delete=models.CASCADE),
        ),
        migrations.AlterField(
            model_name='unlock',
            name='time',
            field=models.DateTimeField(help_text=b'The time this puzzle was unlocked for this team'),
        ),
        migrations.AlterField(
            model_name='unlockable',
            name='content',
            field=models.CharField(help_text=b'The link to the content, files must be externally hosted.', max_length=500),
        ),
        migrations.AlterField(
            model_name='unlockable',
            name='content_type',
            field=models.CharField(default=b'TXT', help_text=b"The type of object that is to be unlocked, can be 'IMG', 'PDF', 'TXT', or 'WEB'", max_length=3, choices=[(b'IMG', b'Image'), (b'PDF', b'PDF'), (b'TXT', b'Text'), (b'WEB', b'Link')]),
        ),
        migrations.AlterField(
            model_name='unlockable',
            name='puzzle',
            field=models.ForeignKey(help_text=b'The puzzle that needs to be solved to unlock this object', to='huntserver.Puzzle', on_delete=models.CASCADE),
        ),
    ]
| 45.659004 | 245 | 0.611228 |
ea0c100dba080fe3094c26f8116fc6e63891395e | 948 | py | Python | pages/themes/beginners/parallelProgramming/examples/threading/countdown_in_a_file.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | pages/themes/beginners/parallelProgramming/examples/threading/countdown_in_a_file.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | pages/themes/beginners/parallelProgramming/examples/threading/countdown_in_a_file.py | ProgressBG-Python-Course/ProgressBG-VC2-Python | 03b892a42ee1fad3d4f97e328e06a4b1573fd356 | [
"MIT"
] | null | null | null | import threading
import time
def countdown(n, ouptut_file):
while n > 0:
n -= 1
def countdown_in_file(n, ouptut_file):
with open(ouptut_file, "a") as f:
f.write(threadID+"\n")
while n > 0:
n -= 1
f.write("{}".format(n))
ouptut_file = "countdown.txt"
count = 100_000_000
#################################################
# Sequential Processing:
#################################################
t = time.time()
countdown(count, ouptut_file)
countdown(count, ouptut_file)
print("Sequential Processing took:",time.time() - t,"\n")
#################################################
# Multithreaded Processing:
#################################################
t = time.time()
tr1 = threading.Thread(target=countdown, args=(count,ouptut_file))
tr2 = threading.Thread(target=countdown, args=(count,ouptut_file))
tr1.start();tr2.start()
tr1.join(); tr2.join()
print("Multithreaded Processing took:",time.time() - t) | 23.7 | 66 | 0.544304 |
de81cc3135f0c34ca644eb9cc1a4a1f65f6c8589 | 1,837 | py | Python | config.py | awaemmanuel/2020-S109A | 7a141df45ea5bc224dc05a7e86747a08dc18054c | [
"MIT"
] | 25 | 2020-06-22T16:15:04.000Z | 2021-01-01T06:13:04.000Z | config.py | awaemmanuel/2020-S109A | 7a141df45ea5bc224dc05a7e86747a08dc18054c | [
"MIT"
] | null | null | null | config.py | awaemmanuel/2020-S109A | 7a141df45ea5bc224dc05a7e86747a08dc18054c | [
"MIT"
] | 30 | 2020-06-21T19:20:56.000Z | 2021-08-08T00:46:31.000Z | COURSE_NAME = 'S109A'
AUTHOR = 'Kevin Rader'
SITEURL = 'https://harvard-iacs.github.io/2020-S109A'
GITHUB = 'https://github.com/Harvard-IACS/2020-S109A'
COLOR = '#8996A0'
MENUITEMS = [
('Syllabus', 'pages/syllabus.html'),
('Calendars', 'pages/calendars.html'),
('Schedule', 'pages/schedule.html'),
('Materials', 'pages/materials.html'),
#('FAQ', 'pages/faq.html'),
#('Resources', 'pages/resources.html')
# ('Sections', 'category/sections.html')
]
PATH = 'content'
OUTPUT_PATH = 'docs'
TIMEZONE = 'EST'
DEFAULT_LANG = 'en'
FEED_ALL_ATOM = None
CATEGORY_FEED_ATOM = None
TRANSLATION_FEED_ATOM = None
AUTHOR_FEED_ATOM = None
AUTHOR_FEED_RSS = None
DEFAULT_PAGINATION = 10
DEFAULT_CATEGORY = 'pages'
AUTHORS_SAVE_AS = ''
CATEGORIES_SAVE_AS = ''
ARCHIVES_SAVE_AS = ''
ARTICLE_SAVE_AS = '{category}/{slug}/index.html'
ARTICLE_URL = '{category}/{slug}/'
AUTHOR_URL = ''
AUTHOR_SAVE_AS = ''
INDEX_SAVE_AS = 'pages/materials.html'
THEME_STATIC_DIR = 'style'
DELETE_OUTPUT_DIRECTORY = True
MARKUP = ['md', 'ipynb']
PLUGIN_PATHS = ['plugins']
PLUGINS = ['ipynb.markup', 'tipue_search']
IGNORE_FILES = ['.ipynb_checkpoints', 'README.md', "*.html"]
# Uncomment following line if you want document-relative URLs when developing
RELATIVE_URLS = True
STATIC_PATHS = ['lectures', 'labs', 'homeworks', 'a-sec', 'sections', 'wiki', 'images', 'projects', 'slides', 'data']
DIRECT_TEMPLATES = ['index', 'category', 'tags', 'search']
import re
JINJA_FILTERS = {
'original_content': lambda x: re.search(r"content/(.*)", x).group(1)
}
USE_FOLDER_AS_CATEGORY = False
CACHE_CONTENT = True
import logging
LOG_FILTER = [
(logging.WARN, "Empty alt attribute for image %s in %s"),
(logging.WARN, "Meta tag in file %s does not have a 'name' attribute, skipping. Attributes: content=%s")
]
| 19.336842 | 117 | 0.685357 |
3afdc733c827a659288e651d0b0ba3bf2d5a0b41 | 11,375 | py | Python | third_party/ros_aarch64/lib/python2.7/dist-packages/trajectory_msgs/msg/_MultiDOFJointTrajectoryPoint.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 2 | 2018-01-29T03:10:39.000Z | 2020-12-08T09:08:41.000Z | third_party/ros_x86_64/lib/python2.7/dist-packages/trajectory_msgs/msg/_MultiDOFJointTrajectoryPoint.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | null | null | null | third_party/ros_x86_64/lib/python2.7/dist-packages/trajectory_msgs/msg/_MultiDOFJointTrajectoryPoint.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 3 | 2018-01-29T12:22:56.000Z | 2020-12-08T09:08:46.000Z | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from trajectory_msgs/MultiDOFJointTrajectoryPoint.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import genpy
class MultiDOFJointTrajectoryPoint(genpy.Message):
  """Auto-generated by genpy from trajectory_msgs/MultiDOFJointTrajectoryPoint.msg — do not edit by hand."""
  # MD5 of the message definition; ROS uses it to check publisher/subscriber type compatibility.
  _md5sum = "3ebe08d1abd5b65862d50e09430db776"
  _type = "trajectory_msgs/MultiDOFJointTrajectoryPoint"
  _has_header = False #flag to mark the presence of a Header object
  # Full .msg definition text (including dependent message types), kept for introspection.
  _full_text = """# Each multi-dof joint can specify a transform (up to 6 DOF)
geometry_msgs/Transform[] transforms
# There can be a velocity specified for the origin of the joint
geometry_msgs/Twist[] velocities
# There can be an acceleration specified for the origin of the joint
geometry_msgs/Twist[] accelerations
duration time_from_start
================================================================================
MSG: geometry_msgs/Transform
# This represents the transform between two coordinate frames in free space.
Vector3 translation
Quaternion rotation
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: geometry_msgs/Twist
# This expresses velocity in free space broken into its linear and angular parts.
Vector3 linear
Vector3 angular
"""
  # Field names and their ROS types, in wire order.
  __slots__ = ['transforms','velocities','accelerations','time_from_start']
  _slot_types = ['geometry_msgs/Transform[]','geometry_msgs/Twist[]','geometry_msgs/Twist[]','duration']
  def __init__(self, *args, **kwds):
    """
    Constructor. Any message fields that are implicitly/explicitly
    set to None will be assigned a default value. The recommended
    use is keyword arguments as this is more robust to future message
    changes.  You cannot mix in-order arguments and keyword arguments.

    The available fields are:
       transforms,velocities,accelerations,time_from_start

    :param args: complete set of field values, in .msg order
    :param kwds: use keyword arguments corresponding to message field names
    to set specific fields.
    """
    if args or kwds:
      # Let the genpy base class assign the provided values to the slots.
      super(MultiDOFJointTrajectoryPoint, self).__init__(*args, **kwds)
      #message fields cannot be None, assign default values for those that are
      if self.transforms is None:
        self.transforms = []
      if self.velocities is None:
        self.velocities = []
      if self.accelerations is None:
        self.accelerations = []
      if self.time_from_start is None:
        self.time_from_start = genpy.Duration()
    else:
      # No values supplied: initialize every field to its default.
      self.transforms = []
      self.velocities = []
      self.accelerations = []
      self.time_from_start = genpy.Duration()
  def _get_types(self):
    """
    internal API method: return the ROS type string for each slot, in slot order
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer
    :param buff: buffer, ``StringIO``
    """
    try:
      # transforms: uint32 count, then per element 3 float64 (translation)
      # followed by 4 float64 (rotation quaternion).
      length = len(self.transforms)
      buff.write(_struct_I.pack(length))
      for val1 in self.transforms:
        _v1 = val1.translation
        _x = _v1
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v2 = val1.rotation
        _x = _v2
        buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
      # velocities: uint32 count, then per element linear + angular (3 float64 each).
      length = len(self.velocities)
      buff.write(_struct_I.pack(length))
      for val1 in self.velocities:
        _v3 = val1.linear
        _x = _v3
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v4 = val1.angular
        _x = _v4
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
      # accelerations: same layout as velocities.
      length = len(self.accelerations)
      buff.write(_struct_I.pack(length))
      for val1 in self.accelerations:
        _v5 = val1.linear
        _x = _v5
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v6 = val1.angular
        _x = _v6
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
      # time_from_start: two int32 (secs, nsecs).
      _x = self
      buff.write(_struct_2i.pack(_x.time_from_start.secs, _x.time_from_start.nsecs))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance
    :param str: byte array of serialized message, ``str``

    NOTE(review): the parameter name ``str`` shadows the builtin; it is kept
    as-is because this is genpy-generated code.
    """
    try:
      # Generated no-op guards: these assign None to fields that are already
      # None; every field is unconditionally overwritten below anyway.
      if self.transforms is None:
        self.transforms = None
      if self.velocities is None:
        self.velocities = None
      if self.accelerations is None:
        self.accelerations = None
      if self.time_from_start is None:
        self.time_from_start = genpy.Duration()
      end = 0
      # uint32 array length prefix for transforms
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.transforms = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Transform()
        # translation: 3 doubles = 24 bytes
        _v7 = val1.translation
        _x = _v7
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        # rotation quaternion: 4 doubles = 32 bytes
        _v8 = val1.rotation
        _x = _v8
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
        self.transforms.append(val1)
      # uint32 array length prefix for velocities
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.velocities = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Twist()
        _v9 = val1.linear
        _x = _v9
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        _v10 = val1.angular
        _x = _v10
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        self.velocities.append(val1)
      # uint32 array length prefix for accelerations
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.accelerations = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Twist()
        _v11 = val1.linear
        _x = _v11
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        _v12 = val1.angular
        _x = _v12
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        self.accelerations.append(val1)
      # duration: two signed 32-bit ints (secs, nsecs) = 8 bytes
      _x = self
      start = end
      end += 8
      (_x.time_from_start.secs, _x.time_from_start.nsecs,) = _struct_2i.unpack(str[start:end])
      # normalize the duration (canonical secs/nsecs representation)
      self.time_from_start.canon()
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    # The ``numpy`` argument is never used in this body: this message has no
    # primitive-array fields, so the output is identical to serialize().
    try:
      length = len(self.transforms)
      buff.write(_struct_I.pack(length))
      for val1 in self.transforms:
        # geometry_msgs/Transform: translation (3d) + rotation quaternion (4d)
        _v13 = val1.translation
        _x = _v13
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v14 = val1.rotation
        _x = _v14
        buff.write(_struct_4d.pack(_x.x, _x.y, _x.z, _x.w))
      length = len(self.velocities)
      buff.write(_struct_I.pack(length))
      for val1 in self.velocities:
        # geometry_msgs/Twist: linear (3d) + angular (3d)
        _v15 = val1.linear
        _x = _v15
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v16 = val1.angular
        _x = _v16
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
      length = len(self.accelerations)
      buff.write(_struct_I.pack(length))
      for val1 in self.accelerations:
        _v17 = val1.linear
        _x = _v17
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
        _v18 = val1.angular
        _x = _v18
        buff.write(_struct_3d.pack(_x.x, _x.y, _x.z))
      # time_from_start: two signed 32-bit ints (secs, nsecs)
      _x = self
      buff.write(_struct_2i.pack(_x.time_from_start.secs, _x.time_from_start.nsecs))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    # The ``numpy`` argument is never used in this body: this message has no
    # primitive-array fields, so behavior matches deserialize().
    try:
      # Generated no-op guards; every field is unconditionally overwritten below.
      if self.transforms is None:
        self.transforms = None
      if self.velocities is None:
        self.velocities = None
      if self.accelerations is None:
        self.accelerations = None
      if self.time_from_start is None:
        self.time_from_start = genpy.Duration()
      end = 0
      # uint32 array length prefix for transforms
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.transforms = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Transform()
        # translation: 3 doubles = 24 bytes
        _v19 = val1.translation
        _x = _v19
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        # rotation quaternion: 4 doubles = 32 bytes
        _v20 = val1.rotation
        _x = _v20
        start = end
        end += 32
        (_x.x, _x.y, _x.z, _x.w,) = _struct_4d.unpack(str[start:end])
        self.transforms.append(val1)
      # uint32 array length prefix for velocities
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.velocities = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Twist()
        _v21 = val1.linear
        _x = _v21
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        _v22 = val1.angular
        _x = _v22
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        self.velocities.append(val1)
      # uint32 array length prefix for accelerations
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      self.accelerations = []
      for i in range(0, length):
        val1 = geometry_msgs.msg.Twist()
        _v23 = val1.linear
        _x = _v23
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        _v24 = val1.angular
        _x = _v24
        start = end
        end += 24
        (_x.x, _x.y, _x.z,) = _struct_3d.unpack(str[start:end])
        self.accelerations.append(val1)
      # duration: two signed 32-bit ints (secs, nsecs) = 8 bytes
      _x = self
      start = end
      end += 8
      (_x.time_from_start.secs, _x.time_from_start.nsecs,) = _struct_2i.unpack(str[start:end])
      self.time_from_start.canon()
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Pre-compiled little-endian struct formats shared by the (de)serializers above.
_struct_I = genpy.struct_I
_struct_4d = struct.Struct("<4d")  # quaternion: x, y, z, w
_struct_2i = struct.Struct("<2i")  # duration: secs, nsecs
_struct_3d = struct.Struct("<3d")  # vector3: x, y, z
| 34.159159 | 145 | 0.607736 |
bc87fde94675823b1094fb030af95101519345af | 4,610 | py | Python | tensorflow/lite/testing/generate_examples.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 78 | 2020-08-04T12:36:25.000Z | 2022-03-25T04:23:40.000Z | tensorflow/lite/testing/generate_examples.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 203 | 2019-06-14T23:53:10.000Z | 2022-02-10T02:27:23.000Z | tensorflow/lite/testing/generate_examples.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 66 | 2020-05-15T10:05:12.000Z | 2022-02-14T07:28:18.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generate a series of TensorFlow graphs that become tflite test cases.
Usage:
generate_examples <output directory>
bazel run //tensorflow/lite/testing:generate_examples
To more easily debug failures use (or override) the --save_graphdefs flag to
place text proto graphdefs into the generated zip files.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
import argparse
import os
import sys
from tensorflow.lite.testing import generate_examples_lib
from tensorflow.lite.testing import toco_convert
# TODO(aselle): Disable GPU for now
# TODO(aselle): Disable GPU for now — example generation runs on CPU only.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Command-line interface: one positional output directory plus flags that are
# copied onto generate_examples_lib.Options in main().
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
                    help="Directory where the outputs will be go.")
parser.add_argument(
    "--zip_to_output",
    type=str,
    help="Particular zip to output.",
    required=True)
parser.add_argument("--toco",
                    type=str,
                    help="Path to toco tool.",
                    required=True)
parser.add_argument(
    "--known_bugs_are_errors",
    action="store_true",
    help=("If a particular model is affected by a known bug,"
          " count it as a converter error."))
parser.add_argument(
    "--ignore_converter_errors",
    action="store_true",
    # Fixed: the previous help text ("Raise an exception if any converter
    # error is encountered.") described the opposite of what this flag does.
    help="Ignore any converter errors encountered instead of raising an "
         "exception.")
parser.add_argument(
    "--save_graphdefs",
    action="store_true",
    help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
    "--run_with_flex",
    action="store_true",
    help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
    "--make_edgetpu_tests",
    action="store_true",
    help="Whether to generate test cases for edgetpu.")
parser.add_argument(
    "--make_forward_compat_test",
    action="store_true",
    help="Make tests by setting TF forward compatibility horizon to the future")
parser.add_argument(
    "--no_tests_limit",
    action="store_true",
    help="Remove the limit of the number of tests.")
parser.add_argument(
    "--no_conversion_report",
    action="store_true",
    help="Do not create conversion report.")
parser.add_argument(
    "--test_sets",
    type=str,
    help=("Comma-separated list of test set names to generate. "
          "If not specified, a test set is selected by parsing the name of "
          "'zip_to_output' file."))

# Toco binary path provided by the generate rule.
bin_path = None
def main(unused_args):
  """Build the requested TFLite test zip(s) from the parsed FLAGS."""
  # Eager execution is enabled by default in TF 2.0, but generated example
  # tests are still using non-eager features (e.g. `tf.placeholder`).
  tf.compat.v1.disable_eager_execution()

  options = generate_examples_lib.Options()
  # These options share their names with the corresponding command-line
  # flags, so copy them across mechanically.
  for flag_name in (
      "output_path",
      "zip_to_output",
      "toco",
      "known_bugs_are_errors",
      "ignore_converter_errors",
      "save_graphdefs",
      "run_with_flex",
      "make_edgetpu_tests",
      "make_forward_compat_test",
      "no_tests_limit",
      "no_conversion_report",
  ):
    setattr(options, flag_name, getattr(FLAGS, flag_name))
  options.tflite_convert_function = toco_convert.toco_convert

  if FLAGS.test_sets:
    generate_examples_lib.generate_multi_set_examples(
        options, FLAGS.test_sets.split(","))
  else:
    generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
  FLAGS, unparsed = parser.parse_known_args()
  if unparsed:
    parser.print_usage()
    print("\nGot the following unparsed args, %r please fix.\n" % unparsed)
    # Use sys.exit: the bare `exit` builtin comes from the site module and is
    # not guaranteed to exist in every interpreter invocation.
    sys.exit(1)
  else:
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 33.897059 | 80 | 0.726464 |
4797555bc2e4165be031cff29a2ed4e8d81858a3 | 6,658 | py | Python | ros-spine-control/src/opencv_work/scripts/backups/2019-01-21/opencv_object_tracker_backup_2018_11.28.py | BerkeleyExpertSystemTechnologiesLab/2d-spine-control-hardware | 3fc2cdaeabf3b8ab2596e22bbd1912d84e9f062b | [
"Apache-2.0"
] | 1 | 2021-04-01T22:33:40.000Z | 2021-04-01T22:33:40.000Z | ros-spine-control/src/opencv_work/scripts/backups/2019-01-21/opencv_object_tracker_backup_2018_11.28.py | GusSosa/ROS_navMod | 719cb233f7d386a60f63c4ed9fd1c8394f1b0b97 | [
"Apache-2.0"
] | null | null | null | ros-spine-control/src/opencv_work/scripts/backups/2019-01-21/opencv_object_tracker_backup_2018_11.28.py | GusSosa/ROS_navMod | 719cb233f7d386a60f63c4ed9fd1c8394f1b0b97 | [
"Apache-2.0"
] | 2 | 2020-03-21T20:24:37.000Z | 2022-01-28T08:27:55.000Z | # Ubuntu 18.04: How to install OpenCVShell
# $ cd ~
# $ workon cv
# $ python
# Python 3.6.5 (default, Apr 1 2018, 05:46:30)
# [GCC 7.3.0] on linux
# Type "help", "copyright", "credits" or "license" for more information.
# >>> import cv2
# >>> cv2.__version__
# '3.4.1'
# >>> quit()
# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import time
import cv2
import numpy as np
def object_tracker():
    """Interactively track one object in a webcam/video stream with OpenCV.

    Keys in the "Frame" window: <s> select the ROI to track, <t> select a
    scale ROI, <q> quit.  Returns the tracked x pixel coordinates of the
    box center as a numpy array (y coordinates are collected but not
    returned — NOTE(review): possibly an oversight, confirm with callers).
    """
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    # use for video file
    ap.add_argument("-v", "--video", type=str,
                    help="path to input video file")
    ap.add_argument("-t", "--tracker", type=str, default="kcf",
                    help="OpenCV object tracker type")
    args = vars(ap.parse_args())
    # extract the OpenCV version info
    (major, minor) = cv2.__version__.split(".")[:2]
    print(major)
    print(minor)
    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if int(major) == 3 and int(minor) < 3:
        tracker = cv2.Tracker_create(args["tracker"].upper())
    # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
    # appropriate object tracker constructor:
    else:
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        # grab the appropriate object tracker using our dictionary of
        # OpenCV object tracker objects
        tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
    # initialize the bounding box coordinates of the object we are going
    # to track, and scale object
    # NOTE(review): initScale is captured by the <t> key handler below but
    # never used afterwards — dead state, confirm before removing.
    initBB = None
    initScale = None
    x_pix_com = []
    y_pix_com = []
    # if a video path was not supplied, grab the reference to the web cam
    # to use PC webcam, change src=0
    # to use connected USB camera, change src=1 or src=2...
    if not args.get("video", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(1.0)
    # otherwise, grab a reference to the video file
    else:
        vs = cv2.VideoCapture(args["video"])
    # initialize the FPS throughput estimator
    fps = None
    #com_output = input('Output COM info? <Y/N>')
    print('Press <S> in the "Frame" window to select ROI')
    # loop over frames from the video stream
    while True:
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()
        frame = frame[1] if args.get("video", False) else frame
        # check to see if we have reached the end of the stream
        if frame is None:
            break
        # resize the frame (so we can process it faster) and grab the
        # frame dimensions
        # frame = imutils.resize(frame, width=500)
        (H, W) = frame.shape[:2]
        # check to see if we are currently tracking an object
        if initBB is not None:
            # grab the new bounding box coordinates of the object
            (success, box) = tracker.update(frame)
            # check to see if the tracking was a success
            if success:
                (x, y, w, h) = [int(v) for v in box]
                cv2.rectangle(frame, (x, y), (x + w, y + h),
                              (0, 255, 0), 2)
                # NOTE(review): `3 > 2` is an always-true placeholder for the
                # commented-out com_output prompt — COM info is always kept.
                if 3 > 2:
                    # if (com_output == 'Y') or (com_output == 'y'):
                    # calculate and print COM info for tracked object (pixel
                    # coordinates, relative to the upper left corner)
                    x_pix_com.append(x + w / 2)
                    y_pix_com.append(y + h / 2)
                    #print('X coordinate COM = ' + str(x + w / 2))
                    #print('Y coordinate COM = ' + str(y + h / 2))
            # update the FPS counter (stop() each frame so fps.fps() is current)
            fps.update()
            fps.stop()
            # initialize the set of information we'll be displaying on
            # the frame
            info = [
                ("Tracker", args["tracker"]),
                ("Success", "Yes" if success else "No"),
                ("FPS", "{:.2f}".format(fps.fps())),
            ]
            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        # show the output frame
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF
        # if the 't' key is selected, we are going to "select" a bounding
        # box to determine object scale, then get user input scale
        if key == ord('t'):
            # select bounding box of object to scale
            initScale = cv2.selectROI("Frame", frame, fromCenter=False,
                                      showCrosshair=True)
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            initBB = cv2.selectROI("Frame", frame, fromCenter=False,
                                   showCrosshair=True)
            # start OpenCV object tracker using the supplied bounding box
            # coordinates, then start the FPS throughput estimator as well
            tracker.init(frame, initBB)
            fps = FPS().start()
        # if the `q` key was pressed, break from the loop
        elif key == ord("q"):
            break
    # if we are using a webcam, release the pointer
    if not args.get("video", False):
        vs.stop()
    # otherwise, release the file pointer
    else:
        vs.release()
    # close all windows
    cv2.destroyAllWindows()
    x_pix_com = np.array(x_pix_com)
    return x_pix_com
if __name__ == "__main__":
    object_tracker()
# Outstanding work (original author's TODO list):
# 1. integrate ROS node - publish to node
# 2. camera rig
# 3. smoothing filter for data
# 4. Homography transform matrix
# 5.
8f6cbcfd7963abee0436f9d1d7460060756ba3c6 | 16,514 | py | Python | msl/equipment/resources/thorlabs/kinesis/filter_flipper.py | SwiftyMorgan/msl-equipment | 56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c | [
"MIT"
] | null | null | null | msl/equipment/resources/thorlabs/kinesis/filter_flipper.py | SwiftyMorgan/msl-equipment | 56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c | [
"MIT"
] | null | null | null | msl/equipment/resources/thorlabs/kinesis/filter_flipper.py | SwiftyMorgan/msl-equipment | 56bc467e97a2a0a60aa6f031dd30bf1d98ebda5c | [
"MIT"
] | null | null | null | """
This module provides all the functionality required to control a
Filter Flipper (MFF101, MFF102).
"""
from ctypes import byref, c_int64
from msl.equipment.resources import register
from msl.equipment.resources.utils import WORD, DWORD
from msl.equipment.resources.thorlabs.kinesis.motion_control import MotionControl
from msl.equipment.resources.thorlabs.kinesis.api_functions import FilterFlipper_FCNS
from msl.equipment.resources.thorlabs.kinesis.structs import FF_IOSettings
from msl.equipment.resources.thorlabs.kinesis.enums import FF_IOModes, FF_SignalModes
@register(manufacturer=r'Thorlabs', model=r'MFF10[1|2]')
class FilterFlipper(MotionControl):
    """Wrapper around the Thorlabs Kinesis Filter Flipper SDK (MFF101/MFF102)."""

    # Valid ranges, in milliseconds, enforced by set_transit_time and
    # set_io_settings before values are handed to the SDK.
    MIN_TRANSIT_TIME = 300
    MAX_TRANSIT_TIME = 2800
    MIN_PULSE_WIDTH = 10
    MAX_PULSE_WIDTH = 200

    def __init__(self, record):
        """A wrapper around ``Thorlabs.MotionControl.FilterFlipper.dll``.

        The :attr:`~msl.equipment.record_types.ConnectionRecord.properties`
        for a FilterFlipper connection supports the following key-value pairs in the
        :ref:`connections_database`::

            'device_name': str, the device name found in ThorlabsDefaultSettings.xml [default: None]

        Do not instantiate this class directly. Use the :meth:`~.EquipmentRecord.connect`
        method to connect to the equipment.

        Parameters
        ----------
        record : :class:`~msl.equipment.record_types.EquipmentRecord`
            A record from an :ref:`equipment_database`.
        """
        # Fall back to the generic device name when none was configured.
        name = record.connection.properties.get('device_name')
        if name is None:
            record.connection.properties['device_name'] = 'MFF Filter Flipper'
        super(FilterFlipper, self).__init__(record, FilterFlipper_FCNS)

    def open(self):
        """Open the device for communication.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_Open(self._serial)

    def close(self):
        """Disconnect and close the device."""
        self.sdk.FF_Close(self._serial)

    def check_connection(self):
        """Check connection.

        Returns
        -------
        :class:`bool`
            Whether the USB is listed by the FTDI controller.
        """
        return self.sdk.FF_CheckConnection(self._serial)

    def identify(self):
        """Sends a command to the device to make it identify itself."""
        self.sdk.FF_Identify(self._serial)

    def get_hardware_info(self):
        """Gets the hardware information from the device.

        Returns
        -------
        :class:`.structs.TLI_HardwareInformation`
            The hardware information.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        return self._get_hardware_info(self.sdk.FF_GetHardwareInfo)

    def get_firmware_version(self):
        """Gets version number of the device firmware.

        Returns
        -------
        :class:`str`
            The firmware version.
        """
        return self.to_version(self.sdk.FF_GetFirmwareVersion(self._serial))

    def get_software_version(self):
        """Gets version number of the device software.

        Returns
        -------
        :class:`str`
            The device software version.
        """
        return self.to_version(self.sdk.FF_GetSoftwareVersion(self._serial))

    def load_settings(self):
        """Update device with stored settings.

        The settings are read from ``ThorlabsDefaultSettings.xml``, which
        gets created when the Kinesis software is installed.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_LoadSettings(self._serial)

    def load_named_settings(self, settings_name):
        """Update device with named settings.

        Parameters
        ----------
        settings_name : :class:`str`
            The name of the device to load the settings for. Examples for the value
            of `setting_name` can be found in ``ThorlabsDefaultSettings.xml``, which
            gets created when the Kinesis software is installed.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_LoadNamedSettings(self._serial, settings_name)

    def persist_settings(self):
        """Persist the devices current settings.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_PersistSettings(self._serial)

    def get_number_positions(self):
        """Get number of positions available from the device.

        Returns
        -------
        :class:`int`
            The number of positions.
        """
        return self.sdk.FF_GetNumberPositions(self._serial)

    def home(self):
        """Home the device.

        Homing the device will set the device to a known state and determine
        the home position.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_Home(self._serial)

    def move_to_position(self, position):
        """Move the device to the specified position (index).

        Parameters
        ----------
        position : :class:`int`
            The required position. Must be 1 or 2.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_MoveToPosition(self._serial, position)

    def get_position(self):
        """Get the current position (index).

        Returns
        -------
        :class:`int`
            The current position, 1 or 2.
        """
        return self.sdk.FF_GetPosition(self._serial)

    def get_io_settings(self):
        """Gets the I/O settings from filter flipper.

        Returns
        -------
        :class:`~.structs.FF_IOSettings`
            The Filter Flipper I/O settings.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        settings = FF_IOSettings()
        self.sdk.FF_GetIOSettings(self._serial, byref(settings))
        return settings

    def request_io_settings(self):
        """Requests the I/O settings from the filter flipper.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_RequestIOSettings(self._serial)

    def _check_transit_time(self, transit_time):
        """Raise an exception if `transit_time` (ms) is outside the valid range."""
        if transit_time > self.MAX_TRANSIT_TIME or transit_time < self.MIN_TRANSIT_TIME:
            msg = 'Invalid transit time value of {} ms; {} <= transit_time <= {}'.format(
                transit_time, self.MIN_TRANSIT_TIME, self.MAX_TRANSIT_TIME)
            self.raise_exception(msg)

    def _check_pulse_width(self, pulse_width, channel):
        """Raise an exception if a digital I/O pulse width (ms) is outside the valid range."""
        if pulse_width > self.MAX_PULSE_WIDTH or pulse_width < self.MIN_PULSE_WIDTH:
            msg = 'Invalid digital I/O {} pulse width of {} ms; {} <= pw <= {}'.format(
                channel, pulse_width, self.MIN_PULSE_WIDTH, self.MAX_PULSE_WIDTH)
            self.raise_exception(msg)

    def set_io_settings(self, transit_time=500,
                        oper1=FF_IOModes.FF_ToggleOnPositiveEdge, sig1=FF_SignalModes.FF_InputButton, pw1=200,
                        oper2=FF_IOModes.FF_ToggleOnPositiveEdge, sig2=FF_SignalModes.FF_OutputLevel, pw2=200):
        """
        Sets the settings on filter flipper.

        Parameters
        ----------
        transit_time : :class:`int`, optional
            Time taken to get from one position to other in milliseconds.
        oper1 : :class:`~.enums.FF_IOModes`, optional
            I/O 1 Operating Mode.
        sig1 : :class:`~.enums.FF_SignalModes`, optional
            I/O 1 Signal Mode.
        pw1 : :class:`int`, optional
            Digital I/O 1 pulse width in milliseconds.
        oper2 : :class:`~.enums.FF_IOModes`, optional
            I/O 2 Operating Mode.
        sig2 : :class:`~.enums.FF_SignalModes`, optional
            I/O 2 Signal Mode.
        pw2 : :class:`int`, optional
            Digital I/O 2 pulse width in milliseconds.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        # Validate all user-supplied values before touching the SDK
        # (shared helpers keep this consistent with set_transit_time).
        self._check_transit_time(transit_time)
        self._check_pulse_width(pw1, 1)
        self._check_pulse_width(pw2, 2)
        settings = FF_IOSettings()
        settings.transitTime = int(transit_time)
        settings.digIO1OperMode = self.convert_to_enum(oper1, FF_IOModes, prefix='FF_')
        settings.digIO1SignalMode = self.convert_to_enum(sig1, FF_SignalModes, prefix='FF_')
        settings.digIO1PulseWidth = int(pw1)
        settings.digIO2OperMode = self.convert_to_enum(oper2, FF_IOModes, prefix='FF_')
        settings.digIO2SignalMode = self.convert_to_enum(sig2, FF_SignalModes, prefix='FF_')
        settings.digIO2PulseWidth = int(pw2)
        self.sdk.FF_SetIOSettings(self._serial, byref(settings))

    def get_transit_time(self):
        """Gets the transit time.

        Returns
        -------
        :class:`int`
            The transit time in milliseconds.
        """
        return self.sdk.FF_GetTransitTime(self._serial)

    def set_transit_time(self, transit_time):
        """Sets the transit time.

        Parameters
        ----------
        transit_time : :class:`int`
            The transit time in milliseconds.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self._check_transit_time(transit_time)
        self.sdk.FF_SetTransitTime(self._serial, int(transit_time))

    def request_status(self):
        """Request status bits.

        This needs to be called to get the device to send it's current status.
        This is called automatically if Polling is enabled for the device using
        :meth:`.start_polling`.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_RequestStatus(self._serial)

    def get_status_bits(self):
        """Get the current status bits.

        This returns the latest status bits received from the device. To get
        new status bits, use :meth:`.request_status` or use the polling
        function, :meth:`.start_polling`

        Returns
        -------
        :class:`int`
            The status bits from the device.
        """
        return self.sdk.FF_GetStatusBits(self._serial)

    def start_polling(self, milliseconds):
        """Starts the internal polling loop.

        This function continuously requests position and status messages.

        Parameters
        ----------
        milliseconds : :class:`int`
            The polling rate, in milliseconds.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_StartPolling(self._serial, int(milliseconds))

    def polling_duration(self):
        """Gets the polling loop duration.

        Returns
        -------
        :class:`int`
            The time between polls in milliseconds or 0 if polling is not active.
        """
        return self.sdk.FF_PollingDuration(self._serial)

    def stop_polling(self):
        """Stops the internal polling loop."""
        self.sdk.FF_StopPolling(self._serial)

    def time_since_last_msg_received(self):
        """Gets the time, in milliseconds, since tha last message was received.

        This can be used to determine whether communications with the device is
        still good.

        Returns
        -------
        :class:`int`
            The time, in milliseconds, since the last message was received.
        """
        ms = c_int64()
        self.sdk.FF_TimeSinceLastMsgReceived(self._serial, byref(ms))
        return ms.value

    def enable_last_msg_timer(self, enable, msg_timeout=0):
        """Enables the last message monitoring timer.

        This can be used to determine whether communications with the device is
        still good.

        Parameters
        ----------
        enable : :class:`bool`
            :data:`True` to enable monitoring otherwise :data:`False` to disable.
        msg_timeout : :class:`int`, optional
            The last message error timeout in ms. Set to 0 to disable.
        """
        self.sdk.FF_EnableLastMsgTimer(self._serial, enable, msg_timeout)

    def has_last_msg_timer_overrun(self):
        """Queries if the time since the last message has exceeded the
        ``lastMsgTimeout`` set by :meth:`.enable_last_msg_timer`.

        This can be used to determine whether communications with the device is
        still good.

        Returns
        -------
        :class:`bool`
            :data:`True` if last message timer has elapsed or
            :data:`False` if monitoring is not enabled or if time of last message
            received is less than ``msg_timeout``.
        """
        return self.sdk.FF_HasLastMsgTimerOverrun(self._serial)

    def request_settings(self):
        """Requests that all settings are downloaded from the device.

        This function requests that the device upload all it's settings to the
        DLL.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        self.sdk.FF_RequestSettings(self._serial)

    def clear_message_queue(self):
        """Clears the device message queue."""
        self.sdk.FF_ClearMessageQueue(self._serial)

    def register_message_callback(self, callback):
        """Registers a callback on the message queue.

        Parameters
        ----------
        callback : :class:`~msl.equipment.resources.thorlabs.kinesis.callbacks.MotionControlCallback`
            A function to be called whenever messages are received.
        """
        self.sdk.FF_RegisterMessageCallback(self._serial, callback)

    def message_queue_size(self):
        """Gets the size of the message queue.

        Returns
        -------
        :class:`int`
            The number of messages in the queue.
        """
        return self.sdk.FF_MessageQueueSize(self._serial)

    def get_next_message(self):
        """Get the next Message Queue item. See :mod:`.messages`.

        Returns
        -------
        :class:`int`
            The message type.
        :class:`int`
            The message ID.
        :class:`int`
            The message data.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        message_type = WORD()
        message_id = WORD()
        message_data = DWORD()
        self.sdk.FF_GetNextMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
        return message_type.value, message_id.value, message_data.value

    def wait_for_message(self):
        """Wait for next Message Queue item. See :mod:`.messages`.

        Returns
        -------
        :class:`int`
            The message type.
        :class:`int`
            The message ID.
        :class:`int`
            The message data.

        Raises
        ------
        :exc:`~msl.equipment.exceptions.ThorlabsError`
            If not successful.
        """
        message_type = WORD()
        message_id = WORD()
        message_data = DWORD()
        self.sdk.FF_WaitForMessage(self._serial, byref(message_type), byref(message_id), byref(message_data))
        return message_type.value, message_id.value, message_data.value
if __name__ == '__main__':
    # Developer utility: running this module directly passes the wrapper
    # class, its SDK function table, and the Kinesis header name to _print.
    from msl.equipment.resources.thorlabs.kinesis import _print
    _print(FilterFlipper, FilterFlipper_FCNS, 'Thorlabs.MotionControl.FilterFlipper.h')
| 33.227364 | 111 | 0.600763 |
a999594bb14f28f0db768a45cd8080dbcd65d34c | 308 | py | Python | python__OOP/01.defining_classes_lab/01.rhombus.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
] | 1 | 2020-07-14T12:32:47.000Z | 2020-07-14T12:32:47.000Z | python__OOP/01.defining_classes_lab/01.rhombus.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
] | null | null | null | python__OOP/01.defining_classes_lab/01.rhombus.py | EmilianStoyanov/Projects-in-SoftUni | e83996670fe00424a158905d537a7bbbeee8fb59 | [
"MIT"
def generate_line(index, n):
    """Return one rhombus row: left padding followed by ``index + 1`` stars.

    Each star cell is the two-character unit ``"* "``, so every row ends
    with a trailing space.
    """
    padding = (n - index - 1) * ' '
    return padding + (index + 1) * "* "
def print_rhombus(n):
    """Print a rhombus with side length ``n`` built from '* ' cells."""
    # Row widths grow 1..n then shrink back down n-1..1.
    row_indices = list(range(n)) + list(range(n - 2, -1, -1))
    for row in row_indices:
        print(generate_line(row, n))
# Script entry point: read the rhombus size from stdin and draw the figure.
print_rhombus(int(input()))
| 19.25 | 34 | 0.548701 |
0c074ce20d2d3c1bb021e0209c262872b7eeeca4 | 95 | py | Python | mysettings/apps.py | ProjectFFF/FFF | a563e2bb5aafe18d3fa3143d83b6558921eac8ee | [
"BSD-2-Clause"
] | 6 | 2020-09-02T18:48:28.000Z | 2022-02-06T11:13:06.000Z | mysettings/apps.py | ProjectFFF/FFF | a563e2bb5aafe18d3fa3143d83b6558921eac8ee | [
"BSD-2-Clause"
] | 23 | 2020-09-04T08:57:28.000Z | 2020-10-25T07:03:47.000Z | mysettings/apps.py | ProjectFFF/FFF | a563e2bb5aafe18d3fa3143d83b6558921eac8ee | [
"BSD-2-Clause"
] | null | null | null | from django.apps import AppConfig
class MysettingsConfig(AppConfig):
    """Django application configuration for the ``mysettings`` app."""
    # AppConfig.name: the dotted module path of the application this
    # configuration applies to.
    name = 'mysettings'
| 15.833333 | 34 | 0.768421 |
9600f30d5ac644f61b1328af23712d7f9dc68d1b | 497 | py | Python | insert_farmacia.py | VictorCastao/BD-Trabalhos | 83fe69ee9cd9fe8c258e9c24faaa3b79353fd1b4 | [
"MIT"
] | null | null | null | insert_farmacia.py | VictorCastao/BD-Trabalhos | 83fe69ee9cd9fe8c258e9c24faaa3b79353fd1b4 | [
"MIT"
] | null | null | null | insert_farmacia.py | VictorCastao/BD-Trabalhos | 83fe69ee9cd9fe8c258e9c24faaa3b79353fd1b4 | [
"MIT"
] | null | null | null | import csv
import psycopg2
# Load remedio.csv and insert each row into farmacia.remedios.
# NOTE(review): credentials and the CSV path are hard-coded for this one-off
# loader script.
conn = psycopg2.connect(database="base_farmacia", user="postgres", password="banco", host="localhost", port=5432)
try:
    cur = conn.cursor()
    try:
        with open('/home/vgcc287/Downloads/remedio.csv', 'r') as csv_file:
            for row in csv.reader(csv_file):
                # Coerce the first column to int and the third to float;
                # presumably id and price — TODO confirm the table schema.
                row[0] = int(row[0])
                row[2] = float(row[2])
                print(row)
                # Parameterized insert: psycopg2 handles quoting/escaping.
                cur.execute("INSERT INTO farmacia.remedios VALUES (%s ,%s ,%s)", row)
        # Single commit after the loop (as before): the whole load is one
        # transaction.
        conn.commit()
    finally:
        cur.close()  # fixed: cursor was previously never closed
finally:
    conn.close()  # fixed: connection was previously never closed
| 33.133333 | 112 | 0.617706 |
2464baaa60953b1f88448271e908ec65eac8bfa9 | 18,074 | py | Python | parlai/agents/transformer/polyencoder.py | omry/ParlAI | 61703c7b76dce45bc7f7282b20a35be64c6a0880 | [
"MIT"
] | 1 | 2019-11-08T18:38:42.000Z | 2019-11-08T18:38:42.000Z | parlai/agents/transformer/polyencoder.py | omry/ParlAI | 61703c7b76dce45bc7f7282b20a35be64c6a0880 | [
"MIT"
] | 3 | 2021-05-08T09:44:24.000Z | 2021-07-21T14:03:28.000Z | parlai/agents/transformer/polyencoder.py | omry/ParlAI | 61703c7b76dce45bc7f7282b20a35be64c6a0880 | [
"MIT"
] | 1 | 2020-04-16T15:56:29.000Z | 2020-04-16T15:56:29.000Z | # Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# hack to make sure -m transformer/generator works as expected
"""Poly-encoder Agent."""
from .modules import TransformerEncoder
from .modules import get_n_positions_from_options
from parlai.core.torch_ranker_agent import TorchRankerAgent
from .transformer import TransformerRankerAgent
from .modules import BasicAttention, MultiHeadAttention
import torch
class PolyencoderAgent(TorchRankerAgent):
    """Poly-encoder Agent.
    Equivalent of bert_ranker/polyencoder and biencoder_multiple_output
    but does not rely on an external library (hugging face).
    """
    @classmethod
    def add_cmdline_args(cls, argparser):
        """Add command-line arguments specifically for this agent."""
        # Inherit all transformer-ranker flags, then add poly-encoder ones.
        TransformerRankerAgent.add_cmdline_args(argparser)
        agent = argparser.add_argument_group('Polyencoder Arguments')
        agent.add_argument(
            '--polyencoder-type',
            type=str,
            default='codes',
            choices=['codes', 'n_first'],
            help='Type of polyencoder, either we compute'
            'vectors using codes + attention, or we '
            'simply take the first N vectors.',
        )
        agent.add_argument(
            '--poly-n-codes',
            type=int,
            default=64,
            help='number of vectors used to represent the context'
            'in the case of n_first, those are the number'
            'of vectors that are considered.',
        )
        agent.add_argument(
            '--poly-attention-type',
            type=str,
            default='basic',
            choices=['basic', 'sqrt', 'multihead'],
            help='Type of the top aggregation layer of the poly-'
            'encoder (where the candidate representation is'
            'the key)',
        )
        agent.add_argument(
            '--polyencoder-attention-keys',
            type=str,
            default='context',
            choices=['context', 'position'],
            help='Input emb vectors for the first level of attention. '
            'Context refers to the context outputs; position refers to the '
            'computed position embeddings.',
        )
        agent.add_argument(
            '--poly-attention-num-heads',
            type=int,
            default=4,
            help='In case poly-attention-type is multihead, '
            'specify the number of heads',
        )
        # Those arguments are here in case where polyencoder type is 'code'
        agent.add_argument(
            '--codes-attention-type',
            type=str,
            default='basic',
            choices=['basic', 'sqrt', 'multihead'],
            help='Type ',
        )
        agent.add_argument(
            '--codes-attention-num-heads',
            type=int,
            default=4,
            help='In case codes-attention-type is multihead, '
            'specify the number of heads',
        )
        return agent
    def __init__(self, opt, shared=None):
        super().__init__(opt, shared)
        # NOTE(review): reduce/size_average are deprecated aliases in newer
        # torch (equivalent to reduction='mean') -- confirm before upgrading.
        self.rank_loss = torch.nn.CrossEntropyLoss(reduce=True, size_average=True)
        if self.use_cuda:
            self.rank_loss.cuda()
        self.data_parallel = opt.get('data_parallel') and self.use_cuda
        if self.data_parallel:
            from parlai.utils.distributed import is_distributed
            if is_distributed():
                raise ValueError('Cannot combine --data-parallel and distributed mode')
            # Only wrap once; shared agents reuse the already-wrapped model.
            if shared is None:
                self.model = torch.nn.DataParallel(self.model)
    def build_model(self, states=None):
        """Return built model."""
        return PolyEncoderModule(self.opt, self.dict, self.NULL_IDX)
    def vectorize(self, *args, **kwargs):
        """Add the start and end token to the labels."""
        kwargs['add_start'] = True
        kwargs['add_end'] = True
        obs = super().vectorize(*args, **kwargs)
        return obs
    def _set_text_vec(self, *args, **kwargs):
        """Add the start and end token to the text."""
        obs = super()._set_text_vec(*args, **kwargs)
        # Guard flag prevents adding the start/end tokens twice on re-use.
        if 'text_vec' in obs and 'added_start_end_tokens' not in obs:
            obs.force_set(
                'text_vec', self._add_start_end_tokens(obs['text_vec'], True, True)
            )
            obs['added_start_end_tokens'] = True
        return obs
    def vectorize_fixed_candidates(self, *args, **kwargs):
        """Vectorize fixed candidates.
        Override to add start and end token when computing the candidate encodings
        in interactive mode.
        """
        kwargs['add_start'] = True
        kwargs['add_end'] = True
        return super().vectorize_fixed_candidates(*args, **kwargs)
    def _make_candidate_encs(self, vecs):
        """Make candidate encs.
        The polyencoder module expects
        cand vecs to be 3D while torch_ranker_agent expects it to be 2D.
        This requires a little adjustment (used in interactive mode only)
        """
        rep = super()._make_candidate_encs(vecs)
        return rep.transpose(0, 1).contiguous()
    def encode_candidates(self, padded_cands):
        """Encode candidates."""
        # Add a singleton num_cands dimension: the module expects 3D input.
        padded_cands = padded_cands.unsqueeze(1)
        _, _, _, cand_rep = self.model(cand_tokens=padded_cands)
        return cand_rep
    def score_candidates(self, batch, cand_vecs, cand_encs=None):
        """Score candidates.
        The Poly-encoder encodes the candidate and context independently. Then,
        the model applies additional attention before ultimately scoring a
        candidate.
        """
        bsz = batch.text_vec.size(0)
        ctxt_rep, ctxt_rep_mask, ctxt_pos, _ = self.model(ctxt_tokens=batch.text_vec)
        # Use pre-computed candidate encodings when provided.
        if cand_encs is not None:
            if bsz == 1:
                cand_rep = cand_encs
            else:
                cand_rep = cand_encs.expand(bsz, cand_encs.size(1), -1)
        # bsz x num cands x seq len
        elif len(cand_vecs.shape) == 3:
            _, _, _, cand_rep = self.model(cand_tokens=cand_vecs)
        # bsz x seq len (if batch cands) or num_cands x seq len (if fixed cands)
        elif len(cand_vecs.shape) == 2:
            _, _, _, cand_rep = self.model(cand_tokens=cand_vecs.unsqueeze(1))
            num_cands = cand_rep.size(0)  # will be bsz if using batch cands
            cand_rep = cand_rep.expand(num_cands, bsz, -1).transpose(0, 1).contiguous()
        scores = self.model(
            ctxt_rep=ctxt_rep,
            ctxt_rep_mask=ctxt_rep_mask,
            cand_rep=cand_rep,
            ctxt_pos=ctxt_pos,
        )
        return scores
    def load_state_dict(self, state_dict):
        """Override to account for codes."""
        # If the checkpoint lacks a 'codes' entry, reuse the module's current
        # codes so that loading does not fail on the missing key.
        if self.model.type == 'codes' and 'codes' not in state_dict:
            state_dict['codes'] = self.model.codes
        super().load_state_dict(state_dict)
class PolyEncoderModule(torch.nn.Module):
    """Poly-encoder model.
    See https://arxiv.org/abs/1905.01969 for more details
    """
    def __init__(self, opt, dict, null_idx):
        # NOTE(review): the `dict` parameter shadows the builtin; it is the
        # dictionary agent, kept as-is to preserve the public signature.
        super(PolyEncoderModule, self).__init__()
        self.null_idx = null_idx
        # Two separate encoders: one for contexts (keeps per-token outputs and
        # position embeddings), one for candidates (reduced representation).
        self.encoder_ctxt = self.get_encoder(opt, dict, null_idx, 'none_with_pos_embs')
        self.encoder_cand = self.get_encoder(opt, dict, null_idx, opt['reduction_type'])
        self.type = opt['polyencoder_type']
        self.n_codes = opt['poly_n_codes']
        self.attention_type = opt['poly_attention_type']
        self.attention_keys = opt.get('polyencoder_attention_keys', 'context')
        self.attention_num_heads = opt['poly_attention_num_heads']
        self.codes_attention_type = opt['codes_attention_type']
        self.codes_attention_num_heads = opt['codes_attention_num_heads']
        embed_dim = opt['embedding_size']
        # In case it's a polyencoder with code.
        if self.type == 'codes':
            # experimentally it seems that random with size = 1 was good.
            codes = torch.empty(self.n_codes, embed_dim)
            codes = torch.nn.init.uniform_(codes)
            self.codes = torch.nn.Parameter(codes)
            # The attention for the codes.
            if self.codes_attention_type == 'multihead':
                self.code_attention = MultiHeadAttention(
                    self.codes_attention_num_heads, embed_dim, opt['dropout']
                )
            elif self.codes_attention_type == 'sqrt':
                self.code_attention = PolyBasicAttention(
                    self.type, self.n_codes, dim=2, attn='sqrt', get_weights=False
                )
            elif self.codes_attention_type == 'basic':
                self.code_attention = PolyBasicAttention(
                    self.type, self.n_codes, dim=2, attn='basic', get_weights=False
                )
        # The final attention (the one that takes the candidate as key)
        if self.attention_type == 'multihead':
            self.attention = MultiHeadAttention(
                self.attention_num_heads, opt['embedding_size'], opt['dropout']
            )
        else:
            self.attention = PolyBasicAttention(
                self.type,
                self.n_codes,
                dim=2,
                attn=self.attention_type,
                get_weights=False,
            )
    def get_encoder(self, opt, dict, null_idx, reduction_type):
        """Return encoder, given options.
        :param opt:
            opt dict
        :param dict:
            dictionary agent
        :param null_idx:
            null/pad index into dict
        :reduction_type:
            reduction type for the encoder
        :return:
            a TransformerEncoder, initialized correctly
        """
        n_positions = get_n_positions_from_options(opt)
        embeddings = torch.nn.Embedding(
            len(dict), opt['embedding_size'], padding_idx=null_idx
        )
        torch.nn.init.normal_(embeddings.weight, 0, opt['embedding_size'] ** -0.5)
        return TransformerEncoder(
            n_heads=opt['n_heads'],
            n_layers=opt['n_layers'],
            embedding_size=opt['embedding_size'],
            ffn_size=opt['ffn_size'],
            vocabulary_size=len(dict),
            embedding=embeddings,
            dropout=opt['dropout'],
            attention_dropout=opt['attention_dropout'],
            relu_dropout=opt['relu_dropout'],
            padding_idx=null_idx,
            learn_positional_embeddings=opt['learn_positional_embeddings'],
            embeddings_scale=opt['embeddings_scale'],
            reduction_type=reduction_type,
            n_positions=n_positions,
            n_segments=2,
            activation=opt['activation'],
            variant=opt['variant'],
            output_scaling=opt['output_scaling'],
        )
    def attend(self, attention_layer, queries, keys, values, mask):
        """Apply attention.
        :param attention_layer:
            nn.Module attention layer to use for the attention
        :param queries:
            the queries for attention
        :param keys:
            the keys for attention
        :param values:
            the values for attention
        :param mask:
            mask for the attention keys
        :return:
            the result of applying attention to the values, with weights computed
            wrt to the queries and keys.
        """
        if keys is None:
            keys = values
        # Dispatch on the concrete layer type: the two attention classes take
        # their arguments in different forms.
        if isinstance(attention_layer, PolyBasicAttention):
            return attention_layer(queries, keys, mask_ys=mask, values=values)
        elif isinstance(attention_layer, MultiHeadAttention):
            return attention_layer(queries, keys, values, mask)
        else:
            raise Exception('Unrecognized type of attention')
    def encode(self, ctxt_tokens, cand_tokens):
        """Encode a text sequence.
        :param ctxt_tokens:
            2D long tensor, batchsize x sent_len
        :param cand_tokens:
            3D long tensor, batchsize x num_cands x sent_len
            Note this will actually view it as a 2D tensor
        :return:
            (ctxt_rep, ctxt_mask, ctxt_pos, cand_rep)
            - ctxt_rep 3D float tensor, batchsize x n_codes x dim
            - ctxt_mask byte: batchsize x n_codes (all 1 in case
            of polyencoder with code. Which are the vectors to use
            in the ctxt_rep)
            - ctxt_pos 3D float tensor, batchsize x sent_len x dim
            - cand_rep (3D float tensor) batchsize x num_cands x dim
        """
        cand_embed = None
        ctxt_rep = None
        ctxt_rep_mask = None
        ctxt_pos = None
        if cand_tokens is not None:
            assert len(cand_tokens.shape) == 3
            bsz = cand_tokens.size(0)
            num_cands = cand_tokens.size(1)
            # Flatten (bsz, num_cands) into one batch dim for the encoder,
            # then restore the candidate dimension afterwards.
            cand_embed = self.encoder_cand(cand_tokens.view(bsz * num_cands, -1))
            cand_embed = cand_embed.view(bsz, num_cands, -1)
        if ctxt_tokens is not None:
            assert len(ctxt_tokens.shape) == 2
            bsz = ctxt_tokens.size(0)
            # get context_representation. Now that depends on the cases.
            ctxt_out, ctxt_mask, ctxt_pos = self.encoder_ctxt(ctxt_tokens)
            att_keys = ctxt_out if self.attention_keys == 'context' else ctxt_pos
            dim = ctxt_out.size(2)
            if self.type == 'codes':
                ctxt_rep = self.attend(
                    self.code_attention,
                    queries=self.codes.repeat(bsz, 1, 1),
                    keys=att_keys,
                    values=ctxt_out,
                    mask=ctxt_mask,
                )
                ctxt_pos = None # we don't need this anymore
                ctxt_rep_mask = ctxt_rep.new_ones(bsz, self.n_codes).byte()
            elif self.type == 'n_first':
                # Expand the output if it is not long enough
                if ctxt_out.size(1) < self.n_codes:
                    difference = self.n_codes - ctxt_out.size(1)
                    extra_rep = ctxt_out.new_zeros(bsz, difference, dim)
                    ctxt_rep = torch.cat([ctxt_out, extra_rep], dim=1)
                    ctxt_pos = torch.cat([ctxt_pos, extra_rep], dim=1)
                    extra_mask = ctxt_mask.new_zeros(bsz, difference)
                    ctxt_rep_mask = torch.cat([ctxt_mask, extra_mask], dim=1)
                else:
                    ctxt_rep = ctxt_out[:, 0 : self.n_codes, :]
                    ctxt_pos = ctxt_pos[:, 0 : self.n_codes, :]
                    ctxt_rep_mask = ctxt_mask[:, 0 : self.n_codes]
        return ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_embed
    def score(self, ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_embed):
        """Score the candidates.
        :param ctxt_rep:
            3D float tensor, bsz x ctxt_len x dim
        :param ctxt_rep_mask:
            2D byte tensor, bsz x ctxt_len, in case there are some elements
            of the ctxt that we should not take into account.
        :param ctx_pos: 3D float tensor, bsz x sent_len x dim
        :param cand_embed: 3D float tensor, bsz x num_cands x dim
        :return: scores, 2D float tensor: bsz x num_cands
        """
        # Attention keys determined by self.attention_keys
        # 'context' == use context final rep; otherwise use context position embs
        keys = ctxt_rep if self.attention_keys == 'context' else ctxt_pos
        # reduces the context representation to a 3D tensor bsz x num_cands x dim
        ctxt_final_rep = self.attend(
            self.attention, cand_embed, keys, ctxt_rep, ctxt_rep_mask
        )
        # Dot product between each candidate and its attended context.
        scores = torch.sum(ctxt_final_rep * cand_embed, 2)
        return scores
    def forward(
        self,
        ctxt_tokens=None,
        cand_tokens=None,
        ctxt_rep=None,
        ctxt_rep_mask=None,
        ctxt_pos=None,
        cand_rep=None,
    ):
        """Forward pass of the model.
        Due to a limitation of parlai, we have to have one single model
        in the agent. And because we want to be able to use data-parallel,
        we need to have one single forward() method.
        Therefore the operation_type can be either 'encode' or 'score'.
        :param ctxt_tokens:
            tokenized contexts
        :param cand_tokens:
            tokenized candidates
        :param ctxt_rep:
            (bsz x num_codes x hsz)
            encoded representation of the context. If self.type == 'codes', these
            are the context codes. Otherwise, they are the outputs from the
            encoder
        :param ctxt_rep_mask:
            mask for ctxt rep
        :param ctxt_pos:
            position embeddings for the ctxt_rep. If self.type == 'codes', these
            are None, as their use is earlier in the pipeline.
        :param cand_rep:
            encoded representation of the candidates
        """
        # Token arguments present -> 'encode'; representation arguments
        # present -> 'score'; anything else is a caller error.
        if ctxt_tokens is not None or cand_tokens is not None:
            return self.encode(ctxt_tokens, cand_tokens)
        elif (
            ctxt_rep is not None and ctxt_rep_mask is not None and cand_rep is not None
        ):
            # ctxt_pos can be none, if we are using codes (not first M)
            return self.score(ctxt_rep, ctxt_rep_mask, ctxt_pos, cand_rep)
        raise Exception('Unsupported operation')
class PolyBasicAttention(BasicAttention):
    """Basic attention with a guard for a polyencoder dimensionality edge case."""

    def __init__(self, poly_type, n_codes, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.poly_type = poly_type
        self.n_codes = n_codes

    def forward(self, *args, **kwargs):
        """Run basic attention; restore the dimension that gets squeezed out
        when a 'codes' polyencoder uses a single code vector.
        """
        attended = super().forward(*args, **kwargs)
        squeezed = (
            self.poly_type == 'codes'
            and self.n_codes == 1
            and len(attended.shape) == 2
        )
        if squeezed:
            attended = attended.unsqueeze(self.dim - 1)
        return attended
| 39.462882 | 88 | 0.599535 |
f8ca258b1611d5caad9072a9a870950f37e5b9f4 | 2,304 | py | Python | src/poliastro/core/thrust/change_ecc_inc.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 8 | 2015-05-09T17:21:57.000Z | 2020-01-28T06:59:18.000Z | src/poliastro/core/thrust/change_ecc_inc.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 4 | 2015-12-29T13:08:01.000Z | 2019-12-27T12:58:04.000Z | src/poliastro/core/thrust/change_ecc_inc.py | DhruvJ22/poliastro | ac5fafc6d054b2c545e111e5a6aa32259998074a | [
"MIT"
] | 1 | 2016-10-05T08:34:44.000Z | 2016-10-05T08:34:44.000Z | """Simultaneous eccentricity and inclination changes.
References
----------
* Pollard, J. E. "Simplified Analysis of Low-Thrust Orbital Maneuvers", 2000.
"""
import numpy as np
from numba import njit as jit
from numpy import cross
from poliastro._math.linalg import norm
from poliastro.core.elements import circular_velocity, eccentricity_vector, rv2coe
@jit
def beta(ecc_0, ecc_f, inc_0, inc_f, argp):
    """Estimate the constant thrust angle for the combined maneuver."""
    # Note: "The argument of perigee will vary during the orbit transfer
    # due to the natural drift and because e may approach zero.
    # However, [the equation] still gives a good estimate of the desired
    # thrust angle."
    numerator = 3 * np.pi * (inc_f - inc_0)
    log_term = np.log((1 + ecc_f) * (-1 + ecc_0) / ((1 + ecc_0) * (-1 + ecc_f)))
    denominator = 4 * np.cos(argp) * (ecc_0 - ecc_f + log_term)
    return np.arctan(abs(numerator / denominator))
@jit
def delta_V(V_0, ecc_0, ecc_f, beta_):
    """Compute the velocity increment required for the eccentricity change."""
    arcsin_change = np.abs(np.arcsin(ecc_0) - np.arcsin(ecc_f))
    return 2 * V_0 * arcsin_change / (3 * np.cos(beta_))
@jit
def delta_t(delta_v, f):
    """Compute the thrust time needed to deliver ``delta_v`` at constant acceleration ``f``."""
    return delta_v / f
def change_ecc_inc(k, a, ecc_0, ecc_f, inc_0, inc_f, argp, r, v, f):
    """Return the guidance acceleration function, total delta-V and thrust
    time for a simultaneous eccentricity and inclination change.
    """
    # We fix the inertial direction at the beginning
    if ecc_0 > 0.001: # Arbitrary tolerance
        e_vec = eccentricity_vector(k, r, v)
        ref_vec = e_vec / ecc_0
    else:
        # Near-circular orbit: fall back to the radial direction.
        ref_vec = r / norm(r)
    h_vec = cross(r, v) # Specific angular momentum vector
    h_unit = h_vec / norm(h_vec)
    # In-plane thrust direction, flipped according to whether ecc grows or shrinks.
    thrust_unit = cross(h_unit, ref_vec) * np.sign(ecc_f - ecc_0)
    beta_0 = beta(ecc_0, ecc_f, inc_0, inc_f, argp)
    @jit
    def a_d(t0, u_, k_):
        # Guidance law: u_ packs position (first 3) and velocity (last 3).
        r_ = u_[:3]
        v_ = u_[3:]
        nu = rv2coe(k_, r_, v_)[-1]
        beta_ = beta_0 * np.sign(
            np.cos(nu)
        ) # The sign of ß reverses at minor axis crossings
        w_ = cross(r_, v_) / norm(cross(r_, v_))
        # Split the thrust between the in-plane and out-of-plane directions.
        accel_v = f * (np.cos(beta_) * thrust_unit + np.sin(beta_) * w_)
        return accel_v
    delta_v = delta_V(circular_velocity(k, a), ecc_0, ecc_f, beta_0)
    t_f = delta_t(delta_v, f)
    return a_d, delta_v, t_f
| 27.759036 | 87 | 0.575087 |
c5d53764bfea63f23c517c15e32974842820adb1 | 310 | py | Python | London_COVID_maps/_nbdev.py | lukemshepherd/London_Covid_Maps | 44ed9d2ff613dbf00e1d0ac007f67df82e86899e | [
"Apache-2.0"
] | null | null | null | London_COVID_maps/_nbdev.py | lukemshepherd/London_Covid_Maps | 44ed9d2ff613dbf00e1d0ac007f67df82e86899e | [
"Apache-2.0"
] | null | null | null | London_COVID_maps/_nbdev.py | lukemshepherd/London_Covid_Maps | 44ed9d2ff613dbf00e1d0ac007f67df82e86899e | [
"Apache-2.0"
] | null | null | null | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]
# Placeholders and project URLs; this module is autogenerated (see header).
index = {}
modules = []
doc_url = "https://lukemshepherd.github.io/London_COVID_maps/"
git_url = "https://github.com/lukemshepherd/London_COVID_maps/tree/master/"
def custom_doc_links(name):
    """Return a custom documentation URL for ``name``; none are defined."""
    return None
| 22.142857 | 75 | 0.732258 |
5ef7472bc1a579832e4cb4573c7bf25926725a18 | 3,751 | py | Python | ddtrace/contrib/tornado/__init__.py | dchengrove/dd-trace-py | 549e8d532679c2a7dd0c572ac0a0eb520f6e4d49 | [
"BSD-3-Clause"
] | 1 | 2020-03-10T01:45:56.000Z | 2020-03-10T01:45:56.000Z | ddtrace/contrib/tornado/__init__.py | dchengrove/dd-trace-py | 549e8d532679c2a7dd0c572ac0a0eb520f6e4d49 | [
"BSD-3-Clause"
] | null | null | null | ddtrace/contrib/tornado/__init__.py | dchengrove/dd-trace-py | 549e8d532679c2a7dd0c572ac0a0eb520f6e4d49 | [
"BSD-3-Clause"
] | null | null | null | """
The Tornado integration traces all ``RequestHandler`` defined in a Tornado web application.
Auto instrumentation is available using the ``patch`` function that **must be called before**
importing the tornado library. The following is an example::
# patch before importing tornado and concurrent.futures
from ddtrace import tracer, patch
patch(tornado=True)
import tornado.web
import tornado.gen
import tornado.ioloop
# create your handlers
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
self.write("Hello, world")
# create your application
app = tornado.web.Application([
(r'/', MainHandler),
])
# and run it as usual
app.listen(8888)
tornado.ioloop.IOLoop.current().start()
When any type of ``RequestHandler`` is hit, a request root span is automatically created. If
you want to trace more parts of your application, you can use the ``wrap()`` decorator and
the ``trace()`` method as usual::
class MainHandler(tornado.web.RequestHandler):
@tornado.gen.coroutine
def get(self):
yield self.notify()
yield self.blocking_method()
with tracer.trace('tornado.before_write') as span:
# trace more work in the handler
@tracer.wrap('tornado.executor_handler')
@tornado.concurrent.run_on_executor
def blocking_method(self):
# do something expensive
@tracer.wrap('tornado.notify', service='tornado-notification')
@tornado.gen.coroutine
def notify(self):
# do something
Tornado settings can be used to change some tracing configuration, like::
settings = {
'datadog_trace': {
'default_service': 'my-tornado-app',
'tags': {'env': 'production'},
'distributed_tracing': False,
'analytics_enabled': False,
'settings': {
'FILTERS': [
FilterRequestsOnUrl(r'http://test\\.example\\.com'),
],
},
},
}
app = tornado.web.Application([
(r'/', MainHandler),
], **settings)
The available settings are:
* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually
this configuration must be updated with a meaningful name.
* ``tags`` (default: `{}`): set global tags that should be applied to all spans.
* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the
code is still instrumented but no spans are sent to the APM agent.
* ``distributed_tracing`` (default: `True`): enable distributed tracing if this is called
remotely from an instrumented application.
We suggest to enable it only for internal services where headers are under your control.
* ``analytics_enabled`` (default: `None`): enable generating APM events for Trace Search & Analytics.
* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent.
* ``agent_port`` (default: `8126`): define the port of the APM agent.
* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior.
"""
from ...utils.importlib import require_modules
required_modules = ['tornado']
with require_modules(required_modules) as missing_modules:
    if not missing_modules:
        # Only expose the integration API when tornado is importable.
        from .stack_context import run_with_trace_context, TracerStackContext
        context_provider = TracerStackContext()
        from .patch import patch, unpatch
        __all__ = [
            'patch',
            'unpatch',
            'context_provider',
            'run_with_trace_context',
            'TracerStackContext',
        ]
| 35.386792 | 109 | 0.652093 |
7a3d88c1517cc2481bc33a612e0c1717650387b6 | 6,861 | py | Python | src/pyotr/server.py | berislavlopac/pyotr | 0324983eef6793a720f04ec99cb4c930c8943d75 | [
"MIT"
] | 53 | 2019-07-09T08:57:46.000Z | 2022-03-29T05:33:30.000Z | src/pyotr/server.py | berislavlopac/pyotr | 0324983eef6793a720f04ec99cb4c930c8943d75 | [
"MIT"
] | 13 | 2019-10-11T11:10:47.000Z | 2021-11-04T09:17:34.000Z | src/pyotr/server.py | berislavlopac/pyotr | 0324983eef6793a720f04ec99cb4c930c8943d75 | [
"MIT"
] | null | null | null | from collections import namedtuple
from functools import wraps
from http import HTTPStatus
from importlib import import_module
from inspect import iscoroutine
from pathlib import Path
from types import ModuleType
from typing import Callable, Optional, Union
from urllib.parse import urlsplit
from openapi_core import create_spec
from openapi_core.exceptions import OpenAPIError
from openapi_core.schema.specs.models import Spec
from openapi_core.shortcuts import RequestValidator, ResponseValidator
from openapi_core.validation.exceptions import InvalidSecurity
from starlette.applications import Starlette
from starlette.exceptions import HTTPException
from starlette.requests import Request
from starlette.responses import JSONResponse, Response
from stringcase import snakecase
from pyotr.utils import get_spec_from_file
from pyotr.validation.requests import StarletteOpenAPIRequest
from pyotr.validation.responses import StarletteOpenAPIResponse
# (path, HTTP method) pair identifying one OpenAPI operation.
Operation = namedtuple("Operation", "path method")
class Application(Starlette):
    """Starlette application whose routes and validation are driven by an
    OpenAPI specification: every operation in the spec is mapped to an
    endpoint function, and requests/responses are validated against the spec.
    """
    def __init__(
        self,
        spec: Union[Spec, dict],
        *,
        module: Optional[Union[str, ModuleType]] = None,
        validate_responses: bool = True,
        enforce_case: bool = True,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if not isinstance(spec, Spec):
            spec = create_spec(spec)
        self.spec = spec
        self.validate_responses = validate_responses
        self.enforce_case = enforce_case
        # Path prefixes of the spec's servers; routes are mounted under each.
        self._server_paths = {urlsplit(server.url).path for server in self.spec.servers}
        self.custom_formatters = None
        self.custom_media_type_deserializers = None
        # Map operationId -> Operation(path, method) for every spec operation.
        self._operations = {
            oper.operation_id: Operation(path, method)
            for path, path_spec in spec.paths.items()
            for method, oper in path_spec.operations.items()
        }
        if module is not None:
            if isinstance(module, str):
                module = _load_module(module)
            # Resolve each operationId to a function in the given module;
            # dotted IDs ("pkg.func") are looked up in a submodule.
            for operation_id, operation in self._operations.items():
                name = operation_id
                if "." in name:
                    base, name = name.rsplit(".", 1)
                    base_module = _load_module(f"{module.__name__}.{base}")
                else:
                    base_module = module
                if self.enforce_case:
                    name = snakecase(name)
                try:
                    endpoint_fn = getattr(base_module, name)
                except AttributeError as e:
                    raise RuntimeError(
                        f"The function `{base_module}.{name}` does not exist!"
                    ) from e
                self.set_endpoint(endpoint_fn, operation_id=operation_id)
    def set_endpoint(self, endpoint_fn: Callable, *, operation_id: Optional[str] = None):
        """Sets endpoint function for a given `operationId`.
        If the `operation_id` is not given, it will try to determine it
        based on the function name.
        """
        if operation_id is None:
            operation_id = endpoint_fn.__name__
        # With enforce_case, snake_case IDs are matched against the spec's IDs.
        if self.enforce_case and operation_id not in self._operations:
            operation_id_key = {snakecase(op_id): op_id for op_id in self._operations}.get(
                operation_id
            )
        else:
            operation_id_key = operation_id
        try:
            operation = self._operations[operation_id_key]
        except KeyError as e:
            raise ValueError(f"Unknown operationId: {operation_id}.") from e
        @wraps(endpoint_fn)
        async def wrapper(request: Request, **kwargs) -> Response:
            # Validate the incoming request against the spec before calling
            # the endpoint; map validation failures to HTTP errors.
            openapi_request = await StarletteOpenAPIRequest(request)
            validated_request = RequestValidator(
                self.spec,
                custom_formatters=self.custom_formatters,
                custom_media_type_deserializers=self.custom_media_type_deserializers,
            ).validate(openapi_request)
            try:
                validated_request.raise_for_errors()
            except InvalidSecurity as ex:
                raise HTTPException(HTTPStatus.FORBIDDEN, "Invalid security.") from ex
            except OpenAPIError as ex:
                raise HTTPException(HTTPStatus.BAD_REQUEST, "Bad request") from ex
            response = endpoint_fn(request, **kwargs)
            if iscoroutine(response):
                response = await response
            # A plain dict is convenience-wrapped into a JSON response.
            if isinstance(response, dict):
                response = JSONResponse(response)
            elif not isinstance(response, Response):
                raise ValueError(
                    f"The endpoint function `{endpoint_fn.__name__}` must return"
                    " either a dict or a Starlette Response instance."
                )
            # TODO: pass a list of operation IDs to specify which responses not to validate
            if self.validate_responses:
                ResponseValidator(
                    self.spec,
                    custom_formatters=self.custom_formatters,
                    custom_media_type_deserializers=self.custom_media_type_deserializers,
                ).validate(
                    openapi_request, StarletteOpenAPIResponse(response)
                ).raise_for_errors()
            return response
        # Mount the wrapped endpoint under every server path prefix.
        for server_path in self._server_paths:
            self.add_route(
                server_path + operation.path, wrapper, [operation.method], name=operation_id
            )
    def endpoint(self, operation_id: Union[Callable, str]):
        """Decorator for setting endpoints.
        If used without arguments, it will try to determine the `operationId` based on the
        decorated function name:
            @app.endpoint
            def foo_bar(request):
                # sets the endpoint for operationId fooBar
        Otherwise, the `operationId` can be set explicitly:
            @app.endpoint('fooBar'):
            def my_endpoint():
                ...
        """
        if callable(operation_id):
            self.set_endpoint(operation_id)
            return operation_id
        else:
            def decorator(fn):
                self.set_endpoint(fn, operation_id=operation_id)
                return fn
            return decorator
    @classmethod
    def from_file(cls, path: Union[Path, str], *args, **kwargs) -> "Application":
        """Creates an instance of the class by loading the spec from a local file."""
        spec = get_spec_from_file(path)
        return cls(spec, *args, **kwargs)
def _load_module(name: str) -> ModuleType:
"""Helper function to load module based on its dotted-string name."""
try:
module = import_module(name)
except ModuleNotFoundError as e:
raise RuntimeError(f"The module `{name}` does not exist!") from e
else:
return module
| 38.329609 | 92 | 0.624836 |
ae614310c6455bbc3e6cad0e6dcb076e8f549e1d | 815 | py | Python | src/data/1238.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/1238.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | src/data/1238.py | NULLCT/LOMC | 79a16474a8f21310e0fb47e536d527dd5dc6d655 | [
"MIT"
] | null | null | null | from collections import deque
def main():
    """Read a tree and distance queries from stdin; for each query print
    'Town' when the two nodes are an even number of edges apart, else 'Road'.
    """
    node_count, query_count = map(int, input().split())
    edges = [list(map(int, input().split())) for _ in range(node_count - 1)]
    queries = [list(map(int, input().split())) for _ in range(query_count)]
    # Build an adjacency list over 0-based node indices.
    adjacency = [[] for _ in range(node_count)]
    for a, b in edges:
        adjacency[a - 1].append(b - 1)
        adjacency[b - 1].append(a - 1)
    # BFS from node 0 to get each node's depth (edge distance from the root).
    depth = [-1] * node_count
    depth[0] = 0
    pending = deque([0])
    while pending:
        current = pending.popleft()
        for neighbor in adjacency[current]:
            if depth[neighbor] == -1:
                depth[neighbor] = depth[current] + 1
                pending.append(neighbor)
    # In a tree, dist(c, d) has the same parity as depth[c] + depth[d].
    for c, d in queries:
        print('Town' if (depth[c - 1] + depth[d - 1]) % 2 == 0 else 'Road')
main()
| 21.447368 | 70 | 0.451534 |
e1c654b46bd5a8cdd37f7c022af6307e3dab9109 | 1,591 | py | Python | src/waldur_rancher/urls.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | 2 | 2017-01-20T15:26:25.000Z | 2017-08-03T04:38:08.000Z | src/waldur_rancher/urls.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | src/waldur_rancher/urls.py | opennode/nodeconductor-assembly-waldur | cad9966389dc9b52b13d2301940c99cf4b243900 | [
"MIT"
] | null | null | null | from django.urls import re_path
from . import views
def register_in(router):
    """Register every Rancher viewset on the given DRF router."""
    registrations = (
        (r'rancher-clusters', views.ClusterViewSet, 'rancher-cluster'),
        (r'rancher-nodes', views.NodeViewSet, 'rancher-node'),
        (r'rancher-catalogs', views.CatalogViewSet, 'rancher-catalog'),
        (r'rancher-projects', views.ProjectViewSet, 'rancher-project'),
        (r'rancher-namespaces', views.NamespaceViewSet, 'rancher-namespace'),
        (r'rancher-templates', views.TemplateViewSet, 'rancher-template'),
        (r'rancher-users', views.UserViewSet, 'rancher-user'),
        (r'rancher-workloads', views.WorkloadViewSet, 'rancher-workload'),
        (r'rancher-hpas', views.HPAViewSet, 'rancher-hpa'),
        (r'rancher-cluster-templates', views.ClusterTemplateViewSet, 'rancher-cluster-template'),
        (r'rancher-apps', views.ApplicationViewSet, 'rancher-app'),
        (r'rancher-ingresses', views.IngressViewSet, 'rancher-ingress'),
        (r'rancher-services', views.ServiceViewSet, 'rancher-service'),
    )
    for prefix, viewset, basename in registrations:
        router.register(prefix, viewset, basename=basename)
# Non-router URL: template-version detail, keyed by template UUID and version.
urlpatterns = [
    re_path(
        r'^api/rancher-template-versions/(?P<template_uuid>[a-f0-9]+)/(?P<version>[0-9.]+)/$',
        views.TemplateVersionView.as_view(),
    ),
]
| 33.145833 | 94 | 0.686361 |
f63ee916e04837f8094bb61e87d641b1ffb41b51 | 1,683 | py | Python | shared/templates/create_sockets_disabled.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | shared/templates/create_sockets_disabled.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | shared/templates/create_sockets_disabled.py | fduthilleul/scap-security-guide | f9b67869600f6c20dcb0ba83801578cec1a51bba | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python2
#
# create_sockets_disabled.py
# automatically generate checks for disabled sockets.
#
# NOTE: The file 'template_socket_disabled' should be located in the same
# working directory as this script. The template contains the following tags
# that *must* be replaced successfully in order for the checks to work.
#
# SOCKETNAME - the name of the socket that should be disabled
# PACKAGENAME - the name of the package that installs the socket
#
import sys
import re
from template_common import *
def output_checkfile(socketinfo):
    """Generate one OVAL check file for a socket that must be disabled.

    ``socketinfo`` is a ``(socketname, packagename)`` pair; ``packagename``
    may be empty, in which case the package-related <criteria> wrapper is
    stripped from the template output via regex_replace.

    Writes ``./oval/socket_<socketname>_disabled.xml``.
    """
    socketname, packagename = socketinfo
    # Bug fix: the original also called load_modified() and bound the
    # result to an unused variable; that dead work has been removed.
    if packagename:
        # Package known: fill both template tags directly.
        file_from_template(
            "./template_socket_disabled",
            {
                "SOCKETNAME": socketname,
                "PACKAGENAME": packagename
            },
            "./oval/socket_{0}_disabled.xml", socketname
        )
    else:
        # No package: drop the package <criteria> wrapper from the
        # template output.  Patterns are raw strings so \s / \n are
        # interpreted by the regex engine, not the string parser.
        file_from_template(
            "./template_socket_disabled",
            {
                "SOCKETNAME": socketname,
            },
            regex_replace = [
                (r"\n\s*<criteria.*>\n\s*<extend_definition.*/>", ""),
                (r"\s*</criteria>\n\s*</criteria>", "\n </criteria>")
            ],
            filename_format = "./oval/socket_{0}_disabled.xml",
            filename_value = socketname
        )
def csv_format():
    """Describe the CSV line format this generator expects on input."""
    fields = "socketname,packagename"
    return "Provide a CSV file containing lines of the format: " + fields
# Entry point: template_common.main parses the CSV rows described by
# csv_format() and invokes output_checkfile once per row.
if __name__ == "__main__":
    main(sys.argv, csv_format(), output_checkfile)
| 28.525424 | 76 | 0.607249 |
e778bbd10837ecf55bbf11f9a8cfebb4cb162d42 | 3,195 | py | Python | lib/jnpr/healthbot/swagger/models/trigger_action_schema.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 10 | 2019-10-23T12:54:37.000Z | 2022-02-07T19:24:30.000Z | lib/jnpr/healthbot/swagger/models/trigger_action_schema.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 5 | 2019-09-30T04:29:25.000Z | 2022-02-16T12:21:06.000Z | lib/jnpr/healthbot/swagger/models/trigger_action_schema.py | Juniper/healthbot-py-client | 49f0884b5d01ac8430aa7ed4c9acb4e7a2b717a6 | [
"Apache-2.0"
] | 4 | 2019-09-30T01:17:48.000Z | 2020-08-25T07:27:54.000Z | # coding: utf-8
"""
Paragon Insights APIs
API interface for PI application # noqa: E501
OpenAPI spec version: 4.0.0
Contact: healthbot-feedback@juniper.net
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class TriggerActionSchema(object):
    """Swagger-generated model for a trigger action.

    Holds a single optional ``schedulers`` attribute (a list of scheduler
    names).  Behaviour follows the swagger-codegen model contract:
    dict-style serialisation via ``to_dict`` and value equality based on
    the instance ``__dict__``.
    """

    # Maps attribute name -> swagger type string.
    swagger_types = {
        'schedulers': 'list[str]'
    }

    # Maps attribute name -> JSON key used on the wire.
    attribute_map = {
        'schedulers': 'schedulers'
    }

    def __init__(self, schedulers=None):  # noqa: E501
        """Build the model; ``schedulers`` is optional."""
        self._schedulers = None
        self.discriminator = None
        if schedulers is not None:
            self.schedulers = schedulers

    @property
    def schedulers(self):
        """Return the list of scheduler names, or None when unset."""
        return self._schedulers

    @schedulers.setter
    def schedulers(self, schedulers):
        """Set the list of scheduler names."""
        self._schedulers = schedulers

    def to_dict(self):
        """Return the model properties as a dict."""

        def _plain(value):
            # Recursively convert nested models / containers to plain data.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        result = {attr: _plain(getattr(self, attr))
                  for attr in self.swagger_types}
        if issubclass(TriggerActionSchema, dict):
            for key, value in self.items():
                result[key] = value
        return result

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when ``other`` is an equal TriggerActionSchema."""
        return (isinstance(other, TriggerActionSchema)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
| 27.543103 | 84 | 0.569953 |
6b25d8e38cb061b862ae5a1a092ae36b81d7d898 | 24 | py | Python | oauth/textapis/__init__.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | oauth/textapis/__init__.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | oauth/textapis/__init__.py | Stuff7/stuff7 | c4210ad99c7d745ded3742a645cc9173243946b1 | [
"MIT"
] | null | null | null | from .textapis import *
| 12 | 23 | 0.75 |
1f8b48cf7c0f2fb57d2555e661c6a2346cb5bc59 | 2,698 | py | Python | airbyte-integrations/connectors/source-iterable/source_iterable/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 2 | 2022-03-02T13:46:05.000Z | 2022-03-05T12:31:28.000Z | airbyte-integrations/connectors/source-iterable/source_iterable/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 29 | 2021-10-07T17:20:29.000Z | 2021-12-27T13:07:09.000Z | airbyte-integrations/connectors/source-iterable/source_iterable/source.py | OTRI-Unipd/OTRI-airbyte | 50eeeb773f75246e86c6e167b0cd7d2dda6efe0d | [
"MIT"
] | 1 | 2022-03-11T06:21:24.000Z | 2022-03-11T06:21:24.000Z | #
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from typing import Any, List, Mapping, Tuple
from airbyte_cdk.models import SyncMode
from airbyte_cdk.sources import AbstractSource
from airbyte_cdk.sources.streams import Stream
from .api import (
Campaigns,
CampaignsMetrics,
Channels,
EmailBounce,
EmailClick,
EmailComplaint,
EmailOpen,
EmailSend,
EmailSendSkip,
EmailSubscribe,
EmailUnsubscribe,
Events,
Lists,
ListUsers,
MessageTypes,
Metadata,
Templates,
Users,
)
class SourceIterable(AbstractSource):
    """Airbyte source for the Iterable API.

    Note: some endpoints are redundant (e.g. ``export/userEvents`` vs
    ``events/{email}``); where that happens the variant taking its
    parameters as query params (rather than URL-path params) is used.
    """

    def check_connection(self, logger, config) -> Tuple[bool, any]:
        """Probe the API by pulling a single record from the Lists stream."""
        try:
            records = Lists(api_key=config["api_key"]).read_records(sync_mode=SyncMode.full_refresh)
            next(records)
        except Exception as e:
            return False, f"Unable to connect to Iterable API with the provided credentials - {e}"
        return True, None

    def streams(self, config: Mapping[str, Any]) -> List[Stream]:
        """Instantiate every stream exposed by this connector."""
        api_key = config["api_key"]
        start_date = config["start_date"]
        return [
            Campaigns(api_key=api_key),
            CampaignsMetrics(api_key=api_key, start_date=start_date),
            Channels(api_key=api_key),
            EmailBounce(api_key=api_key, start_date=start_date),
            EmailClick(api_key=api_key, start_date=start_date),
            EmailComplaint(api_key=api_key, start_date=start_date),
            EmailOpen(api_key=api_key, start_date=start_date),
            EmailSend(api_key=api_key, start_date=start_date),
            EmailSendSkip(api_key=api_key, start_date=start_date),
            EmailSubscribe(api_key=api_key, start_date=start_date),
            EmailUnsubscribe(api_key=api_key, start_date=start_date),
            Events(api_key=api_key),
            Lists(api_key=api_key),
            ListUsers(api_key=api_key),
            MessageTypes(api_key=api_key),
            Metadata(api_key=api_key),
            Templates(api_key=api_key, start_date=start_date),
            Users(api_key=api_key, start_date=start_date),
        ]
| 38 | 117 | 0.66086 |
7a41bd21013805ef6489ed747623125509f86c0f | 264 | py | Python | yatube/posts/apps.py | KostKH/MockTube | 58e5246784830cc3a95f528b852d3b98c2f1dcbb | [
"BSD-3-Clause"
] | null | null | null | yatube/posts/apps.py | KostKH/MockTube | 58e5246784830cc3a95f528b852d3b98c2f1dcbb | [
"BSD-3-Clause"
] | null | null | null | yatube/posts/apps.py | KostKH/MockTube | 58e5246784830cc3a95f528b852d3b98c2f1dcbb | [
"BSD-3-Clause"
] | null | null | null | from django.apps import AppConfig
class PostsConfig(AppConfig):
    """App configuration for the posts app.  In particular it provides the
    display name used on the Django admin pages."""
    # Python path name of the Django app.
    name = 'posts'
    # Human-readable name shown in the admin (Russian for
    # "Managing posts and groups") — do not translate: user-facing text.
    verbose_name = 'Управление постами и группами'
| 26.4 | 66 | 0.734848 |
d8221f6210fc46410d0aab2911a03510db867759 | 9,911 | py | Python | accounts/views.py | XplosionDev/xplosionweb | 0bf899516495f91807e8bfa27dadf561c4437661 | [
"MIT"
] | null | null | null | accounts/views.py | XplosionDev/xplosionweb | 0bf899516495f91807e8bfa27dadf561c4437661 | [
"MIT"
] | null | null | null | accounts/views.py | XplosionDev/xplosionweb | 0bf899516495f91807e8bfa27dadf561c4437661 | [
"MIT"
] | null | null | null | from django.shortcuts import render
# Create your views here.
from django.contrib.auth import (authenticate,login,logout)
from random import randint
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.http import Http404
from .forms import UserLoginForm, UserRegisterForm,UserProfileinfo,PlayerRegistrationForm,teamregistration,TeamUpdate,PlayerUpdate,Useremailedit,Userflnameedit
from . import models
import math
def login_view(request):
    """Authenticate a coach and log them in.

    Renders the login form; on a valid submission the user is logged in
    and redirected either to the requested ``next`` URL (if supplied) or
    to the coach profile page.
    """
    next_url = request.GET.get('next')
    title = "Login"
    form = UserLoginForm(request.POST or None)
    if form.is_valid():
        username = form.cleaned_data.get("username")
        password = form.cleaned_data.get('password')
        # The form has already validated the credentials; authenticate()
        # re-resolves them into a user object for login().
        user = authenticate(username=username, password=password)
        login(request, user)
        if next_url:
            # Bug fix: honour the requested "next" destination (the old
            # code always redirected to the site root).
            return redirect(next_url)
        return redirect('Coach_Profile')
    return render(request, "registration/login.html", {"form": form, "title": title})
def view_profile(request, pk=None):
    """Show a coach profile: the user identified by ``pk``, or the
    logged-in user when ``pk`` is not given, together with their teams."""
    user = User.objects.get(pk=pk) if pk else request.user
    context = {'user': user, 'teams': user.profile.teams_set.all()}
    return render(request, 'profile/Profile_Coach.html', context)
@login_required
def edit_profile(request, pk=None):
    """Edit the logged-in coach's profile.

    A valid POST saves the profile and renders the success page; an
    invalid POST re-renders the bound form so validation errors are
    shown.  ``pk`` optionally selects another user for the success-page
    context.
    """
    if request.method == 'POST':
        form = UserProfileinfo(request.POST, request.FILES, instance=request.user.profile)
        if form.is_valid():
            messages.success(request, ('Your profile was successfully updated! Go to profile to see changes!'))
            form.save()
            user = User.objects.get(pk=pk) if pk else request.user
            args = {'user': user, 'profile': request.user.profile}
            return render(request, 'profile/Profile_Coach_Edit_SuccessPage.html', args)
        # Bug fix: keep the bound form instead of rebuilding it, so the
        # validation errors actually reach the template.
        messages.error(request, ('Please correct the error below.'))
    else:
        # Bug fix: the form edits a Profile, so pre-fill it from
        # request.user.profile (the old code passed the User instance).
        form = UserProfileinfo(instance=request.user.profile)
    return render(request, 'profile/Profile_Coach_Edit.html', {'form': form})
def email_edit(request):
    """Let the logged-in user change their e-mail address."""
    user = request.user
    if request.method == 'POST':
        form = Useremailedit(request.POST)
        if form.is_valid():
            user.email = form.cleaned_data.get('email')
            user.save()
            return render(request, "profile/Profile_Coach_Edit_Email_SuccessPage.html")
    else:
        # Unbound form for the initial GET.
        form = Useremailedit(None)
    return render(request, "profile/Profile_Coach_Edit_Email.html", {"form": form})
def flname_edit(request):
    """Let the logged-in user change their first and last name."""
    user = request.user
    if request.method == 'POST':
        form = Userflnameedit(request.POST)
        if form.is_valid():
            user.first_name = form.cleaned_data.get('first_name')
            user.last_name = form.cleaned_data.get('last_name')
            user.save()
            return render(request, "profile/Profile_Coach_Edit_flname_SuccessPage.html")
    else:
        # Unbound form for the initial GET.
        form = Userflnameedit(None)
    return render(request, "profile/Profile_Coach_Edit_flname.html", {"form": form})
def register_view(request):
    """Register a new coach account and log them straight in.

    After a successful registration the user is redirected to ``next``
    (when given) or shown the coach profile page.
    """
    next_url = request.GET.get('next')
    form = UserRegisterForm(request.POST or None)
    if form.is_valid():
        user = form.save(commit=False)
        password = form.cleaned_data.get('password')
        user.set_password(password)
        user.is_confirmed = False
        user.save()
        # Re-authenticate with the fresh credentials and open a session.
        new_user = authenticate(username=user.username, password=password)
        login(request, new_user)
        if next_url:
            return redirect(next_url)
        return render(request, "profile/Profile_Coach.html")
    return render(request, "profile/Create_Account_Coach.html", {"form": form})
@login_required
def team_view(request,pk=None):
    """Create a new team owned by the logged-in coach.

    A valid POST saves the team with the current coach attached and
    renders the success page.  On GET or an invalid POST the function
    falls through and rebuilds the form (re-bound when POST data exists,
    so validation errors can re-surface on render).
    """
    if request.method == 'POST':
        form = teamregistration(request.POST)
        if form.is_valid():
            user=request.user.profile
            messages.success(request, ('Your team has been added!'))
            # Attach the coach before the first real save.
            team=form.save(commit=False)
            team.coach=user
            team.save()
            # NOTE(review): pk looks unused by the URL conf — confirm.
            if pk:
                user = User.objects.get(pk=pk)
            else:
                user = request.user
            args = {'user': user}
            return render(request, 'profile/Profile_Coach_Add_Team_SuccessPage.html', args)
        else:
            messages.error(request, ('Please correct the error below.'))
    # Fall-through: GET (unbound form) or invalid POST (re-bound form).
    form = teamregistration(request.POST or None)
    args = {'form': form}
    return render(request, 'profile/Profile_Coach_Add_Team.html', args)
def Player_view(request,pk=None):
    """Register a coach-dependent player under the current coach.

    Generates a random password and a username of the form
    "<first initial><last name><0-50>" before saving the player.
    """
    if request.method == 'POST':
        form = PlayerRegistrationForm(request.POST,user=request.user.profile)
        if form.is_valid():
            messages.success(request, ('Player has been added!'))
            Player=form.save(commit=False)
            # NOTE(review): the random password is assigned to the model
            # field as-is — confirm it is hashed downstream.
            Player.password= User.objects.make_random_password(length=8)
            first=Player.first_name
            last=Player.last_name
            # NOTE(review): this username scheme is not guaranteed unique.
            Player.username=first[0]+last+str(randint(0,50))
            Player.type="Coach-Dependent"
            Player.save()
            # NOTE(review): pk looks unused by the URL conf — confirm.
            if pk:
                user = User.objects.get(pk=pk)
            else:
                user = request.user
            args = {'user': user}
            return render(request, 'profile/Profile_Coach_Add_Player_SuccessPage.html', args)
        else:
            messages.error(request, ('Please correct the error below.'))
    # Fall-through: GET (unbound form) or invalid POST (re-bound form).
    form = PlayerRegistrationForm(request.POST or None,user=request.user.profile)
    args = {'form': form}
    return render(request, 'profile/Profile_Coach_Add_Player.html', args)
def logout_view(request):
    """Log the current user out and show the logout confirmation page."""
    logout(request)
    return render(request, "registration/logout.html")
def index(request):
    """Render the site landing page."""
    return render(request, "profile/index.html")
def Coach_Profile_Player_View(request, player_id):
    """Show one player's swings and the 3-D length of each swing.

    For every swing the Euclidean distance between its start and end
    positions is computed; the swings and a parallel distance list are
    passed to the template.  Raises Http404 for an unknown player id.
    """
    try:
        swings = models.Players.objects.get(id=player_id).swings_set.all()
    except models.Players.DoesNotExist:
        raise Http404("Player Does not exist")
    # Bug fix: the distance list used to be seeded with the swing count,
    # shifting every distance one position away from its swing.  Also
    # removed a duplicate identical query and the loop variable that
    # shadowed the queryset.
    distances = []
    for swing in swings:
        distances.append(math.sqrt(
            ((swing.start_pos_x - swing.end_pos_x) ** 2)
            + ((swing.start_pos_y - swing.end_pos_y) ** 2)
            + ((swing.start_pos_z - swing.end_pos_z) ** 2)))
    args = {'player': swings, 'vector': distances}
    return render(request, 'profile/Profile_Coach_Player_View.html', args)
def Coach_Profile_Player_View_Update(request, player_id):
    """Edit a single player's details.

    GET renders the pre-filled form; a valid POST saves the changes and
    shows the success page; an invalid POST falls through and re-binds
    the form.  Raises Http404 when the player id is unknown.
    """
    try:
        player = models.Players.objects.get(id=player_id)
    # Bug fix: the lookup is on Players, so catch Players.DoesNotExist —
    # the old handler caught Teams.DoesNotExist and could never fire,
    # turning a missing player into a 500.
    except models.Players.DoesNotExist:
        raise Http404("Player does not exist")
    if request.method == 'POST':
        form = PlayerUpdate(request.POST, instance=player)
        if form.is_valid():
            form.save()
            return render(request, 'profile/Profile_Coach_Team_SuccessPage.html')
        messages.error(request, ('Please correct the error below.'))
    # GET (unbound) or invalid POST (re-bound) form.
    form = PlayerUpdate(request.POST or None, instance=player)
    args = {'form': form}
    return render(request, 'profile/Profile_Coach_Player_Update.html', args)
def Coach_Profile_Player_View_Delete(request, player_id):
    """Delete a player by id, then show the generic success page.

    Raises Http404 when the player id is unknown.
    """
    try:
        models.Players.objects.get(id=player_id).delete()
    # Bug fix: catch Players.DoesNotExist — the old Teams.DoesNotExist
    # handler never matched, so a missing player crashed with a 500.
    except models.Players.DoesNotExist:
        raise Http404("Player does not exist")
    return render(request, 'profile/Profile_Coach_Team_SuccessPage.html')
def Coach_Profile_Team(request):
    """List every team coached by the logged-in user."""
    teams = request.user.profile.teams_set.all()
    return render(request, 'profile/Profile_Coach_Team.html', {'team': teams})
def Coach_Profile_Team_View(request, team_id):
    """Show the players belonging to one team; 404 for an unknown id."""
    try:
        players = models.Teams.objects.get(id=team_id).players_set.all()
    except models.Teams.DoesNotExist:
        raise Http404("Team does not exist")
    return render(request, 'profile/Profile_Coach_Team_View.html', {'players': players})
def Coach_Profile_Team_View_Update(request,team_id):
    """Edit an existing team; raises Http404 for an unknown team id.

    A valid POST saves and renders the success page.  An invalid POST
    falls through past the try block and re-binds the form from the same
    POST data so errors re-surface on render.
    """
    try:
        team = models.Teams.objects.get(id=team_id)
        if request.method == 'POST':
            form = TeamUpdate(request.POST, instance=team)
            if form.is_valid():
                form.save()
                return render(request, 'profile/Profile_Coach_Team_SuccessPage.html')
            else:
                messages.error(request, ('Please correct the error below.'))
    except models.Teams.DoesNotExist:
        raise Http404("Team does not exist")
    # Reached on GET (unbound form) or invalid POST (re-bound form);
    # `team` is always bound here because a failed lookup raised above.
    form = TeamUpdate(request.POST or None,instance=team)
    args = {'form': form}
    return render(request, 'profile/Profile_Coach_Team_Update.html', args)
def Coach_Profile_Team_View_Delete(request, team_id):
    """Delete a team by id and show the success page; 404 when missing."""
    try:
        team = models.Teams.objects.get(id=team_id)
    except models.Teams.DoesNotExist:
        raise Http404("Team does not exist")
    team.delete()
    return render(request, 'profile/Profile_Coach_Team_SuccessPage.html')
def Coach_Delete(request):
    """Permanently delete the logged-in coach's account (no undo)."""
    request.user.delete()
    return render(request, 'profile/Profile_Coach_Delete_SuccessPage.html')
def Coach_Delete_Confirmation(request):
    """Show the confirmation page displayed before account deletion."""
    return render(request, 'profile/Profile_Coach_Delete_Confirmation.html')
c33ae80513db5e7587a493a7d8ea75dc73bac405 | 601 | py | Python | google/storage/speckle/python/tool/__init__.py | MiCHiLU/google_appengine_sdk | 3da9f20d7e65e26c4938d2c4054bc4f39cbc5522 | [
"Apache-2.0"
] | 1,463 | 2015-04-07T09:41:28.000Z | 2022-03-30T02:05:19.000Z | AppServer/google/storage/speckle/python/tool/__init__.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 1,361 | 2015-01-08T23:09:40.000Z | 2020-04-14T00:03:04.000Z | AppServer/google/storage/speckle/python/tool/__init__.py | nlake44/appscale | 6944af660ca4cb772c9b6c2332ab28e5ef4d849f | [
"Apache-2.0"
] | 698 | 2015-04-28T12:02:00.000Z | 2022-03-19T23:53:55.000Z | #!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
| 35.352941 | 74 | 0.753744 |
1338c5f9645748f2e93415aae3904f59513cc23a | 3,173 | py | Python | isi_sdk_8_0/isi_sdk_8_0/models/nfs_exports_summary_summary.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 24 | 2018-06-22T14:13:23.000Z | 2022-03-23T01:21:26.000Z | isi_sdk_8_0/isi_sdk_8_0/models/nfs_exports_summary_summary.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 46 | 2018-04-30T13:28:22.000Z | 2022-03-21T21:11:07.000Z | isi_sdk_8_0/isi_sdk_8_0/models/nfs_exports_summary_summary.py | mohitjain97/isilon_sdk_python | a371f438f542568edb8cda35e929e6b300b1177c | [
"Unlicense"
] | 29 | 2018-06-19T00:14:04.000Z | 2022-02-08T17:51:19.000Z | # coding: utf-8
"""
Isilon SDK
Isilon SDK - Language bindings for the OneFS API # noqa: E501
OpenAPI spec version: 3
Contact: sdk@isilon.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class NfsExportsSummarySummary(object):
    """Swagger-generated model holding an NFS exports summary.

    Exposes a single required ``count`` attribute (the number of objects
    in the collection).  Behaviour follows the swagger-codegen model
    contract: dict-style serialisation via ``to_dict`` and value
    equality based on the instance ``__dict__``.
    """

    # Maps attribute name -> swagger type string.
    swagger_types = {
        'count': 'int'
    }

    # Maps attribute name -> JSON key used on the wire.
    attribute_map = {
        'count': 'count'
    }

    def __init__(self, count=None):  # noqa: E501
        """Build the model; ``count`` is required and may not be None."""
        self._count = None
        self.discriminator = None
        self.count = count

    @property
    def count(self):
        """Return the count of objects in the collection."""
        return self._count

    @count.setter
    def count(self, count):
        """Set the count of objects in the collection.

        Raises ValueError when ``count`` is None (required field).
        """
        if count is None:
            raise ValueError("Invalid value for `count`, must not be `None`")  # noqa: E501
        self._count = count

    def to_dict(self):
        """Return the model properties as a dict."""

        def _plain(value):
            # Recursively convert nested models / containers to plain data.
            if isinstance(value, list):
                return [v.to_dict() if hasattr(v, "to_dict") else v
                        for v in value]
            if hasattr(value, "to_dict"):
                return value.to_dict()
            if isinstance(value, dict):
                return {k: (v.to_dict() if hasattr(v, "to_dict") else v)
                        for k, v in value.items()}
            return value

        return {attr: _plain(getattr(self, attr))
                for attr in self.swagger_types}

    def to_str(self):
        """Return the string representation of the model."""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()

    def __eq__(self, other):
        """Return True when ``other`` is an equal NfsExportsSummarySummary."""
        return (isinstance(other, NfsExportsSummarySummary)
                and self.__dict__ == other.__dict__)

    def __ne__(self, other):
        """Return True when the two objects are not equal."""
        return not self == other
| 27.353448 | 91 | 0.564135 |
0920c0b9bd96fd6874aef1ad6ae5cd7c027906bc | 11,510 | py | Python | visualize.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | 4 | 2021-06-01T02:06:57.000Z | 2022-02-23T02:14:07.000Z | visualize.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | null | null | null | visualize.py | onlyrico/mling_sdgms | ef6015d1a815a317f16fa1e42cbb048e4fe443f7 | [
"MIT"
] | 2 | 2021-01-28T05:48:20.000Z | 2022-01-24T11:59:13.000Z | # -*- coding: UTF-8 -*-
#!/usr/bin/python3
"""
"""
#************************************************************
# Imported Libraries
#************************************************************
import sys
import os
import matplotlib.pyplot as plt
from scipy.misc import imread
import torch
import numpy as np
import faiss
from args import create_args
from params import Params
from main import get_pretrained_model, get_vocabs, print_info
import train_xling
import train_cldc
from train_cldc import get_batch
from nn_model.xlingva import XlingVA
from nn_model.cldc_model import CLDCModel
from nn_model.semicldc_model import SEMICLDCModel
import pdb
def plot_drplan(df):
  """Plot the KL/NLL rate-distortion plane plus CLDC dev accuracy.

  ``df`` holds one row per metric (keyed by its ``beta`` column) and one
  column per beta value, covering four settings: MONO (en), MONO (de),
  XLING and ADVXLING.  NLL is drawn against KL on the left axis and
  accuracy on the right axis; the figure is saved as ``pretrain_mdc.pdf``.
  """
  # Bug fix: this line read "beta = beta = ..." (duplicated assignment).
  beta = df.columns[1:].tolist()
  fig, ax1 = plt.subplots()
  ax2 = ax1.twinx()
  ax1.set_xlabel('KL Value')
  ax1.set_ylabel('NLL Value')
  ax2.set_ylabel('Acc')
  # MONO (en)
  mono_kl_en = df[df['beta'] == 'MONO_DEV_KL (en)'].iloc[0][1:].tolist()
  mono_nll_en = df[df['beta'] == 'MONO_DEV_NLL (en)'].iloc[0][1:].tolist()
  mono_cldc_acc_en = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'MONO_MDC_DEV_VAEEMB (en)'].iloc[0][1:].tolist()]
  ax1.plot(mono_kl_en, mono_nll_en, marker= 'o', color = 'r', label = 'MONO EN')
  ax2.plot(mono_kl_en, mono_cldc_acc_en, marker = 'v', color = 'g', label = 'MONO EN')
  for i, txt in enumerate(beta):
    #ax1.annotate(txt, (mono_kl_en[i], mono_nll_en[i]))
    ax2.annotate(mono_cldc_acc_en[i], (mono_kl_en[i], mono_cldc_acc_en[i]))
  # MONO (de)
  mono_kl_de = df[df['beta'] == 'MONO_DEV_KL (de)'].iloc[0][1:].tolist()
  mono_nll_de = df[df['beta'] == 'MONO_DEV_NLL (de)'].iloc[0][1:].tolist()
  mono_cldc_acc_de = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'MONO_MDC_DEV_VAEEMB (de)'].iloc[0][1:].tolist()]
  ax1.plot(mono_kl_de, mono_nll_de, marker= 'o', color = 'tomato', label = 'MONO DE')
  ax2.plot(mono_kl_de, mono_cldc_acc_de, marker = 'v', color = 'lightgreen', label = 'MONO DE')
  for i, txt in enumerate(beta):
    #ax1.annotate(txt, (mono_kl_de[i], mono_nll_de[i]))
    ax2.annotate(mono_cldc_acc_de[i], (mono_kl_de[i], mono_cldc_acc_de[i]))
  # XLING
  xling_kl = df[df['beta'] == 'XLING_DEV_KL'].iloc[0][1:].tolist()
  xling_nll = df[df['beta'] == 'XLING_DEV_NLL'].iloc[0][1:].tolist()
  xling_cldc_acc_en = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'XLING_MDC_DEV_VAEEMB (en)'].iloc[0][1:].tolist()]
  xling_cldc_acc_de = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'XLING_MDC_DEV_VAEEMB (de)'].iloc[0][1:].tolist()]
  ax1.plot(xling_kl, xling_nll, marker= 'o', color = 'fuchsia', label = 'XLING')
  ax2.plot(xling_kl, xling_cldc_acc_en, marker = 'v', color = 'darkblue', label = 'XLING EN')
  ax2.plot(xling_kl, xling_cldc_acc_de, marker = 'v', color = 'cornflowerblue', label = 'XLING DE')
  for i, txt in enumerate(beta):
    #ax1.annotate(txt, (xling_kl[i], xling_nll[i]))
    ax2.annotate(xling_cldc_acc_en[i], (xling_kl[i], xling_cldc_acc_en[i]))
    ax2.annotate(xling_cldc_acc_de[i], (xling_kl[i], xling_cldc_acc_de[i]))
  # ADVXLING
  advxling_kl = df[df['beta'] == 'ADVXLING_DEV_KL'].iloc[0][1:].tolist()
  advxling_nll = df[df['beta'] == 'ADVXLING_DEV_NLL'].iloc[0][1:].tolist()
  advxling_cldc_acc_en = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'ADVXLING_MDC_DEV_VAEEMB (en)'].iloc[0][1:].tolist()]
  advxling_cldc_acc_de = [float("{:.3f}".format(i)) for i in df[df['beta'] == 'ADVXLING_MDC_DEV_VAEEMB (de)'].iloc[0][1:].tolist()]
  ax1.plot(advxling_kl, advxling_nll, marker= 'o', color = 'violet', label = 'ADVXLING')
  ax2.plot(advxling_kl, advxling_cldc_acc_en, marker = 'v', color = 'gold', label = 'ADVXLING EN')
  ax2.plot(advxling_kl, advxling_cldc_acc_de, marker = 'v', color = 'yellow', label = 'ADVXLING DE')
  for i, txt in enumerate(beta):
    #ax1.annotate(txt, (advxling_kl[i], advxling_nll[i]))
    ax2.annotate(advxling_cldc_acc_en[i], (advxling_kl[i], advxling_cldc_acc_en[i]))
    ax2.annotate(advxling_cldc_acc_de[i], (advxling_kl[i], advxling_cldc_acc_de[i]))
  # Loss legend below the axes, accuracy legend above.
  ax1.legend(loc = 'upper center', bbox_to_anchor=(0.5, -0.05), ncol=4, fancybox=True, shadow=True)
  ax2.legend(loc = 'upper center', bbox_to_anchor=(0.5, 1.05), ncol=6, fancybox=True, shadow=True)
  plt.show()
  fig.savefig('{}.pdf'.format('pretrain_mdc'))
def gen_z_pretrain(params, datas, xlingva):
  """Dump posterior means (mu) for the pretraining dev set, per language.

  Writes one word2vec-style text file ``z.<lang>.out`` per language: a
  "<n_rows> <z_dim>" header, then one line per example containing the
  example text followed by its mu vector.
  """
  xlingva.eval()
  # Batch size used when encoding the dev set.
  bs = 5000
  for lang, lang_idx in params.lang_dict.items():
    with open('./z.{}.out'.format(lang), 'w') as fout:
      data = datas[lang_idx]
      # Ceiling-divide dev_size by bs.
      n_batch = data.dev_size // bs if data.dev_size % bs == 0 else data.dev_size // bs + 1
      data_idxs = list(range(data.dev_size))
      fout.write('{} {}\n'.format(data.dev_size, params.z_dim))
      for k in range(n_batch):
        test_idxs = data_idxs[k * bs: (k + 1) * bs]
        # get padded & sorted batch idxs and
        with torch.no_grad():
          padded_batch, batch_lens = train_xling.get_batch(test_idxs, data, data.dev_idxs, data.dev_lens, params.cuda)
          mu, logvar = xlingva.get_gaus(lang, padded_batch, batch_lens)
        batch_text = data.idx2text(padded_batch.cpu().tolist(), idx_lens = batch_lens.tolist())
        assert(len(batch_text) == mu.shape[0])
        mu = mu.cpu().tolist()
        # One output line per example: "<text> <v1> <v2> ...".
        z_embs = list(zip(batch_text, mu))
        z_embs = ['{} {}'.format(w[0], ' '.join(list(map(lambda x: str(x), w[1])))) for w in z_embs]
        fout.write('{}\n'.format('\n'.join(z_embs)))
def gen_z_cldc(params, datas, m):
  """Dump posterior means (mu) for the CLDC dev set, per language.

  Like gen_z_pretrain, but the dev data is bucketed per class, so each
  batch draws from every class bucket in proportion to dev_prop.
  Writes one file ``z.cldc.<lang>.out`` per language.
  """
  m.eval()
  # Batch size used when encoding the dev set.
  bs = 5000
  for lang, lang_idx in params.lang_dict.items():
    with open('./z.cldc.{}.out'.format(lang), 'w') as fout:
      data = datas[lang_idx]
      # Ceiling-divide dev_size by bs.
      n_batch = data.dev_size // bs if data.dev_size % bs == 0 else data.dev_size // bs + 1
      data_idxs = [list(range(len(dev_idx))) for dev_idx in data.dev_idxs]
      fout.write('{} {}\n'.format(data.dev_size, params.z_dim))
      for j in range(n_batch):
        test_idxs = []
        # Take a dev_prop-proportional slice from every class bucket;
        # the last batch takes whatever remains.
        for k, data_idx in enumerate(data_idxs):
          if j < n_batch - 1:
            test_idxs.append(data_idx[j * int(bs * data.dev_prop[k]): (j + 1) * int(bs * data.dev_prop[k])])
          elif j == n_batch - 1:
            test_idxs.append(data_idx[j * int(bs * data.dev_prop[k]):])
        with torch.no_grad():
          batch_in, batch_lens, batch_lb = train_cldc.get_batch(params, test_idxs, data.dev_idxs, data.dev_lens)
          mu, logvar = m.get_gaus(lang, batch_in, batch_lens)
        batch_text = data.idx2text(batch_in.cpu().tolist(), batch_lb.tolist(), idx_lens = batch_lens.tolist())
        assert(len(batch_text) == mu.shape[0])
        mu = mu.cpu().tolist()
        # One output line per example: "<text> <v1> <v2> ...".
        z_embs = list(zip(batch_text, mu))
        z_embs = ['{} {}'.format(w[0], ' '.join(list(map(lambda x: str(x), w[1])))) for w in z_embs]
        fout.write('{}\n'.format('\n'.join(z_embs)))
def show_lasth(img_dir):
  """Slide-show every image under ``img_dir``, oldest first.

  Walks the directory tree, sorts each directory's files by mtime and
  displays them one at a time with a short pause between frames.
  NOTE(review): relies on scipy.misc.imread (removed in SciPy >= 1.2)
  and on a Qt-style figure manager exposing ``window.showMaximized``.
  """
  mng = plt.get_current_fig_manager()
  mng.window.showMaximized()
  # Interactive mode so plt.pause() refreshes the same window.
  plt.ion()
  for root, dirs, files in os.walk(img_dir):
    files.sort(key=lambda x: os.stat(os.path.join(root, x)).st_mtime)
    for i, file_name in enumerate(files):
      img = imread(os.path.join(root, file_name))
      plt.imshow(img)
      plt.axis('off')
      plt.tight_layout()
      plt.title('Number {}'.format(i))
      plt.pause(0.1)
      plt.clf()
def get_mu1(params, m, input_idxs, input_lens, input_size, lang):
  """Encode each class bucket of a split and return its posterior means.

  For every class index, builds a batch holding only that class's
  examples and runs the model's inference network; returns a list of
  mu1 tensors, one entry per class bucket.

  NOTE(review): assumes params.task is 'cldc' or contains 'semi'; any
  other value would re-append the previous iteration's mu1 — confirm.
  """
  m.eval()
  # Bug fix: removed an unused n_batch computation (the whole bucket is
  # encoded in a single batch below).
  data_idxs = [list(range(len(input_idx))) for input_idx in input_idxs]
  mu1s = []
  test_idxs = []
  for i, data_idx in enumerate(data_idxs):
    # Select exactly one class bucket: every other slot is empty.
    test_idxs = [[]] * i + [data_idx] + [[]] * (len(data_idxs) - 1 - i)
    with torch.no_grad():
      batch_in, batch_lens, batch_lb = get_batch(params, test_idxs, input_idxs, input_lens)
      if params.task == 'cldc':
        # embedding -> encoding -> inference
        input_word_embs = m.embeddings(lang, batch_in)
        hid = m.encoder(input_word_embs, batch_lens)
        mu1, logvar1 = m.inferer(hid)
      elif 'semi' in params.task:
        mu1, logvar1, z1 = m.get_z1(lang, batch_in, batch_lens)
    mu1s.append(mu1)
  return mu1s
def search_and_select(dev_xs, train_xs):
  """For each class, rank training examples by distance to the dev centroid.

  ``dev_xs`` / ``train_xs`` are per-class lists of mu tensors (see
  get_mu1).  For every class the mean dev vector is used as a FAISS L2
  query against all training vectors of the same class; the resulting
  ranking (index matrix) per class is saved to ``CEGM.pth``.
  """
  # Class labels and their bucket indices.
  y_names = ['C', 'E', 'G', 'M']
  y_idxs = [0, 1, 2, 3]
  idx_map = {}
  for idx, label in zip(y_idxs, y_names):
    # Query: the class centroid of the dev set; database: all train vectors.
    xq = torch.mean(dev_xs[idx], dim = 0).unsqueeze(0).cpu().numpy()
    xb = train_xs[idx].cpu().numpy()
    d = xb.shape[-1]
    k = xb.shape[0]
    # faiss search
    index = faiss.IndexFlatL2(d)
    index.add(xb)
    # k equals the full database size, so I ranks every training example.
    D, I = index.search(xq, k)
    idx_map[idx] = I
  torch.save(idx_map, '{}.pth'.format(''.join(y_names)))
if __name__ == '__main__':
  # Which visualisation to run: one of [vis_z, vis_lasth, sel_exp].
  vis_type = 'sel_exp'
  arguments = create_args()
  params = Params(arguments)
  # Load the pretrained model weights and vocabularies.
  vocab_dict, model_dict = get_pretrained_model(params)
  vocabs = get_vocabs(params, vocab_dict)
  # Print general task / vocabulary info.
  print_info(params, vocabs)
  datas = train_cldc.get_data(params, vocabs)
  assert (len(datas) == 2)
  train_lang, train_data = train_cldc.get_lang_data(params, datas, training = True)
  test_lang, test_data = train_cldc.get_lang_data(params, datas)
  if vis_type == 'vis_z':
    # Bug fix: build the model before generating z vectors — the old
    # code called gen_z_cldc(params, datas, m) before `m` was defined,
    # which raised NameError.
    m = CLDCModel(params, datas, model_dict=model_dict)
    gen_z_cldc(params, datas, m)
    # To dump z for the pretraining corpus instead:
    #   xlingva = XlingVA(params, datas, model_dict = model_dict)
    #   gen_z_pretrain(params, datas, xlingva)
  elif vis_type == 'vis_lasth':
    if params.task == 'cldc':
      m = CLDCModel(params, datas, params.cldc_classifier_config)
    elif params.task == 'semi-cldc':
      m = SEMICLDCModel(params, datas)
    # Load the pretrained weights into the freshly built model:
    # 1. filter to the keys the current model expects,
    # 2. overwrite the current state dict, 3. load it.
    cur_model_dict = m.state_dict()
    filtered_model_dict = {k: model_dict[k] for k in cur_model_dict}
    assert (set(filtered_model_dict.keys()) == set(cur_model_dict.keys()))
    cur_model_dict.update(filtered_model_dict)
    m.load_state_dict(cur_model_dict)
    train_cldc.gen_task_info(params, m, datas)
    train_cldc.test(params, m, test_data.test_idxs, test_data.test_lens, test_data.test_size,
                    test_data.test_prop, test_lang, vis = True)
    train_cldc.tsne2d(params, m)
  elif vis_type == 'sel_exp':
    train_size = 16
    if params.task == 'cldc':
      m = CLDCModel(params, datas, params.cldc_classifier_config, model_dict = model_dict)
    elif params.task == 'semi-cldc':
      m = SEMICLDCModel(params, datas, model_dict = model_dict)
    train_cldc.gen_task_info(params, m, datas)
    # Encode both splits per class, then rank training examples against
    # the dev centroids.
    dev_xs = get_mu1(params, m, test_data.dev_idxs, test_data.dev_lens, test_data.dev_size, test_lang)
    train_xs = get_mu1(params, m, train_data.train_idxs, train_data.train_lens, train_data.train_size, train_lang)
    search_and_select(dev_xs, train_xs)
| 39.417808 | 131 | 0.646829 |
c7903dfb7b826c1c044f0e56d6ecbc71f8edbac1 | 248 | py | Python | pyalp/gs_interface/__init__.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | null | null | null | pyalp/gs_interface/__init__.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | 2 | 2021-06-08T19:32:48.000Z | 2022-03-11T23:17:45.000Z | pyalp/gs_interface/__init__.py | Mause/pyalp | fb0f723070e11f8c9ed57e2475eb963599f442a6 | [
"MIT"
] | null | null | null | from os.path import join, dirname, exists, isdir
# Directory of this package on disk.
HERE = dirname(__file__)
# Bundled key material lives in a 'keys' subdirectory next to this module.
KEYS_DIR = join(HERE, 'keys')
# Fail fast at import time if the keys directory is missing.
# NOTE(review): `assert` is stripped under `python -O` - consider raising instead.
assert exists(KEYS_DIR) and isdir(KEYS_DIR)
def init():
    # Prepend the bundled 'tinyrpc' directory to sys.path so it is imported
    # in preference to any installed copy.
    import sys
    sys.path.insert(
        0,
        join(HERE, 'tinyrpc')
    )
| 15.5 | 48 | 0.637097 |
be2d55a631fd7131ea469a6f7ceab8b841b22982 | 3,511 | py | Python | experiments-xml/makeInterventions.py | CherryKitty/PROTON-OC | a116103aed3f2286db40b092c2d797a59e8b1a39 | [
"MIT"
] | 3 | 2020-02-23T10:33:55.000Z | 2021-04-16T08:11:29.000Z | experiments-xml/makeInterventions.py | Deh18/PROTON-OC | 09f74038adadee13dfb61d3264fe0171a2db6612 | [
"MIT"
] | 46 | 2020-04-24T15:50:19.000Z | 2021-09-13T09:53:47.000Z | experiments-xml/makeInterventions.py | Deh18/PROTON-OC | 09f74038adadee13dfb61d3264fe0171a2db6612 | [
"MIT"
] | 1 | 2021-12-25T01:36:48.000Z | 2021-12-25T01:36:48.000Z | import xml.etree.ElementTree as ET
repetitions = 20
#pretty print method
def indent(elem, level=0):
    # Recursively rewrite the .text/.tail whitespace of an ElementTree node so
    # serialisation puts one child per line, indented two spaces per level.
    # Returns `elem` so it can be used inline: ET.ElementTree(indent(root)).
    i = "\n" + level*"  "
    j = "\n" + (level-1)*"  "
    if len(elem):
        # Element has children: open its text one level deeper ...
        if not elem.text or not elem.text.strip():
            elem.text = i + "  "
        if not elem.tail or not elem.tail.strip():
            elem.tail = i
        for subelem in elem:
            indent(subelem, level+1)
        # ... and after recursing, pull this element's tail back to the
        # parent's indentation level.
        if not elem.tail or not elem.tail.strip():
            elem.tail = j
    else:
        # Leaf element: only its tail needs adjusting (never at the root,
        # where level == 0).
        if level and (not elem.tail or not elem.tail.strip()):
            elem.tail = j
    return elem
# ---------------------------------- rp0.4a: base setup, both policing
# interventions ("disruptive" + "preventive") added to the base experiment.
version="rp0.4a"
tree = ET.parse('rp_base.xml')
root = tree.getroot()
al = tree.find('.//experiment')
al.set('name', version)
al.set('repetitions', str(int(repetitions/3)))
al = tree.find('.//enumeratedValueSet[@variable="intervention"]')
al.insert(1, ET.Element("value", value='"preventive"'))
al.insert(1, ET.Element("value", value='"disruptive"'))
#write to file
tree = ET.ElementTree(indent(root))
tree.write(version + '.xml', encoding='utf-8')
# ---------------------------------- rp0.4b-facilitators: only the
# "facilitators" intervention value.
version="rp0.4b-facilitators"
tree = ET.parse('rp_base.xml')
root = tree.getroot()
al = tree.find('.//experiment')
al.set('name', version)
al.set('repetitions', str(int(repetitions)))
al = tree.find('.//enumeratedValueSet[@variable="intervention"]')
# BUGFIX: Element.getchildren() was deprecated since Python 3.2 and removed
# in Python 3.9.  Iterate over a snapshot (list(al)) so removing children
# while iterating is also safe.
for x in list(al):
    al.remove(x)
al.insert(1, ET.Element("value", value='"facilitators"'))
#write to file
tree = ET.ElementTree(indent(root))
tree.write(version + '.xml', encoding='utf-8')
# ---------------------------------- rp0.4b-students: only the "students"
# intervention value.
version="rp0.4b-students"
tree = ET.parse('rp_base.xml')
root = tree.getroot()
al = tree.find('.//experiment')
al.set('name', version)
al.set('repetitions', str(int(repetitions)))
al = tree.find('.//enumeratedValueSet[@variable="intervention"]')
for x in list(al):  # see getchildren() BUGFIX note above
    al.remove(x)
al.insert(1, ET.Element("value", value='"students"'))
#write to file
tree = ET.ElementTree(indent(root))
tree.write(version + '.xml', encoding='utf-8')
# watch out, this should have changed much more to become rp5
# ---------------------------------- rp0.5a: Eindhoven data folder, both
# policing interventions.
version="rp0.5a"
tree = ET.parse('rp_base.xml')
root = tree.getroot()
al = tree.find('.//experiment')
al.set('name', version)
al.set('repetitions', str(int(repetitions/3)))
al = tree.find('.//enumeratedValueSet[@variable="data-folder"]')
for x in list(al):  # see getchildren() BUGFIX note above
    al.remove(x)
al.insert(1, ET.Element("value", value='"inputs/eindhoven/data/"'))
al = tree.find('.//enumeratedValueSet[@variable="intervention"]')
al.insert(1, ET.Element("value", value='"preventive"'))
al.insert(1, ET.Element("value", value='"disruptive"'))
#write to file
tree = ET.ElementTree(indent(root))
tree.write(version + '.xml', encoding='utf-8')
# ---------------------------------- rp0.5b: Eindhoven data folder,
# facilitators + students interventions.
version="rp0.5b"
tree = ET.parse('rp_base.xml')
root = tree.getroot()
al = tree.find('.//experiment')
al.set('name', version)
al.set('repetitions', str(int(repetitions/3)))
al = tree.find('.//enumeratedValueSet[@variable="data-folder"]')
for x in list(al):  # see getchildren() BUGFIX note above
    al.remove(x)
al.insert(1, ET.Element("value", value='"inputs/eindhoven/data/"'))
al = tree.find('.//enumeratedValueSet[@variable="intervention"]')
for x in list(al):  # see getchildren() BUGFIX note above
    al.remove(x)
al.insert(1, ET.Element("value", value='"students"'))
al.insert(1, ET.Element("value", value='"facilitators"'))
#write to file
tree = ET.ElementTree(indent(root))
tree.write(version + '.xml', encoding='utf-8')
| 27.645669 | 67 | 0.623754 |
2a902aff660471b0fdc2abce4cdad1d42563b84d | 2,101 | py | Python | nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 7 | 2017-02-17T08:54:26.000Z | 2022-03-10T20:57:23.000Z | nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 1 | 2016-04-25T15:07:09.000Z | 2016-04-25T15:07:09.000Z | nipype/interfaces/slicer/filtering/tests/test_auto_N4ITKBiasFieldCorrection.py | moloney/nipype | a7a9c85c79cb1412ba03406074f83200447ef50b | [
"Apache-2.0"
] | 2 | 2017-09-23T16:22:00.000Z | 2019-08-01T14:18:52.000Z | # AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..n4itkbiasfieldcorrection import N4ITKBiasFieldCorrection
def test_N4ITKBiasFieldCorrection_inputs():
    # Auto-generated spec check (tools/checkspecs.py): every (trait, metadata)
    # pair listed below must match what the interface's input_spec() declares.
    input_map = dict(
        args=dict(argstr='%s', ),
        bsplineorder=dict(argstr='--bsplineorder %d', ),
        convergencethreshold=dict(argstr='--convergencethreshold %f', ),
        environ=dict(
            nohash=True,
            usedefault=True,
        ),
        histogramsharpening=dict(
            argstr='--histogramsharpening %s',
            sep=',',
        ),
        ignore_exception=dict(
            deprecated='1.0.0',
            nohash=True,
            usedefault=True,
        ),
        inputimage=dict(argstr='--inputimage %s', ),
        iterations=dict(
            argstr='--iterations %s',
            sep=',',
        ),
        maskimage=dict(argstr='--maskimage %s', ),
        meshresolution=dict(
            argstr='--meshresolution %s',
            sep=',',
        ),
        outputbiasfield=dict(
            argstr='--outputbiasfield %s',
            hash_files=False,
        ),
        outputimage=dict(
            argstr='--outputimage %s',
            hash_files=False,
        ),
        shrinkfactor=dict(argstr='--shrinkfactor %d', ),
        splinedistance=dict(argstr='--splinedistance %f', ),
        terminal_output=dict(
            deprecated='1.0.0',
            nohash=True,
        ),
        weightimage=dict(argstr='--weightimage %s', ),
    )
    inputs = N4ITKBiasFieldCorrection.input_spec()
    # Compare each expected metadata value against the live trait definition.
    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
def test_N4ITKBiasFieldCorrection_outputs():
    # Auto-generated spec check: the interface must expose exactly these
    # output traits (no extra metadata is asserted on them).
    output_map = dict(
        outputbiasfield=dict(),
        outputimage=dict(),
    )
    outputs = N4ITKBiasFieldCorrection.output_spec()
    for key, metadata in list(output_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(outputs.traits()[key], metakey) == value
| 32.323077 | 72 | 0.57544 |
32f578b389906de5a6d3396caa40edaa80eb7e3e | 496 | py | Python | myproject/src/calender/migrations/0007_studio_calendar_hourly_rate.py | Denny143/Online-Community-Project | d2e754b2ab5da195ed1846dd99bc7b1d40dfe246 | [
"MIT"
] | null | null | null | myproject/src/calender/migrations/0007_studio_calendar_hourly_rate.py | Denny143/Online-Community-Project | d2e754b2ab5da195ed1846dd99bc7b1d40dfe246 | [
"MIT"
] | null | null | null | myproject/src/calender/migrations/0007_studio_calendar_hourly_rate.py | Denny143/Online-Community-Project | d2e754b2ab5da195ed1846dd99bc7b1d40dfe246 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-14 22:56
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11.3: adds a nullable Hourly_rate
    # DecimalField (max 10 digits, 2 decimal places) to studio_calendar.
    dependencies = [
        ('calender', '0006_auto_20170814_1324'),
    ]
    operations = [
        migrations.AddField(
            model_name='studio_calendar',
            name='Hourly_rate',
            field=models.DecimalField(decimal_places=2, max_digits=10, null=True),
        ),
    ]
| 23.619048 | 82 | 0.639113 |
7d9adeede0a17498813f64e9a328592d7cf6de04 | 264 | py | Python | app/site_company/urls.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | app/site_company/urls.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | app/site_company/urls.py | anhlt59/django_refactor | 1b1d86af9f732a000e30feb7644f6ca60b6e516a | [
"MIT"
] | null | null | null | from django.urls import path, include
# URL namespace for reversing these routes.
# NOTE(review): this module sits under site_company but namespaces itself
# "students" and includes app.site_student URLconfs - confirm this is intended.
app_name = "students"
urlpatterns = [
    # path("mypage/", include("app.site_student.mypage.urls"), name="mypage"),
    path("student-profile/", include("app.site_student.student_profile.urls"), name="student_profile"),
]
| 26.4 | 103 | 0.712121 |
9b39431446d317ed5504ff79311ea322c9dbdfa1 | 38 | py | Python | shepherd/__init__.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | 5 | 2018-10-13T19:03:07.000Z | 2019-02-25T06:44:27.000Z | shepherd/__init__.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | 62 | 2018-09-13T08:03:39.000Z | 2022-01-03T09:05:54.000Z | shepherd/__init__.py | iterait/shepherd | 0847c9885584378dd68a48c40d03f9bb02b2b57c | [
"MIT"
] | null | null | null | __all__ = ['']
# Package semantic version.
__version__ = '0.5.3'
| 9.5 | 21 | 0.552632 |
d0b5a3072a67cb12761ab292b2ea8455b5daf226 | 9,420 | py | Python | sl1m/planner_scenarios/talos/rubble_stairs.py | JasonChmn/sl1m | 87a4af495cc59568fb0f65df0f20d8d0d6c116cd | [
"BSD-2-Clause"
] | 7 | 2020-01-26T15:51:53.000Z | 2021-11-18T08:22:27.000Z | sl1m/planner_scenarios/talos/rubble_stairs.py | JasonChmn/sl1m | 87a4af495cc59568fb0f65df0f20d8d0d6c116cd | [
"BSD-2-Clause"
] | 8 | 2020-05-10T16:39:23.000Z | 2021-08-31T15:21:10.000Z | sl1m/planner_scenarios/talos/rubble_stairs.py | JasonChmn/sl1m | 87a4af495cc59568fb0f65df0f20d8d0d6c116cd | [
"BSD-2-Clause"
] | 10 | 2020-01-23T10:11:41.000Z | 2022-02-17T10:51:07.000Z | import numpy as np
from sl1m.constants_and_tools import *
from numpy import array, asmatrix, matrix, zeros, ones
from numpy import array, dot, stack, vstack, hstack, asmatrix, identity, cross, concatenate
from numpy.linalg import norm
from sl1m.planner import *
from sl1m.tools.plot_plytopes import *
Z_AXIS = np.array([0,0,1]).T
from sl1m.planner_scenarios.talos.constraints import *
### HARDCODED SURFACES, REPLACE IT WITH PATH PLANNING ####
floor = [[-0.30, 0.54 , 0. ], [-0.1 , 0.54, 0. ], [-0.1 , -0.46, 0. ], [-0.30, -0.46, 0. ], ]
step1 = [[ 0.01, 0.54 , 0.1 ], [0.31 , 0.54, 0.1], [0.31 , -0.46, 0.1 ], [ 0.01, -0.46, 0.1 ], ]
step2 = [[ 0.31, 0.54 , 0.2 ], [0.61 , 0.54, 0.2], [0.61 , -0.46, 0.2 ], [ 0.31, -0.46, 0.2 ], ]
step3 = [[ 0.61, 0.54 , 0.3 ], [0.91 , 0.54, 0.3], [0.91 , -0.46, 0.3 ], [ 0.61, -0.46, 0.3 ], ]
step4 = [[ 0.91, 0.54 , 0.4 ], [1.21 , 0.54, 0.4], [1.21 , -0.46, 0.4 ], [ 0.91, -0.46, 0.4 ], ]
step5 = [[ 1.24, 0.54 , 0.5 ], [1.51 , 0.54, 0.5], [1.51 , -0.46, 0.5 ], [ 1.24, -0.46, 0.5 ], ]
step6 = [[ 1.55, 0.54 , 0.6 ], [1.81 , 0.54, 0.6], [1.81 , -0.46, 0.6 ], [ 1.55, -0.46, 0.6 ], ]
#~ step7 = [[ 1.51, 0.94 , 0.6 ], [2.51 , 0.94, 0.6], [2.51 , -1.06, 0.6 ], [ 1.51, -1.06, 0.6 ], ]
step7 = [[ 1.51,-0.46 , 0.6 ], [1.81 , -0.46, 0.6], [1.81 , -0.76, 0.6 ], [ 1.51, -0.76, 0.6 ], ]
bridge = [[ 1.51, -0.46 , 0.6 ], [1.51 , -0.76, 0.6], [-1.49, -0.76, 0.6 ], [-1.49, -0.46, 0.6 ], ]
#~ platfo = [[-1.49, -0.06 , 0.6 ], [-1.49, -1.06, 0.6], [-2.49, -1.06, 0.6 ], [-2.49, -0.06, 0.6 ], ]
platfo = [[-1.49, -0.35, 0.6 ], [-1.49, -1.06, 0.6], [-2.49, -1.06, 0.6 ], [-2.49, -0.35, 0.6 ], ]
#~ step8 = [[-1.49, -0.06 , 0.45], [-1.49, 0.24, 0.45],[-2.49, 0.24, 0.45], [-2.49, -0.06, 0.45], ]
#~ step9 = [[-1.49, 0.24 , 0.30], [-1.49, 0.54, 0.30],[-2.49, 0.54, 0.30], [-2.49, 0.24, 0.30], ]
#~ step10= [[-1.49, 0.54 , 0.15], [-1.49, 0.84, 0.15],[-2.49, 0.84, 0.15], [-2.49, 0.54, 0.15], ]
slope = [[-1.49, -0.06 , 0.6 ], [-1.49, 1.5, 0.], [-2.49, 1.5, 0. ], [-2.49, -0.06, 0.6 ], ]
rub2 = [[ -2.11, 0.19 , 0.05 ], [-2.45 , 0.19, 0.05 ], [ -2.45, 0.53, 0.05 ], [-2.11, 0.53, 0.05 ], ]
rub3 = [[ -1.91, -0.15 , 0.1 ], [-2.25 , -0.15, 0.1 ], [ -2.25, 0.15, 0.1 ], [-1.91, 0.15, 0.1 ], ]
rub4 = [[ -1.69, 0.19 , 0.15 ], [-2.03 , 0.19, 0.15 ], [ -2.03, 0.53, 0.15 ], [-1.69, 0.53, 0.15 ], ]
rub5 = [[ -1.49, -0.15 , 0.2 ], [-1.83 , -0.15, 0.2 ], [ -1.83, 0.18, 0.2 ], [-1.49, 0.18, 0.2 ], ]
rub6 = [[ -1.29, 0.19 , 0.2 ], [-1.63 , 0.19, 0.2 ], [ -1.63, 0.53, 0.2 ], [-1.29, 0.53, 0.2 ], ]
rub7 = [[ -1.09, -0.15 , 0.15 ], [-1.43 , -0.15, 0.15], [ -1.43, 0.18, 0.15], [-1.09, 0.18, 0.15 ], ]
rub75 = [[ -0.89, 0.19 , 0.1 ], [-1.23 , 0.19, 0.1], [ -1.23, 0.53, 0.1], [-0.89, 0.53, 0.1 ], ]
rub8 = [[ -0.89, -0.15 , 0.025 ], [-1.02 , -0.15, 0.025], [ -1.02, 0.18, 0.025], [-0.89, 0.18, 0.025 ], ]
rub9 = [[ -0.35, -0.15 , 0.025 ], [-0.86 , -0.15, 0.025], [-0.86, 0.52, 0.025 ], [ -0.35, 0.52, 0.025], ]
rub8 = [[ -0.89, -0.15 , 0.05 ], [-1.02 , -0.15, 0.05], [ -1.02, 0.18, 0.05], [-0.89, 0.18, 0.05 ], ]
rub9 = [[ -0.45, -0.15 , 0.05 ], [-0.86 , -0.15, 0.05], [-0.86, 0.52, 0.05 ], [ -0.45, 0.52, 0.05], ]
all_surfaces = [floor, step1, step2, step3, step4,step5,step6, step7, bridge, platfo, rub8, rub9,rub7, rub75, rub6, rub5, rub4, rub3, rub2]
arub9 = array(rub9).T
arub8 = array(rub8).T
arub75 = array(rub75).T
arub7 = array(rub7).T
arub6 = array(rub6).T
arub5 = array(rub5).T
arub4 = array(rub4).T
arub3 = array(rub3).T
arub2 = array(rub2).T
#~ arub1 = array(rub1).T
afloor = array(floor).T
astep1 = array(step1).T
astep2 = array(step2).T
astep3 = array(step3).T
astep4 = array(step4).T
astep5 = array(step5).T
astep6 = array(step6).T
astep7 = array(step7).T
#~ astep8 = array(step8).T
#~ astep9 = array(step9).T
#~ astep10 = array(step10).T
abridge = array(bridge).T
aplatfo = array(platfo).T
aslope = array(slope).T
allrub = [arub2,arub3,arub5,arub4,arub6,arub7,arub75,arub9]
#~ surfaces0 = [[arub2],[arub3],allrub,allrub,allrub,allrub,[arub75,arub7,arub9] ,[arub9,afloor],[arub9,afloor],[afloor,arub9,astep1],[astep1,arub9,afloor], [astep2,astep1,astep3,astep4,astep5],[astep3,astep2,astep4,astep5],[astep4,astep1,astep2,astep3,astep5], [astep5,astep4,astep1,astep2,astep3],[astep6,astep5,astep4],[astep6,astep5,astep7],[astep6,astep5,astep7],[astep6],[astep7],[astep7],[abridge,aplatfo],[abridge,aplatfo],[abridge,aplatfo],[abridge,astep7,aplatfo]]
#surfaces0 = [[arub2],[arub3],allrub,allrub,allrub,allrub,[arub75,arub7,arub9] ,[arub9,afloor],[arub9,afloor],[afloor,arub9,astep1],[astep1,arub9,afloor], [astep2,astep1,astep3,astep4,astep5],[astep3,astep2,astep4,astep5],[astep4,astep1,astep2,astep3,astep5], [astep5,astep4,astep1,astep2,astep3],[astep6,astep5,astep4],[astep6,astep5,astep7],[astep6,astep5,astep7],[astep6],[astep7],[astep7],[abridge,aplatfo],[abridge,aplatfo],[abridge,aplatfo],[abridge,astep7,aplatfo]]
#surfaces0 = [[arub2,arub3],[arub3,arub2],[arub4,arub3,arub5],[arub5,arub4,arub3,arub6],[arub6],[arub7],[arub75] ,[arub9,afloor],[arub9,afloor],[afloor,arub9],[astep1], [astep2,astep3,astep4,astep5],[astep3,astep2,astep4,astep5],[astep4,astep1,astep2,astep3,astep5], [astep5,astep4,astep1,astep2,astep3],[astep6,astep5,astep4],[astep6],[astep6],[astep6,],[astep7],[astep7],[abridge,aplatfo],[abridge,aplatfo],[abridge,aplatfo],[abridge,astep7,aplatfo]]
#surfaces1 = [ [abridge], [abridge],[abridge,aplatfo],[abridge,aplatfo],[abridge,aplatfo],[abridge,aplatfo],[aplatfo],[aplatfo],[aplatfo],[aplatfo]]
surfaces = [[arub2,arub3],[arub3,arub2],[arub4,arub3,arub5],[arub5,arub4,arub3,arub6],[arub6],[arub7],[arub75] ,[arub9,afloor],[arub9,afloor],[afloor,arub9],[astep1],[astep2],[astep3], [astep4],[astep5],[astep6],[astep6]]
### END HARDCODED SURFACES ####
def gen_pb(surfaces):
    """Assemble the footstep-planning problem dictionary for the given
    per-phase candidate surfaces (feet alternate moving/fixed each phase)."""
    kin = genKinematicConstraints(left_foot_constraints, right_foot_constraints, min_height=None)
    rel = genFootRelativeConstraints(right_foot_in_lf_frame_constraints, left_foot_in_rf_frame_constraints)
    nphases = len(surfaces)
    p0 = [array([-2.7805096486250154, 0.335, 0.]), array([-2.7805096486250154, 0.145, 0.])]  ## FIXME : get it from planning too
    print("p0 used : ", p0)
    # Initial feet/CoM positions are left unconstrained here (p0 above is only
    # printed for reference).
    res = {"p0": None, "c0": None, "nphases": nphases}
    print("surfaces = ", surfaces)
    # TODO in non planar cases, K must be rotated
    phaseData = []
    for i in range(nphases):
        n_candidates = len(surfaces[i])
        phaseData.append({
            "moving": i % 2,
            "fixed": (i + 1) % 2,
            "K": [copyKin(kin) for _ in range(n_candidates)],
            "relativeK": [rel[i % 2] for _ in range(n_candidates)],
            "S": surfaces[i],
        })
    res["phaseData"] = phaseData
    return res
import mpl_toolkits.mplot3d as a3
import matplotlib.colors as colors
import scipy as sp
def draw_rectangle(l, ax):
    """Draw the closed outline of the 3D polygon `l` (list of [x, y, z]
    points) on the matplotlib axes `ax`."""
    closed = l + [l[0]]  # repeat the first vertex to close the loop
    per_axis = [[point[axis] for point in closed] for axis in range(3)]
    ax.plot(per_axis[0], per_axis[1], per_axis[2])
def plotSurface(points, ax, plt, color_id=-1):
    """Plot one closed surface polygon (`points` is a 3xN array of column
    vertices), shifted down in z by 0.005*color_id so stacked outlines stay
    visible; color_id == -1 lets matplotlib pick the colour."""
    palette = ['r', 'g', 'b', 'm', 'y', 'c']
    closed_x = np.append(points[0, :], points[0, 0]).tolist()
    closed_y = np.append(points[1, :], points[1, 0]).tolist()
    z_shift = np.ones(len(closed_x)) * 0.005 * color_id
    closed_z = (np.append(points[2, :], points[2, 0]) - z_shift).tolist()
    if color_id == -1:
        ax.plot(closed_x, closed_y, closed_z)
    else:
        ax.plot(closed_x, closed_y, closed_z, palette[color_id])
    plt.draw()
def draw_scene(surfaces, ax=None):
    """Plot every candidate surface of every phase, cycling the plot colour
    once per phase; creates a fresh 3D axes when none is supplied."""
    palette_size = 6  # must match the 6-colour palette inside plotSurface
    if ax is None:
        figure = plt.figure()
        ax = figure.add_subplot(111, projection="3d")
    for phase_index, surfaces_phase in enumerate(surfaces):
        colour_index = phase_index % palette_size
        for surface in surfaces_phase:
            plotSurface(surface, ax, plt, colour_index)
    return ax
############# main ###################
def solve():
    """Build the stepping-stones problem and solve it with the L1 sparsity
    heuristic solver."""
    from sl1m.fix_sparsity import solveL1
    problem = gen_pb(surfaces)
    return solveL1(problem, surfaces, draw_scene)
if __name__ == '__main__':
    from sl1m.fix_sparsity import solveL1 ,solveMIP
    pb = gen_pb(surfaces)
    # Solve with the MIP formulation and plot the result; the L1 heuristic
    # call is kept commented out below for quick switching.
    solveMIP(pb, surfaces, MIP = True, draw_scene = draw_scene, plot = True)
    #pb, coms, footpos, allfeetpos, res = solveL1(pb, surfaces, draw_scene)
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d as a3
import matplotlib.colors as colors
import scipy as sp
def draw_rectangle(l, ax):
#~ plotPoints(ax,l)
lr = l + [l[0]]
cx = [c[0] for c in lr]
cy = [c[1] for c in lr]
cz = [c[2] for c in lr]
ax.plot(cx, cy, cz)
def plotSurface (points, ax, plt, c = 'rand'):
xs = [point[0] for point in points]
ys = [point[1] for point in points]
zs = [point[2] for point in points]
xs = np.append(xs, xs[0]).tolist()
ys = np.append(ys, ys[0]).tolist()
zs = np.append(zs, zs[0]).tolist()
if c == 'rand': ax.plot(xs,ys,zs)
else: ax.plot(xs,ys,zs,c)
plt.draw()
def draw_scene(ax = None, color = "p"):
if ax is None:
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
#[draw_rectangle(l,ax) for l in all_surfaces]
for surface in my_surfaces: plotSurface(surface[0], ax, plt)
return ax
global my_surfaces
my_surfaces = tp.surfaces
ax = draw_scene()
plt.show()
"""
| 46.865672 | 474 | 0.578238 |
993242138f48c6646f23e4ee87a6325fc3f5b4b8 | 681 | py | Python | Python_source/PWM_Moter.py | liquse14/MIT-python | a4698de88c97bc3f1a01d067d3b8f8bc671567aa | [
"MIT"
] | null | null | null | Python_source/PWM_Moter.py | liquse14/MIT-python | a4698de88c97bc3f1a01d067d3b8f8bc671567aa | [
"MIT"
] | null | null | null | Python_source/PWM_Moter.py | liquse14/MIT-python | a4698de88c97bc3f1a01d067d3b8f8bc671567aa | [
"MIT"
] | null | null | null | import RPi.GPIO as GPIO
import time
pin=18
GPIO.setmode(GPIO.BCM)  # address pins by Broadcom (BCM) numbering
GPIO.setup(pin,GPIO.OUT)
p=GPIO.PWM(pin,50)  # PWM output on pin 18 at 50 Hz
p.start(0)  # begin with 0% duty cycle (idle)
# Duty-cycle presets (percent) for the three positions; presumably chosen for
# a hobby servo's pulse widths - TODO confirm against the actual hardware.
left_angle=12.5
center_angle=7.5
right_angle=2.5
def doAngle(angle):
    # Apply `angle` as the PWM duty cycle (percent), report it, then wait
    # 0.5 s so the motor can settle before the next command.
    p.ChangeDutyCycle(angle)
    print ("Angle: %d"%angle)
    time.sleep(0.5)
try:
    # Simple console loop: map L/R/C (either case) to the preset duty cycles.
    # Any other input is silently ignored and the prompt repeats.
    while True:
        var=input("Enter L/R/C:")
        if var == 'R' or var =='r':
            print ("Right")
            doAngle(right_angle)
        elif var =='L' or var =='l':
            print ("Left")
            doAngle(left_angle)
        elif var == 'C' or var =='c':
            print ("Center")
            doAngle(center_angle)
except KeyboardInterrupt:
    # Ctrl-C: stop the PWM output and release the GPIO pins cleanly.
    p.stop()
    GPIO.cleanup() | 19.457143 | 37 | 0.565345 |
5dfddf6134a770d8bbd5247a4124cbf9e0cb34dd | 339 | py | Python | posthog/migrations/0062_team_anonymize_ips.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 7,409 | 2020-02-09T23:18:10.000Z | 2022-03-31T22:36:25.000Z | posthog/migrations/0062_team_anonymize_ips.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 5,709 | 2020-02-09T23:26:13.000Z | 2022-03-31T20:20:01.000Z | posthog/migrations/0062_team_anonymize_ips.py | avoajaugochukwu/posthog | 7e7fd42b0542ebc4734aedb926df11d462e3dd4f | [
"MIT"
] | 647 | 2020-02-13T17:50:55.000Z | 2022-03-31T11:24:19.000Z | # Generated by Django 3.0.6 on 2020-06-24 05:26
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 3.0.6: adds the boolean team.anonymize_ips
    # flag, defaulting to False.
    dependencies = [
        ("posthog", "0061_featureflag"),
    ]
    operations = [
        migrations.AddField(model_name="team", name="anonymize_ips", field=models.BooleanField(default=False),),
    ]
| 22.6 | 112 | 0.678466 |
bf9b9fbb2584888ed97f8293802579250e62de2d | 5,223 | py | Python | with_noise/pulse with noise/the statistics/expectancy_vs_sigma.py | helene-todd/M2_thesis_code | f844d6652229c6abe09bd40aa43f5002faa9e5ba | [
"MIT"
] | null | null | null | with_noise/pulse with noise/the statistics/expectancy_vs_sigma.py | helene-todd/M2_thesis_code | f844d6652229c6abe09bd40aa43f5002faa9e5ba | [
"MIT"
] | null | null | null | with_noise/pulse with noise/the statistics/expectancy_vs_sigma.py | helene-todd/M2_thesis_code | f844d6652229c6abe09bd40aa43f5002faa9e5ba | [
"MIT"
] | null | null | null | from matplotlib import cm, rcParams
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib as matplotlib
import numpy as np
import math as math
import random as rand
import os, sys, csv
import pandas as pd
#matplotlib.pyplot.xkcd(scale=.5, length=100, randomness=2)
# Plot colour palette, in the order listed on the next line.
c = ['#aa3863', '#d97020', '#ef9f07', '#449775', '#3b7d86', '#5443a3']
# red, orange, yellow, green, blue, purple
# Wiener increments of the most recent lif_euler_stoch() step; read back by
# correlations() to reconstruct the effective noisy input currents.
dW1, dW2, dW3 = 0, 0 ,0
# Seed from OS entropy; pass 42 instead to reproduce a run.
np.random.seed() #42
def lif_euler(dt, v1, v2, I1, I2):
    """One explicit-Euler step of two leaky integrate-and-fire voltages,
    diffusively coupled through the module-level strength `gamma`."""
    drift1 = -v1 + gamma*(v2 - v1) + I1
    drift2 = -v2 + gamma*(v1 - v2) + I2
    return [v1 + dt*drift1, v2 + dt*drift2]
def lif_euler_stoch(dt, v1, v2, I1, I2, s1, s2, s3):
    """One Euler-Maruyama step of the coupled pair with three Gaussian noise
    sources: s1/s2 private to each unit and s3 shared by both.  The drawn
    Wiener increments are stored in the module globals dW1/dW2/dW3 so callers
    can reconstruct the effective input currents."""
    global dW1, dW2, dW3
    sqrt_dt = math.sqrt(dt)
    dW1 = s1*sqrt_dt*np.random.randn()
    dW2 = s2*sqrt_dt*np.random.randn()
    dW3 = s3*sqrt_dt*np.random.randn()
    drift1 = -v1 + gamma*(v2 - v1) + I1
    drift2 = -v2 + gamma*(v1 - v2) + I2
    return [v1 + dt*drift1 + dW1 + dW3, v2 + dt*drift2 + dW2 + dW3]
def correlations(sigma1, sigma2, sigma3, nb_iterations=1000) :
    """Monte-Carlo estimate of the post-pulse phase distribution.

    Runs `nb_iterations` independent trials of the coupled two-cell LIF
    network (module globals gamma, beta, Vth, Vr, dt, Dtime, maxtime).
    Integration is deterministic except during a 0.2-time-unit window opened
    right after the 10th spike, where noise is injected with per-cell
    amplitudes sigma1/sigma2 and a common amplitude sigma3 shared by both.
    Returns, per trial, the phase phi = (t3-t2)/(t3-t1) (mod 1) computed from
    three consecutive spike times found after t >= Dtime.
    """
    phis = []
    for k in range(nb_iterations) :
        #v1_0, v2_0 = 0.7611728117817528, 0.1654125684129333 # Used XPPAUT to find ideal initial conditions s.t. we begin in antiphase with I = 1.4
        v1_0, v2_0 = 0.3764002759711251, 0.8546679415731656
        x1, x2 = [v1_0], [v2_0]
        t = [0]
        nb_spikes = 0
        I_baseline = 1.5
        I1, I2 = [I_baseline], [I_baseline]
        pulse_start, pulse_duration = 0, 0.2
        begin_pulse = True
        while t[-1] < maxtime :
            t.append(t[-1]+dt)
            # Arm the noise pulse the first time the spike count hits 10.
            if nb_spikes == 10 and begin_pulse :
                pulse_start = t[-1]
                begin_pulse = False
            if nb_spikes >= 10 and t[-1] < pulse_start + pulse_duration :
                # Stochastic step; record the effective (noisy) input currents
                # using the Wiener increments stashed in dW1/dW2/dW3.
                next_values= lif_euler_stoch(dt, x1[-1], x2[-1], I1[-1], I2[-1], sigma1, sigma2, sigma3)
                I1.append(I_baseline + (dW1+dW3)/dt)
                I2.append(I_baseline + (dW2+dW3)/dt)
            else :
                I1.append(I_baseline)
                I2.append(I_baseline)
                next_values = lif_euler(dt, x1[-1], x2[-1], I1[-1], I2[-1])
            # Threshold crossings: reset the spiking cell to 0 and kick the
            # other cell by gamma*beta (which may make it spike too).
            if next_values[0] > 1 :
                x1.append(0)
                nb_spikes += 1
                if next_values[1] + gamma*beta > 1 :
                    x2.append(0)
                else :
                    x2.append(next_values[1]+gamma*beta)
            elif next_values[1] > 1 :
                x2.append(0)
                if next_values[0] + gamma*beta > 1 :
                    x1.append(0)
                else :
                    x1.append(next_values[0]+gamma*beta)
            else :
                x1.append(next_values[0])
                x2.append(next_values[1])
        # Spike times
        # NOTE: `k` here shadows the trial counter of the outer loop.
        # Resets are detected as jumps larger than half the threshold range.
        spike_times, k = [], 0
        for i in range(1, len(t)) :
            if abs(x1[i]-x1[i-1]) > (Vth-Vr)/2 and t[i] >= Dtime :
                spike_times.append(t[i])
                k = i
                break
        for i in range(k, len(t)) :
            if abs(x2[i]-x2[i-1]) > (Vth-Vr)/2 :
                spike_times.append(t[i])
                k = i
                break
        # NOTE(review): x1[i+1] can index one past the end if no third spike
        # occurs before the record ends - potential IndexError; likewise
        # spike_times[2] assumes all three spikes were found.
        for i in range(k, len(t)) :
            if abs(x1[i+1]-x1[i]) > (Vth-Vr)/2 :
                spike_times.append(t[i])
                break
        phis.append((spike_times[2] - spike_times[1])/(spike_times[2] - spike_times[0]))
        """
        # Plot trials
        fig, ax = plt.subplots(2, 1, figsize=(12,5), sharey='row')
        ax[1].plot(t, x1, label='$V_{1}$', color='#aa3863')
        ax[1].plot(t, x2, label='$V_{2}$', color='#3b7d86')
        ax[0].plot(t, I1, label='$I_1$')
        ax[0].plot(t, I2, label='$I_2$')
        ax[0].legend(loc='upper right')
        ax[1].legend(loc='upper right')
        ax[0].set_title('Noisy input current trial, $\sigma=0.0025, I_{base}=1.5, \gamma=0.4, \\beta=0.1$')
        #plt.savefig('trial_example_.png', dpi=600)
        plt.show()
        """
    phis = np.array(phis) % 1
    print("phis ", phis)
    return phis
# Model parameters read as globals by lif_euler/lif_euler_stoch/correlations:
# coupling gamma, kick size beta, threshold/reset voltages, Euler step dt,
# measurement start time Dtime and trial length maxtime.
gamma, beta = 0.4, 0.1
Vth, Vr = 1, 0
dt = 0.001
Dtime = 40
maxtime = 45
# Noise amplitudes swept on the x axis.
sigmas = [0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]
correlated, uncorrelated = [], []
std_corr, std_uncorr = [], []
""" Correlated bs Uncorrelated"""
# For each sigma: mean phase and standard error over 1000 trials, once with
# purely shared (correlated) noise and once with purely private noise.
for sigma in sigmas :
    phi_corr = correlations(0, 0, sigma, 1000)
    phi_corr = pd.Series(phi_corr)
    correlated.append(sum(phi_corr)/len(phi_corr))
    std_corr.append(np.std(phi_corr, ddof=1)/np.sqrt(np.size(phi_corr)))
    phi_uncorr = correlations(sigma, sigma, 0, 1000)
    phi_uncorr = pd.Series(phi_uncorr)
    uncorrelated.append(sum(phi_uncorr)/len(phi_uncorr))
    std_uncorr.append(np.std(phi_uncorr, ddof=1)/np.sqrt(np.size(phi_uncorr)))
# Mean phase vs sigma with standard-error bars, then save the figure.
plt.figure(figsize=(8, 5))
plt.title(f'Expectancy of $\phi(t={Dtime})$ as a function of $\sigma$', size=15)
plt.ylabel('Expectancy $\mathbb{E}$'+f'$(\phi(t={Dtime}))$', size=12)
plt.xlabel('Sqrt of the variance $\sigma$', size=12)
plt.errorbar(sigmas, correlated, yerr=std_corr, fmt='-o', alpha=0.5, color=c[2], label='correlated')
plt.errorbar(sigmas, uncorrelated, yerr=std_uncorr, fmt='-o', alpha=0.5, color=c[-3], label='uncorrelated')
plt.legend(fontsize=11)
plt.tight_layout()
plt.savefig('expectancy_vs_sigma_alpha.svg')
plt.show()
| 33.915584 | 147 | 0.554854 |
2b1e189a72b43a9e1551417d8110317e75551c05 | 1,094 | py | Python | jp.atcoder/abc054/abc054_b/8211407.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-09T03:06:25.000Z | 2022-02-09T03:06:25.000Z | jp.atcoder/abc054/abc054_b/8211407.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | 1 | 2022-02-05T22:53:18.000Z | 2022-02-09T01:29:30.000Z | jp.atcoder/abc054/abc054_b/8211407.py | kagemeka/atcoder-submissions | 91d8ad37411ea2ec582b10ba41b1e3cae01d4d6e | [
"MIT"
] | null | null | null | n, m = [int(x) for x in input().split()]
# Read image A (n rows) and pattern B (m rows); decide whether B occurs in A
# as a (possibly non-contiguous-column but column-aligned) sub-image: all m
# pattern rows must match at the same column offset, on consecutive rows.
img_a = [input() for _ in range(n)]
img_b = [input() for _ in range(m)]
found = False
for a in range(n - m + 1):
    # `border` is the leftmost column still worth trying at this row offset.
    border = 0
    isok = True
    # BUGFIX: the loop guard referenced an undefined name `flag`, raising
    # NameError on first evaluation; the intended sentinel is `found`.
    while isok and not found:
        for b in range(m):
            if img_b[b] in img_a[b + a][border:]:
                if b == 0:
                    index = img_a[a].index(img_b[0], border)
                current_index = img_a[b + a].index(img_b[b], border)
                if current_index == index:
                    continue
                elif current_index > index:
                    # This row only matches further right: restart the scan
                    # from that column.
                    border = current_index
                    break
                else:
                    border = index
                    break
            else:  # if not substring in string.
                isok = False
                break
        else:  # if not loop broken: every pattern row aligned at `index`.
            ans = "Yes"
            found = True
            break
    if found:
        break
else:  # if not 'found==True' even after last loop, then it means B is not in A.
    ans = "No"
print(ans)
| 29.567568 | 81 | 0.439671 |
06238ea2acd3ef3e37b1b0814e35e24e6e85493a | 1,042 | py | Python | Competitive Programming/Google Code Jam 2018/GoGopher.py | piyushravi/Random | c8ccd0861ff0d3dc5542f23e0ce2f8ec4da38865 | [
"MIT"
] | null | null | null | Competitive Programming/Google Code Jam 2018/GoGopher.py | piyushravi/Random | c8ccd0861ff0d3dc5542f23e0ce2f8ec4da38865 | [
"MIT"
] | null | null | null | Competitive Programming/Google Code Jam 2018/GoGopher.py | piyushravi/Random | c8ccd0861ff0d3dc5542f23e0ce2f8ec4da38865 | [
"MIT"
] | null | null | null | import sys
# Interactive solution: for each of t test cases, prepare at least A cells by
# repeatedly aiming the gopher until every tracked 3-cell column is full.
t = int(input())
for _ in range(t):
    A = int(input())
    # Number of 3-cell columns needed: ceil(A/3), but at least 3.
    n = A//3
    n += int(bool(A%3))
    L = []
    n = max(n, 3)
    for x in range(n):
        L.append([False, False, False])
    # flag[x] becomes True once all three cells of column x are prepared.
    flag = [False]*n
    solved = False
    unsolved = False
    #print L
    while False in flag:
        for x in range(n):
            if flag[x] == False:
                # Aim at an unfinished column; second coordinate is fixed at
                # 10, and the first is clamped at the edges so the 3x3
                # neighbourhood stays within the tracked columns 9..n+8.
                if x==n-1:
                    print (8+x, 10)
                elif x==0:
                    print (10, 10)
                else:
                    print (9+x, 10)
                sys.stdout.flush()
                i, j = list(map(int, input().split()))
                # Judge reply (i, j): (0, 0) ends the case successfully,
                # (-1, -1) signals failure; otherwise mark the prepared cell.
                if i==j and i==0:
                    solved = True
                    break
                if i==j and i==-1:
                    unsolved = True
                    break
                L[i-9][j-9] = True
                if L[i-9][0] and L[i-9][1] and L[i-9][2]:
                    flag[i-9] = True
        if solved or unsolved:
            break
    if unsolved:
        break
| 20.431373 | 57 | 0.362764 |
5d5244478f3ef9be1c43488cd47ac5a1f9a53b1f | 1,996 | py | Python | cyint_aws_ml_ops_tools/deployment/inference.py | CYINT/cyint-aws-ml-ops-tools | 5c20ae080f05f57b8aa3704a260be05594b9c1ef | [
"MIT"
] | null | null | null | cyint_aws_ml_ops_tools/deployment/inference.py | CYINT/cyint-aws-ml-ops-tools | 5c20ae080f05f57b8aa3704a260be05594b9c1ef | [
"MIT"
] | null | null | null | cyint_aws_ml_ops_tools/deployment/inference.py | CYINT/cyint-aws-ml-ops-tools | 5c20ae080f05f57b8aa3704a260be05594b9c1ef | [
"MIT"
] | null | null | null | import os
import boto3
import sagemaker
from sagemaker.deserializers import JSONDeserializer
from sagemaker.model import Model
from sagemaker.pipeline import PipelineModel
from sagemaker.serializers import CSVSerializer
from sagemaker.session import Session
from ..universal.pipeline import (
create_s3bucket_if_not_exist,
prepare_pipeline_variables,
sanitize_bucket_name,
)
def define_inference_endpoint(
    name,
    image,
    serializer=CSVSerializer(),
    deserializer=JSONDeserializer(),
    initial_instance_count=1,
    instance_type="ml.c4.xlarge",
    artifact_filename="model.tar.gz",
    aws_access_key=None,
    aws_secret_key=None,
    region=None,
    role=None,
    environment_prefix=None,
):
    """
    Setup a SageMaker inference endpoint based on the environment.

    Resolves credentials/region/prefix via prepare_pipeline_variables, wires
    up a boto3-backed SageMaker session, then deploys a Model whose artifact
    lives at s3://<prefix>-<name>/<artifact_filename>.  Returns the deployed
    predictor from sm_model.deploy().
    """
    resolved = prepare_pipeline_variables(
        environment_prefix, aws_access_key, aws_secret_key, region
    )
    environment_prefix_name, access_key_id, secret_key_id, region_name = resolved
    boto_session = boto3.Session(
        aws_access_key_id=access_key_id,
        aws_secret_access_key=secret_key_id,
        region_name=region_name,
    )
    sagemaker_client = boto_session.client(
        service_name="sagemaker", region_name=region_name
    )
    sagemaker_session = Session(
        boto_session=boto_session, sagemaker_client=sagemaker_client
    )
    # Fall back to the deployment role configured in the environment.
    role_arn = role if role is not None else os.environ["DEPLOYMENT_ROLE"]
    model_name = sanitize_bucket_name(f"{environment_prefix_name}-{name}")
    sm_model = Model(
        image=image,
        model_data=f"s3://{model_name}/{artifact_filename}",
        role=role_arn,
        sagemaker_session=sagemaker_session,
    )
    endpoint_name = f"{environment_prefix_name}-{name}-endpoint"
    return sm_model.deploy(
        initial_instance_count=initial_instance_count,
        serializer=serializer,
        deserializer=deserializer,
        instance_type=instance_type,
        endpoint_name=endpoint_name,
    )
| 26.972973 | 78 | 0.71994 |
10d54e79ce699efb49ba4e653f5a1f4a10bb0f66 | 2,277 | py | Python | hsds/delete_bucket.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | hsds/delete_bucket.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | hsds/delete_bucket.py | murlock/hsds | 9f5fc3cdb64017d07e34eb422eee5398553d213c | [
"Apache-2.0"
] | null | null | null | ##############################################################################
# Copyright by The HDF Group. #
# All rights reserved. #
# #
# This file is part of HSDS (HDF5 Scalable Data Service), Libraries and #
# Utilities. The full HSDS copyright notice, including #
# terms governing use, modification, and redistribution, is contained in #
# the file COPYING, which can be found at the root of the source code #
# distribution tree. If you do not have access to this file, you may #
# request a copy from help@hdfgroup.org. #
##############################################################################
import asyncio
import sys
from aiobotocore import get_session
from util.s3Util import deleteS3Obj, getS3Keys, releaseClient
import config
# This is a utility to delete all objects in the bucket
#
# Print usage and exit
#
def printUsage():
    """Print the command-line usage message and terminate the process.

    Note: sys.exit() with no argument exits with status 0.
    """
    usage_lines = (
        "python delete_bucket.py",
        "Removes all objects in the bucket!",
    )
    for line in usage_lines:
        print(line)
    sys.exit()
async def deleteAll(app):
    """Delete every object in the configured S3 bucket.

    Lists all keys via getS3Keys, asks the user for interactive
    confirmation on stdin, then deletes each key one at a time with
    deleteS3Obj.

    app -- shared application dict; must contain the entries that
    main() sets up (bucket_name, session, loop).
    """
    print("getting list of objects")
    keys = await getS3Keys(app)
    print("got: {} objects".format(len(keys)))
    if len(keys) == 0:
        print("bucket is empty!")
        return
    # verify we really want to do this!
    response = input("Enter 'Y' to continue:")
    if response != 'Y':
        print("cancel")
        return
    # Deletes run sequentially, one await per key.
    for key in keys:
        await deleteS3Obj(app, key)
    print("delete!")
def main():
    """Entry point: remove every object from the configured S3 bucket.

    Handles -h/--help by printing usage (printUsage terminates the
    process itself, so no further exit call is needed). Otherwise
    builds an asyncio event loop and an aiobotocore session, runs
    deleteAll() against the bucket named in config, then releases the
    S3 client and closes the loop.
    """
    if len(sys.argv) > 1 and sys.argv[1] in ("-h", "--help"):
        # printUsage() calls sys.exit(); nothing after it runs.
        printUsage()
    # we need to setup a asyncio loop to query s3
    loop = asyncio.get_event_loop()
    session = get_session(loop=loop)
    app = {}
    app['bucket_name'] = config.get("bucket_name")
    app["session"] = session
    app["loop"] = loop
    loop.run_until_complete(deleteAll(app))
    releaseClient(app)
    loop.close()
    print("done!")
main()
| 28.111111 | 78 | 0.51076 |
2d485232b0aad214ee7e36b8834999a682f7f21f | 40,880 | py | Python | source/NVDAObjects/__init__.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | null | null | null | source/NVDAObjects/__init__.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | null | null | null | source/NVDAObjects/__init__.py | davidhilton936/clone | 0889f95ef2d74f43b2c98f4d45bf09b0c605f1de | [
"bzip2-1.0.6"
] | null | null | null | # -*- coding: UTF-8 -*-
#NVDAObjects/__init__.py
#A part of NonVisual Desktop Access (NVDA)
#Copyright (C) 2006-2016 NV Access Limited, Peter Vágner, Aleksey Sadovoy, Patrick Zajda
#This file is covered by the GNU General Public License.
#See the file COPYING for more details.
"""Module that contains the base NVDA object type"""
from new import instancemethod
import time
import re
import weakref
from logHandler import log
import review
import eventHandler
from displayModel import DisplayModelTextInfo
import baseObject
import speech
import api
import textInfos.offsets
import config
import controlTypes
import appModuleHandler
import treeInterceptorHandler
import braille
import globalPluginHandler
class NVDAObjectTextInfo(textInfos.offsets.OffsetsTextInfo):
	"""Fallback TextInfo for widgets that expose no real text content.
	The owning object's L{NVDAObject.basicText} is presented as the entire
	story text so that text review commands still have something to read.
	"""

	#: No friendly location description is available for basic text.
	locationText=None

	def _get_unit_mouseChunk(self):
		# The whole story is treated as one chunk when the mouse moves over it.
		return textInfos.UNIT_STORY

	def _getStoryText(self):
		# The story is simply the owning object's basic text.
		return self.obj.basicText

	def _getStoryLength(self):
		storyText=self._getStoryText()
		return len(storyText)

	def _getTextRange(self,start,end):
		storyText=self._getStoryText()
		return storyText[start:end]
class InvalidNVDAObject(RuntimeError):
	"""Raised by NVDAObjects during construction to inform that this object is invalid.
	In this case, for the purposes of NVDA, the object should be considered non-existent.
	Therefore, L{DynamicNVDAObjectType} will catch this exception and return C{None}
	to the caller instead of an object.
	"""
class DynamicNVDAObjectType(baseObject.ScriptableObject.__class__):
	"""Metaclass for NVDAObjects which dynamically composes each instance's class.
	Calling an NVDAObject class goes through L{__call__} below, which picks the
	best API-level class, instantiates it, collects overlay classes (from the
	class itself, the app module and global plugins) and then mutates the
	instance's __class__ into a cached dynamic subclass combining them all.
	"""

	#: Cache of dynamically created classes, keyed by their tuple of bases.
	_dynamicClassCache={}

	def __call__(self,chooseBestAPI=True,**kwargs):
		# Choose the most specific API class unless the caller already did so.
		if chooseBestAPI:
			APIClass=self.findBestAPIClass(kwargs)
			if not APIClass: return None
		else:
			APIClass=self
		# Instantiate the requested class.
		try:
			obj=APIClass.__new__(APIClass,**kwargs)
			obj.APIClass=APIClass
			# Only run __init__ if __new__ actually produced an instance of this type.
			if isinstance(obj,self):
				obj.__init__(**kwargs)
		except InvalidNVDAObject, e:
			# The object declared itself invalid; treat it as non-existent.
			log.debugWarning("Invalid NVDAObject: %s" % e, stack_info=True)
			return None
		clsList = []
		if "findOverlayClasses" in APIClass.__dict__:
			obj.findOverlayClasses(clsList)
		else:
			clsList.append(APIClass)
		# Allow app modules to choose overlay classes.
		appModule=obj.appModule
		# optimisation: The base implementation of chooseNVDAObjectOverlayClasses does nothing,
		# so only call this method if it's been overridden.
		if appModule and not hasattr(appModule.chooseNVDAObjectOverlayClasses, "_isBase"):
			appModule.chooseNVDAObjectOverlayClasses(obj, clsList)
		# Allow global plugins to choose overlay classes.
		for plugin in globalPluginHandler.runningPlugins:
			if "chooseNVDAObjectOverlayClasses" in plugin.__class__.__dict__:
				plugin.chooseNVDAObjectOverlayClasses(obj, clsList)
		# Determine the bases for the new class.
		bases=[]
		for index in xrange(len(clsList)):
			# A class doesn't need to be a base if it is already implicitly included by being a superclass of a previous base.
			if index==0 or not issubclass(clsList[index-1],clsList[index]):
				bases.append(clsList[index])
		# Construct the new class.
		if len(bases) == 1:
			# We only have one base, so there's no point in creating a dynamic type.
			newCls=bases[0]
		else:
			bases=tuple(bases)
			newCls=self._dynamicClassCache.get(bases,None)
			if not newCls:
				name="Dynamic_%s"%"".join([x.__name__ for x in clsList])
				newCls=type(name,bases,{})
				self._dynamicClassCache[bases]=newCls
		oldMro=frozenset(obj.__class__.__mro__)
		# Mutate obj into the new class.
		obj.__class__=newCls
		# Initialise the overlay classes.
		for cls in reversed(newCls.__mro__):
			if cls in oldMro:
				# This class was part of the initially constructed object, so its constructor would have been called.
				continue
			initFunc=cls.__dict__.get("initOverlayClass")
			if initFunc:
				initFunc(obj)
			# Bind gestures specified on the class.
			try:
				obj.bindGestures(getattr(cls, "_%s__gestures" % cls.__name__))
			except AttributeError:
				pass
		# Allow app modules to make minor tweaks to the instance.
		if appModule and hasattr(appModule,"event_NVDAObject_init"):
			appModule.event_NVDAObject_init(obj)
		return obj

	@classmethod
	def clearDynamicClassCache(cls):
		"""Clear the dynamic class cache.
		This should be called when a plugin is unloaded so that any used overlay classes in the unloaded plugin can be garbage collected.
		"""
		cls._dynamicClassCache.clear()
class NVDAObject(baseObject.ScriptableObject):
"""NVDA's representation of a single control/widget.
Every widget, regardless of how it is exposed by an application or the operating system, is represented by a single NVDAObject instance.
This allows NVDA to work with all widgets in a uniform way.
An NVDAObject provides information about the widget (e.g. its name, role and value),
as well as functionality to manipulate it (e.g. perform an action or set focus).
Events for the widget are handled by special event methods on the object.
Commands triggered by input from the user can also be handled by special methods called scripts.
See L{ScriptableObject} for more details.
The only attribute that absolutely must be provided is L{processID}.
However, subclasses should provide at least the L{name} and L{role} attributes in order for the object to be meaningful to the user.
Attributes such as L{parent}, L{firstChild}, L{next} and L{previous} link an instance to other NVDAObjects in the hierarchy.
In order to facilitate access to text exposed by a widget which supports text content (e.g. an editable text control),
a L{textInfos.TextInfo} should be implemented and the L{TextInfo} attribute should specify this class.
There are two main types of NVDAObject classes:
* API classes, which provide the core functionality to work with objects exposed using a particular API (e.g. MSAA/IAccessible).
* Overlay classes, which supplement the core functionality provided by an API class to handle a specific widget or type of widget.
Most developers need only be concerned with overlay classes.
The overlay classes to be used for an instance are determined using the L{findOverlayClasses} method on the API class.
An L{AppModule} can also choose overlay classes for an instance using the L{AppModule.chooseNVDAObjectOverlayClasses} method.
"""
__metaclass__=DynamicNVDAObjectType
cachePropertiesByDefault = True
#: The TextInfo class this object should use to provide access to text.
#: @type: type; L{textInfos.TextInfo}
TextInfo=NVDAObjectTextInfo
	@classmethod
	def findBestAPIClass(cls,kwargs,relation=None):
		"""
		Finds out the highest-level APIClass this object can get to given these kwargs, and updates the kwargs and returns the APIClass.
		@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
		@type relation: string
		@param kwargs: the arguments necessary to construct an object of the class this method was called on.
		@type kwargs: dictionary
		@returns: the new APIClass
		@rtype: DynamicNVDAObjectType
		"""
		newAPIClass=cls
		# Only descend if this class itself declares getPossibleAPIClasses (not merely inherits it).
		if 'getPossibleAPIClasses' in newAPIClass.__dict__:
			for possibleAPIClass in newAPIClass.getPossibleAPIClasses(kwargs,relation=relation):
				if 'kwargsFromSuper' not in possibleAPIClass.__dict__:
					log.error("possible API class %s does not implement kwargsFromSuper"%possibleAPIClass)
					continue
				# kwargsFromSuper updates kwargs in place on success; recurse to find an even more specific class.
				if possibleAPIClass.kwargsFromSuper(kwargs,relation=relation):
					return possibleAPIClass.findBestAPIClass(kwargs,relation=relation)
		# The abstract NVDAObject base itself is never a usable API class.
		return newAPIClass if newAPIClass is not NVDAObject else None
@classmethod
def getPossibleAPIClasses(cls,kwargs,relation=None):
"""
Provides a generator which can generate all the possible API classes (in priority order) that inherit directly from the class it was called on.
@param relation: the relationship of a possible new object of this type to another object creating it (e.g. parent).
@param type: string
@param kwargs: the arguments necessary to construct an object of the class this method was called on.
@type kwargs: dictionary
@returns: a generator
@rtype: generator
"""
import NVDAObjects.window
yield NVDAObjects.window.Window
@classmethod
def kwargsFromSuper(cls,kwargs,relation=None):
"""
Finds out if this class can be instanciated from the given super kwargs.
If so it updates the kwargs to contain everything it will need to instanciate this class, and returns True.
If this class can not be instanciated, it returns False and kwargs is not touched.
@param relation: why is this class being instanciated? parent, focus, foreground etc...
@type relation: string
@param kwargs: the kwargs for constructing this class's super class.
@type kwargs: dict
@rtype: boolean
"""
raise NotImplementedError
def findOverlayClasses(self, clsList):
"""Chooses overlay classes which should be added to this object's class structure after the object has been initially instantiated.
After an NVDAObject class (normally an API-level class) is instantiated, this method is called on the instance to choose appropriate overlay classes.
This method may use properties, etc. on the instance to make this choice.
The object's class structure is then mutated to contain these classes.
L{initOverlayClass} is then called for each class which was not part of the initially instantiated object.
This process allows an NVDAObject to be dynamically created using the most appropriate NVDAObject subclass at each API level.
Classes should be listed with subclasses first. That is, subclasses should generally call super and then append their own classes to the list.
For example: Called on an IAccessible NVDAObjectThe list might contain DialogIaccessible (a subclass of IAccessible), Edit (a subclass of Window).
@param clsList: The list of classes, which will be modified by this method if appropriate.
@type clsList: list of L{NVDAObject}
"""
clsList.append(NVDAObject)
beTransparentToMouse=False #:If true then NVDA will never consider the mouse to be on this object, rather it will be on an ancestor.
@staticmethod
def objectFromPoint(x,y):
"""Retreaves an NVDAObject instance representing a control in the Operating System at the given x and y coordinates.
@param x: the x coordinate.
@type x: int
@param y: the y coordinate.
@param y: int
@return: The object at the given x and y coordinates.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation=(x,y))
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
@staticmethod
def objectWithFocus():
"""Retreaves the object representing the control currently with focus in the Operating System. This differens from NVDA's focus object as this focus object is the real focus object according to the Operating System, not according to NVDA.
@return: the object with focus.
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="focus")
if not APIClass:
return None
obj=APIClass(chooseBestAPI=False,**kwargs)
if not obj:
return None
focusRedirect=obj.focusRedirect
if focusRedirect:
obj=focusRedirect
return obj
@staticmethod
def objectInForeground():
"""Retreaves the object representing the current foreground control according to the Operating System. This differes from NVDA's foreground object as this object is the real foreground object according to the Operating System, not according to NVDA.
@return: the foreground object
@rtype: L{NVDAObject}
"""
kwargs={}
APIClass=NVDAObject.findBestAPIClass(kwargs,relation="foreground")
return APIClass(chooseBestAPI=False,**kwargs) if APIClass else None
	def __init__(self):
		"""Initialise per-instance state common to all NVDAObjects."""
		super(NVDAObject,self).__init__()
		self._mouseEntered=False #:True if the mouse has entered this object (for use in L{event_mouseMoved})
		self.textRepresentationLineLength=None #:If an integer greater than 0 then lines of text in this object are always this long.
def _isEqual(self,other):
"""Calculates if this object is equal to another object. Used by L{NVDAObject.__eq__}.
@param other: the other object to compare with.
@type other: L{NVDAObject}
@return: True if equal, false otherwise.
@rtype: boolean
"""
return True
def __eq__(self,other):
"""Compaires the objects' memory addresses, their type, and uses L{NVDAObject._isEqual} to see if they are equal.
"""
if self is other:
return True
if type(self) is not type(other):
return False
return self._isEqual(other)
def __ne__(self,other):
"""The opposite to L{NVDAObject.__eq__}
"""
return not self.__eq__(other)
focusRedirect=None #: Another object which should be treeted as the focus if focus is ever given to this object.
def _get_treeInterceptorClass(self):
"""
If this NVDAObject should use a treeInterceptor, then this property provides the L{treeInterceptorHandler.TreeInterceptor} class it should use.
If not then it should be not implemented.
"""
raise NotImplementedError
#: Whether to create a tree interceptor for this object.
#: This is only relevant if L{treeInterceptorClass} is valid.
#: Normally, this should be C{True}.
#: However, for some objects (e.g. ARIA applications), a tree interceptor shouldn't be used by default,
#: but the user may wish to override this.
#: In this case, this can be set to C{False} and updated later.
#: @type: bool
shouldCreateTreeInterceptor = True
def _get_treeInterceptor(self):
"""Retreaves the treeInterceptor associated with this object.
If a treeInterceptor has not been specifically set, the L{treeInterceptorHandler} is asked if it can find a treeInterceptor containing this object.
@return: the treeInterceptor
@rtype: L{treeInterceptorHandler.TreeInterceptor}
"""
if hasattr(self,'_treeInterceptor'):
ti=self._treeInterceptor
if isinstance(ti,weakref.ref):
ti=ti()
if ti and ti in treeInterceptorHandler.runningTable:
return ti
else:
self._treeInterceptor=None
return None
else:
ti=treeInterceptorHandler.getTreeInterceptor(self)
if ti:
self._treeInterceptor=weakref.ref(ti)
return ti
def _set_treeInterceptor(self,obj):
"""Specifically sets a treeInterceptor to be associated with this object.
"""
if obj:
self._treeInterceptor=weakref.ref(obj)
else: #We can't point a weakref to None, so just set the private variable to None, it can handle that
self._treeInterceptor=None
def _get_appModule(self):
"""Retreaves the appModule representing the application this object is a part of by asking L{appModuleHandler}.
@return: the appModule
@rtype: L{appModuleHandler.AppModule}
"""
if not hasattr(self,'_appModuleRef'):
a=appModuleHandler.getAppModuleForNVDAObject(self)
if a:
self._appModuleRef=weakref.ref(a)
return a
else:
return self._appModuleRef()
def _get_name(self):
"""The name or label of this object (example: the text of a button).
@rtype: basestring
"""
return ""
def _get_role(self):
"""The role or type of control this object represents (example: button, list, dialog).
@return: a ROLE_* constant from L{controlTypes}
@rtype: int
"""
return controlTypes.ROLE_UNKNOWN
def _get_value(self):
"""The value of this object (example: the current percentage of a scrollbar, the selected option in a combo box).
@rtype: basestring
"""
return ""
def _get_description(self):
"""The description or help text of this object.
@rtype: basestring
"""
return ""
def _get_controllerFor(self):
"""Retreaves the object/s that this object controls."""
return []
def _get_actionCount(self):
"""Retreaves the number of actions supported by this object."""
return 0
def getActionName(self,index=None):
"""Retreaves the name of an action supported by this object.
If index is not given then the default action will be used if it exists.
@param index: the optional 0-based index of the wanted action.
@type index: int
@return: the action's name
@rtype: basestring
"""
raise NotImplementedError
def doAction(self,index=None):
"""Performs an action supported by this object.
If index is not given then the default action will be used if it exists.
"""
raise NotImplementedError
def _get_defaultActionIndex(self):
"""Retreaves the index of the action that is the default."""
return 0
def _get_keyboardShortcut(self):
"""The shortcut key that activates this object(example: alt+t).
@rtype: basestring
"""
return ""
def _get_isInForeground(self):
"""
Finds out if this object is currently within the foreground.
"""
raise NotImplementedError
def _get_states(self):
"""Retreaves the current states of this object (example: selected, focused).
@return: a set of STATE_* constants from L{controlTypes}.
@rtype: set of int
"""
return set()
def _get_location(self):
"""The location of this object on the screen.
@return: left, top, width and height of the object.
@rtype: tuple of int
"""
raise NotImplementedError
def _get_locationText(self):
"""A message that explains the location of the object in friendly terms."""
location=self.location
if not location:
return None
(left,top,width,height)=location
deskLocation=api.getDesktopObject().location
(deskLeft,deskTop,deskWidth,deskHeight)=deskLocation
percentFromLeft=(float(left-deskLeft)/deskWidth)*100
percentFromTop=(float(top-deskTop)/deskHeight)*100
percentWidth=(float(width)/deskWidth)*100
percentHeight=(float(height)/deskHeight)*100
# Translators: Reports navigator object's dimensions (example output: object edges positioned 20 per cent from left edge of screen, 10 per cent from top edge of screen, width is 40 per cent of screen, height is 50 per cent of screen).
return _("Object edges positioned {left:.1f} per cent from left edge of screen, {top:.1f} per cent from top edge of screen, width is {width:.1f} per cent of screen, height is {height:.1f} per cent of screen").format(left=percentFromLeft,top=percentFromTop,width=percentWidth,height=percentHeight)
def _get_parent(self):
"""Retreaves this object's parent (the object that contains this object).
@return: the parent object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_container(self):
"""
Exactly like parent, however another object at this same sibling level may be retreaved first (e.g. a groupbox). Mostly used when presenting context such as focus ancestry.
"""
# Cache parent.
parent = self.parent
self.parent = parent
return parent
def _get_next(self):
"""Retreaves the object directly after this object with the same parent.
@return: the next object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_previous(self):
"""Retreaves the object directly before this object with the same parent.
@return: the previous object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_firstChild(self):
"""Retreaves the first object that this object contains.
@return: the first child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_lastChild(self):
"""Retreaves the last object that this object contains.
@return: the last child object if it exists else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_children(self):
"""Retreaves a list of all the objects directly contained by this object (who's parent is this object).
@rtype: list of L{NVDAObject}
"""
children=[]
child=self.firstChild
while child:
children.append(child)
child=child.next
return children
def getChild(self, index):
"""Retrieve a child by index.
@note: Subclasses may override this if they have an efficient way to retrieve a single, arbitrary child.
The base implementation uses L{children}.
@param index: The 0-based index of the child to retrieve.
@type index: int
@return: The child.
@rtype: L{NVDAObject}
"""
return self.children[index]
def _get_rowNumber(self):
"""Retreaves the row number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnNumber(self):
"""Retreaves the column number of this object if it is in a table.
@rtype: int
"""
raise NotImplementedError
def _get_cellCoordsText(self):
"""
An alternative text representation of cell coordinates e.g. "a1". Will override presentation of rowNumber and columnNumber.
Only implement if the representation is really different.
"""
return None
def _get_rowCount(self):
"""Retreaves the number of rows this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_columnCount(self):
"""Retreaves the number of columns this object contains if its a table.
@rtype: int
"""
raise NotImplementedError
def _get_rowHeaderText(self):
"""The text of the row headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_columnHeaderText(self):
"""The text of the column headers for this cell.
@rtype: str
"""
raise NotImplementedError
def _get_table(self):
"""Retreaves the object that represents the table that this object is contained in, if this object is a table cell.
@rtype: L{NVDAObject}
"""
raise NotImplementedError
def _get_tableID(self):
"""The identifier of the table associated with this object if it is a table cell.
This identifier must distinguish this table from other tables.
If this is not implemented, table cell information will still be reported,
but row and column information will always be reported
even if the user moves to a cell in the same row/column.
"""
raise NotImplementedError
def _get_recursiveDescendants(self):
"""Recursively traverse and return the descendants of this object.
This is a depth-first forward traversal.
@return: The recursive descendants of this object.
@rtype: generator of L{NVDAObject}
"""
for child in self.children:
yield child
for recursiveChild in child.recursiveDescendants:
yield recursiveChild
presType_unavailable="unavailable"
presType_layout="layout"
presType_content="content"
	def _get_presentationType(self):
		"""Classify this object as unavailable, layout-only or real content.
		Used to filter the "simple" object hierarchy. Checks are ordered:
		visibility/availability first, then role, then name/description,
		then table-reporting configuration.
		"""
		states=self.states
		if controlTypes.STATE_INVISIBLE in states or controlTypes.STATE_UNAVAILABLE in states:
			return self.presType_unavailable
		role=self.role
		#Static text should be content only if it really use usable text
		if role==controlTypes.ROLE_STATICTEXT:
			text=self.makeTextInfo(textInfos.POSITION_ALL).text
			return self.presType_content if text and not text.isspace() else self.presType_layout
		# These roles are pure structure; they never present as content.
		if role in (controlTypes.ROLE_UNKNOWN, controlTypes.ROLE_PANE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_ROOTPANE, controlTypes.ROLE_LAYEREDPANE, controlTypes.ROLE_SCROLLPANE, controlTypes.ROLE_SECTION, controlTypes.ROLE_PARAGRAPH, controlTypes.ROLE_TITLEBAR, controlTypes.ROLE_LABEL, controlTypes.ROLE_WHITESPACE,controlTypes.ROLE_BORDER):
			return self.presType_layout
		name = self.name
		description = self.description
		if not name and not description:
			# Containers with no label of their own are only layout.
			if role in (controlTypes.ROLE_WINDOW,controlTypes.ROLE_PANEL, controlTypes.ROLE_PROPERTYPAGE, controlTypes.ROLE_TEXTFRAME, controlTypes.ROLE_GROUPING,controlTypes.ROLE_OPTIONPANE,controlTypes.ROLE_INTERNALFRAME,controlTypes.ROLE_FORM,controlTypes.ROLE_TABLEBODY):
				return self.presType_layout
		if role == controlTypes.ROLE_TABLE and not config.conf["documentFormatting"]["reportTables"]:
			return self.presType_layout
		if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN,controlTypes.ROLE_TABLECELL) and (not config.conf["documentFormatting"]["reportTables"] or not config.conf["documentFormatting"]["reportTableCellCoords"]):
			return self.presType_layout
		if role in (controlTypes.ROLE_TABLEROW,controlTypes.ROLE_TABLECOLUMN):
			try:
				table=self.table
			except NotImplementedError:
				table=None
			if table:
				# This is part of a real table, so the cells will report row/column information.
				# Therefore, this object is just for layout.
				return self.presType_layout
		return self.presType_content
def _get_simpleParent(self):
obj=self.parent
while obj and obj.presentationType!=self.presType_content:
obj=obj.parent
return obj
	def _findSimpleNext(self,useChild=False,useParent=True,goPrevious=False):
		"""Find the next (or previous) object in the simple, presentation-filtered hierarchy.
		Layout-only objects are descended into transparently; unavailable objects are stepped over.
		@param useChild: whether to first try descending into this object's children.
		@param useParent: whether to fall back to walking up through layout-only ancestors.
		@param goPrevious: if True, search backwards instead of forwards.
		@return: the next content object, or C{None} if none is found.
		"""
		# Direction is expressed by which attribute pair we traverse.
		nextPrevAttrib="next" if not goPrevious else "previous"
		firstLastChildAttrib="firstChild" if not goPrevious else "lastChild"
		found=None
		if useChild:
			child=getattr(self,firstLastChildAttrib)
			childPresType=child.presentationType if child else None
			if childPresType==self.presType_content:
				found=child
			elif childPresType==self.presType_layout:
				# Layout child: look inside it as well as beside it.
				found=child._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
			elif child:
				# Unavailable child: skip it, but keep scanning its siblings.
				found=child._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
			if found:
				return found
		next=getattr(self,nextPrevAttrib)
		nextPresType=next.presentationType if next else None
		if nextPresType==self.presType_content:
			found=next
		elif nextPresType==self.presType_layout:
			found=next._findSimpleNext(useChild=True,useParent=False,goPrevious=goPrevious)
		elif next:
			found=next._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
		if found:
			return found
		# No sibling result: climb through layout-only ancestors looking for their next sibling.
		parent=self.parent if useParent else None
		while parent and parent.presentationType!=self.presType_content:
			next=parent._findSimpleNext(useChild=False,useParent=False,goPrevious=goPrevious)
			if next:
				return next
			parent=parent.parent
def _get_simpleNext(self):
return self._findSimpleNext()
def _get_simplePrevious(self):
return self._findSimpleNext(goPrevious=True)
def _get_simpleFirstChild(self):
child=self.firstChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False)
return child
def _get_simpleLastChild(self):
child=self.lastChild
if not child:
return None
presType=child.presentationType
if presType!=self.presType_content: return child._findSimpleNext(useChild=(presType!=self.presType_unavailable),useParent=False,goPrevious=True)
return child
def _get_childCount(self):
"""Retreaves the number of children this object contains.
@rtype: int
"""
return len(self.children)
def _get_activeChild(self):
"""Retreaves the child of this object that currently has, or contains, the focus.
@return: the active child if it has one else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isFocusable(self):
"""Whether this object is focusable.
@rtype: bool
"""
return controlTypes.STATE_FOCUSABLE in self.states
def _get_hasFocus(self):
"""Whether this object has focus.
@rtype: bool
"""
return controlTypes.STATE_FOCUSED in self.states
def setFocus(self):
"""
Tries to force this object to take the focus.
"""
pass
def scrollIntoView(self):
"""Scroll this object into view on the screen if possible.
"""
raise NotImplementedError
def _get_labeledBy(self):
"""Retreaves the object that this object is labeled by (example: the static text label beside an edit field).
@return: the label object if it has one else None.
@rtype: L{NVDAObject} or None
"""
return None
def _get_positionInfo(self):
"""Retreaves position information for this object such as its level, its index with in a group, and the number of items in that group.
@return: a dictionary containing any of level, groupIndex and similarItemsInGroup.
@rtype: dict
"""
return {}
def _get_processID(self):
"""Retreaves an identifyer of the process this object is a part of.
@rtype: int
"""
raise NotImplementedError
def _get_isProtected(self):
"""
@return: True if this object is protected (hides its input for passwords), or false otherwise
@rtype: boolean
"""
return False
def _get_indexInParent(self):
"""The index of this object in its parent object.
@return: The 0 based index, C{None} if there is no parent.
@rtype: int
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsTo(self):
"""The object to which content flows from this object.
@return: The object to which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_flowsFrom(self):
"""The object from which content flows to this object.
@return: The object from which this object flows, C{None} if none.
@rtype: L{NVDAObject}
@raise NotImplementedError: If not supported by the underlying object.
"""
raise NotImplementedError
def _get_isPresentableFocusAncestor(self):
"""Determine if this object should be presented to the user in the focus ancestry.
@return: C{True} if it should be presented in the focus ancestry, C{False} if not.
@rtype: bool
"""
if self.presentationType == self.presType_layout:
return False
if self.role in (controlTypes.ROLE_TREEVIEWITEM, controlTypes.ROLE_LISTITEM, controlTypes.ROLE_PROGRESSBAR, controlTypes.ROLE_EDITABLETEXT):
return False
return True
def _get_statusBar(self):
"""Finds the closest status bar in relation to this object.
@return: the found status bar else None
@rtype: L{NVDAObject} or None
"""
return None
def _get_isCurrent(self):
"""Gets the value that indicates whether this object is the current element in a set of related
elements. This maps to aria-current. Normally returns False. If this object is current
it will return one of the following values: True, "page", "step", "location", "date", "time"
"""
return False
def reportFocus(self):
"""Announces this object in a way suitable such that it gained focus.
"""
speech.speakObject(self,reason=controlTypes.REASON_FOCUS)
	def _reportErrorInPreviousWord(self):
		"""Play an audible alert if the word just completed is marked as misspelt.
		Examines the formatting of the last character of the previous word at
		the caret and plays waves\\textError.wav when its field carries
		"invalid-spelling".
		"""
		try:
			# self might be a descendant of the text control; e.g. Symphony.
			# We want to deal with the entire text, so use the caret object.
			info = api.getCaretObject().makeTextInfo(textInfos.POSITION_CARET)
			# This gets called for characters which might end a word; e.g. space.
			# The character before the caret is the word end.
			# The one before that is the last of the word, which is what we want.
			info.move(textInfos.UNIT_CHARACTER, -2)
			info.expand(textInfos.UNIT_CHARACTER)
			fields = info.getTextWithFields()
		except RuntimeError:
			return
		except:
			# Focus probably moved.
			# NOTE(review): deliberately broad best-effort catch; logged below rather than raised.
			log.debugWarning("Error fetching last character of previous word", exc_info=True)
			return
		for command in fields:
			if isinstance(command, textInfos.FieldCommand) and command.command == "formatChange" and command.field.get("invalid-spelling"):
				break
		else:
			# No error.
			return
		import nvwave
		nvwave.playWaveFile(r"waves\textError.wav")
	def event_typedCharacter(self,ch):
		"""Handle a character typed into this object: spelling-error alert, echo, capslock beep.
		@param ch: the character that was typed.
		@type ch: unicode
		"""
		if config.conf["documentFormatting"]["reportSpellingErrors"] and config.conf["keyboard"]["alertForSpellingErrors"] and (
			# Not alpha, apostrophe or control.
			ch.isspace() or (ch >= u" " and ch not in u"'\x7f" and not ch.isalpha())
		):
			# Reporting of spelling errors is enabled and this character ends a word.
			self._reportErrorInPreviousWord()
		speech.speakTypedCharacters(ch)
		import winUser
		# Warn when typing lowercase with caps lock on, if configured to do so.
		if config.conf["keyboard"]["beepForLowercaseWithCapslock"] and ch.islower() and winUser.getKeyState(winUser.VK_CAPITAL)&1:
			import tones
			tones.beep(3000,40)
def event_mouseMove(self,x,y):
    """Handles the mouse moving over this object: optionally announces the
    object's role on first entry, and speaks the chunk of text under the
    pointer when it changes.
    @param x: screen x coordinate of the pointer.
    @param y: screen y coordinate of the pointer.
    """
    if not self._mouseEntered and config.conf['mouse']['reportObjectRoleOnMouseEnter']:
        speech.cancelSpeech()
        speech.speakObjectProperties(self,role=True)
        speechWasCanceled=True
    else:
        speechWasCanceled=False
    self._mouseEntered=True
    try:
        info=self.makeTextInfo(textInfos.Point(x,y))
    except NotImplementedError:
        # No point-based TextInfo available; fall back to the object's basic text.
        info=NVDAObjectTextInfo(self,textInfos.POSITION_FIRST)
    except LookupError:
        return
    if config.conf["reviewCursor"]["followMouse"]:
        api.setReviewPosition(info)
    info.expand(info.unit_mouseChunk)
    # Only speak when the chunk under the mouse differs from the last one seen.
    oldInfo=getattr(self,'_lastMouseTextInfoObject',None)
    self._lastMouseTextInfoObject=info
    if not oldInfo or info.__class__!=oldInfo.__class__ or info.compareEndPoints(oldInfo,"startToStart")!=0 or info.compareEndPoints(oldInfo,"endToEnd")!=0:
        text=info.text
        notBlank=False
        if text:
            # A chunk counts as blank if it contains only whitespace and
            # object-replacement characters (U+FFFC).
            for ch in text:
                if not ch.isspace() and ch!=u'\ufffc':
                    notBlank=True
        if notBlank:
            if not speechWasCanceled:
                speech.cancelSpeech()
            speech.speakText(text)
def event_stateChange(self):
    """Reacts to a change of this object's states."""
    # Speak the changed states only while this object has the focus.
    if api.getFocusObject() is self:
        speech.speakObjectProperties(self, states=True, reason=controlTypes.REASON_CHANGE)
    # Braille is refreshed regardless of focus.
    braille.handler.handleUpdate(self)
def event_focusEntered(self):
    """Called when the focus moves inside this object, i.e. it becomes an
    ancestor of the new focus.
    """
    menuRoles = (controlTypes.ROLE_MENUBAR, controlTypes.ROLE_POPUPMENU, controlTypes.ROLE_MENUITEM)
    if self.role in menuRoles:
        # Entering a menu: silence current speech and report nothing more.
        speech.cancelSpeech()
        return
    if self.isPresentableFocusAncestor:
        speech.speakObject(self, reason=controlTypes.REASON_FOCUSENTERED)
def event_gainFocus(self):
    """
    This code is executed if a gain focus event is received by this object.
    """
    # Speak the newly focused object, then let braille follow the focus.
    self.reportFocus()
    braille.handler.handleGainFocus(self)
def event_foreground(self):
    """Called when the foreground window changes.
    This method should only perform tasks specific to the foreground window changing.
    L{event_focusEntered} or L{event_gainFocus} will be called for this object, so this method should not speak/braille the object, etc.
    """
    # Stop any in-progress speech for the previous foreground window.
    speech.cancelSpeech()
def event_becomeNavigatorObject(self):
    """Called when this object becomes the navigator object.
    """
    # Let the braille handler retarget/redisplay for the new review object.
    braille.handler.handleReviewMove()
def event_valueChange(self):
    """Reacts to a change of this object's value."""
    # Speak the new value only while this object has the focus.
    if api.getFocusObject() is self:
        speech.speakObjectProperties(self, value=True, reason=controlTypes.REASON_CHANGE)
    # Braille is refreshed regardless of focus.
    braille.handler.handleUpdate(self)
def event_nameChange(self):
    """Reacts to a change of this object's name."""
    # Speak the new name only while this object has the focus.
    if api.getFocusObject() is self:
        speech.speakObjectProperties(self, name=True, reason=controlTypes.REASON_CHANGE)
    # Braille is refreshed regardless of focus.
    braille.handler.handleUpdate(self)
def event_descriptionChange(self):
    """Reacts to a change of this object's description."""
    # Speak the new description only while this object has the focus.
    if api.getFocusObject() is self:
        speech.speakObjectProperties(self, description=True, reason=controlTypes.REASON_CHANGE)
    # Braille is refreshed regardless of focus.
    braille.handler.handleUpdate(self)
def event_caret(self):
    """Reacts to caret movement within this object."""
    # Ignore caret moves while unfocused, or when a focus change is pending
    # (the gainFocus handler will present the new position instead).
    if self is not api.getFocusObject() or eventHandler.isPendingEvents("gainFocus"):
        return
    braille.handler.handleCaretMove(self)
    review.handleCaretMove(self)
def _get_flatReviewPosition(self):
    """Locates a TextInfo positioned at this object, in the closest flat review."""
    # Walk up the simple ancestry to find a container able to provide a
    # flat (screen-like) review of this object.
    parent=self.simpleParent
    while parent:
        ti=parent.treeInterceptor
        if ti and self in ti and ti.rootNVDAObject==parent:
            return ti.makeTextInfo(self)
        if issubclass(parent.TextInfo,DisplayModelTextInfo):
            # Display-model review: prefer the current review point, then
            # this object itself, then the very start of the parent.
            try:
                return parent.makeTextInfo(api.getReviewPosition().pointAtStart)
            except (NotImplementedError,LookupError):
                pass
            try:
                return parent.makeTextInfo(self)
            except (NotImplementedError,RuntimeError):
                pass
            return parent.makeTextInfo(textInfos.POSITION_FIRST)
        parent=parent.simpleParent
    # Implicitly returns None when no suitable ancestor exists.
def _get_basicText(self):
    """Basic text for this object, joined from its name, value and
    description, with a crude time-based cache (roughly 0.5 s).
    """
    newTime=time.time()
    oldTime=getattr(self,'_basicTextTime',0)
    if newTime-oldTime>0.5:
        # Python 2 comprehension syntax: iterates over the implicit tuple
        # (name, value, description), keeping non-empty, non-blank strings.
        self._basicText=u" ".join([x for x in self.name, self.value, self.description if isinstance(x, basestring) and len(x) > 0 and not x.isspace()])
        if len(self._basicText)==0:
            self._basicText=u""
    else:
        # NOTE(review): the cache timestamp is only refreshed on the cached
        # path, never after a recompute — looks suspicious; confirm intended.
        self._basicTextTime=newTime
    return self._basicText
def makeTextInfo(self,position):
    """Creates an instance of this object's TextInfo class at the given position.
    @param position: a position constant or object accepted by the TextInfo class.
    """
    return self.TextInfo(self,position)
@staticmethod
def _formatLongDevInfoString(string, truncateLen=250):
    """Format a potentially long string value for inclusion in devInfo.
    Arbitrary string values usually stop being useful for debugging past a
    certain length, so overly long strings are truncated.
    The result should be included as returned; there is no need to call repr.
    @param string: The string to format.
    @type string: basestring
    @param truncateLen: The length at which to truncate the string.
    @type truncateLen: int
    @return: The formatted string.
    @rtype: basestring
    """
    needsTruncation = isinstance(string, basestring) and len(string) > truncateLen
    if not needsTruncation:
        return repr(string)
    return "%r (truncated)" % string[:truncateLen]
def _get_devInfo(self):
    """Information about this object useful to developers.
    Subclasses may extend this, calling the superclass property first.
    @return: A list of text strings providing information about this object useful to developers.
    @rtype: list of str
    """
    info = []
    # Every property is fetched inside its own try/except so that one
    # failing property cannot hide the remaining diagnostics.
    try:
        ret = repr(self.name)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("name: %s" % ret)
    try:
        ret = self.role
        # Map the numeric role back to its symbolic ROLE_* constant name.
        for name, const in controlTypes.__dict__.iteritems():
            if name.startswith("ROLE_") and ret == const:
                ret = name
                break
    except Exception as e:
        ret = "exception: %s" % e
    info.append("role: %s" % ret)
    try:
        # Reverse lookup table: state constant -> STATE_* name.
        stateConsts = dict((const, name) for name, const in controlTypes.__dict__.iteritems() if name.startswith("STATE_"))
        ret = ", ".join(
            stateConsts.get(state) or str(state)
            for state in self.states)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("states: %s" % ret)
    try:
        ret = repr(self.isFocusable)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("isFocusable: %s" % ret)
    try:
        ret = repr(self.hasFocus)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("hasFocus: %s" % ret)
    try:
        ret = repr(self)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("Python object: %s" % ret)
    try:
        ret = repr(self.__class__.__mro__)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("Python class mro: %s" % ret)
    try:
        ret = repr(self.description)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("description: %s" % ret)
    try:
        ret = repr(self.location)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("location: %s" % ret)
    formatLong = self._formatLongDevInfoString
    try:
        # Values can be arbitrarily long; truncate for readability.
        ret = formatLong(self.value)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("value: %s" % ret)
    try:
        ret = repr(self.appModule)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("appModule: %s" % ret)
    try:
        ret = repr(self.appModule.productName)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("appModule.productName: %s" % ret)
    try:
        ret = repr(self.appModule.productVersion)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("appModule.productVersion: %s" % ret)
    try:
        ret = repr(self.TextInfo)
    except Exception as e:
        ret = "exception: %s" % e
    info.append("TextInfo: %s" % ret)
    return info
def _get_sleepMode(self):
"""Whether NVDA should sleep for this object (e.g. it is self-voicing).
If C{True}, all events and script requests for this object are silently dropped.
@rtype: bool
"""
if self.appModule:
return self.appModule.sleepMode
return False
# Don't cache sleepMode, as it is derived from a property which might change
# and we want the changed value immediately.
_cache_sleepMode = False
def _get_mathMl(self):
    """Obtain the MathML markup for an object containing math content.
    This will only be called (and thus only needs to be implemented) for
    objects with a role of L{controlTypes.ROLE_MATH}.
    @raise LookupError: If MathML can't be retrieved for this object.
    """
    # Subclasses that support math content must override this.
    raise NotImplementedError
#: The language/locale of this object.
#: @type: basestring
language = None
| 37.367459 | 348 | 0.730773 |
fc46c44601b5193017d5090be03dfac21f03114b | 407 | py | Python | backend/speakeasy_28830/wsgi.py | crowdbotics-apps/speakeasy-28830 | 4ea91d73b9eeaa78a76871e192a0c44207ab5f60 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/speakeasy_28830/wsgi.py | crowdbotics-apps/speakeasy-28830 | 4ea91d73b9eeaa78a76871e192a0c44207ab5f60 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/speakeasy_28830/wsgi.py | crowdbotics-apps/speakeasy-28830 | 4ea91d73b9eeaa78a76871e192a0c44207ab5f60 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | """
WSGI config for speakeasy_28830 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project's settings module before bootstrapping.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'speakeasy_28830.settings')
# Module-level WSGI callable that WSGI servers look up by convention.
application = get_wsgi_application()
| 23.941176 | 78 | 0.793612 |
5c62f20f0d2c5f6dba205d172e9f08fabfb9a642 | 388 | py | Python | app/__init__.py | GenryEden/weatherStationMonitoring | efddc5c816cfa33a26a39de59cf756ec77da7f56 | [
"MIT"
] | null | null | null | app/__init__.py | GenryEden/weatherStationMonitoring | efddc5c816cfa33a26a39de59cf756ec77da7f56 | [
"MIT"
] | null | null | null | app/__init__.py | GenryEden/weatherStationMonitoring | efddc5c816cfa33a26a39de59cf756ec77da7f56 | [
"MIT"
] | null | null | null | from flask import Flask
from app import newDbWorker as dbWorker
from app import config
from apscheduler.schedulers.background import BackgroundScheduler
app = Flask(__name__)
worker = dbWorker.worker()
from app import graphMaker
scheduler = BackgroundScheduler()
job = scheduler.add_job(graphMaker.make, 'interval', minutes=config.updateTime)
scheduler.start()
from app import views
| 22.823529 | 79 | 0.814433 |
9de7613d5def486e8be9b08de14f0841e7a77f0e | 2,154 | py | Python | leo/leo.py | patel-bhavin/leonidas | c317af28dae820380462f641f7ad59d3002b15f5 | [
"MIT"
] | null | null | null | leo/leo.py | patel-bhavin/leonidas | c317af28dae820380462f641f7ad59d3002b15f5 | [
"MIT"
] | null | null | null | leo/leo.py | patel-bhavin/leonidas | c317af28dae820380462f641f7ad59d3002b15f5 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""
Leo - case executor for Leonidas. Takes a config file as its first argument.
"""
import json
# import logging
import sys
import time
import requests
import yaml
if __name__ == "__main__":
    # Load the case configuration file passed as the first CLI argument.
    config = yaml.safe_load(open(sys.argv[1], "r"))
    print("Url: {}".format(config["url"]))
    for case in config["cases"]:
        print(config["cases"][case]["name"])
        url = config["url"] + config["cases"][case]["path"]
        headers = {}
        # If we're running Leonidas locally, no need for an API key,
        # so let's not error out if there's not one in the config.
        try:
            headers["x-api-key"] = config["apikey"]
        except KeyError:
            # BUGFIX: this used `continue`, which skipped the entire test
            # case whenever no API key was configured — contradicting the
            # comment above. Proceed without the header instead.
            pass
        # Grab the args from the case
        if "args" in config["cases"][case]:
            if config["cases"][case]["args"]:
                args = config["cases"][case]["args"]
            else:
                args = {}
        else:
            args = {}
        # load any credentials and region details configured in the caseconfig
        if ("identity" in config) and (config["identity"] is not None):
            if "role_arn" in config["identity"]:
                args["role_arn"] = config["identity"]["role_arn"]
            elif ("access_key_id" in config["identity"]) and ("secret_access_key" in config["identity"]):
                args["access_key_id"] = config["identity"]["access_key_id"]
                args["secret_access_key"] = config["identity"]["secret_access_key"]
            if "region" in config["identity"]:
                args["region"] = config["identity"]["region"]
        # If it's a request with parameters it'll need to be POSTed, otherwise it's a GET request
        if ("args" in config["cases"][case]) and (config["cases"][case]["args"] is not None):
            r = requests.post(url, headers=headers, params=args)
        else:
            r = requests.get(url, headers=headers, params=args)
        print(json.dumps(r.json(), indent=4, sort_keys=True))
        # Sleep between cases
        time.sleep(config["sleeptime"])
    print("Ran {} test cases".format(len(config["cases"])))
| 35.9 | 105 | 0.577066 |
58a0265301c70c249ce31b28214ba167e583eda1 | 2,981 | py | Python | data/interpolate_elliptic_integral_3.py | pmehta08/MulensModel | 261738c445a8d116d09c90e65f6e847cfc8a7ad8 | [
"MIT"
] | 30 | 2016-08-30T23:32:43.000Z | 2022-03-07T20:06:25.000Z | data/interpolate_elliptic_integral_3.py | pmehta08/MulensModel | 261738c445a8d116d09c90e65f6e847cfc8a7ad8 | [
"MIT"
] | 25 | 2018-08-22T19:14:22.000Z | 2022-03-28T17:22:56.000Z | data/interpolate_elliptic_integral_3.py | pmehta08/MulensModel | 261738c445a8d116d09c90e65f6e847cfc8a7ad8 | [
"MIT"
] | 11 | 2016-10-03T16:00:50.000Z | 2022-03-23T16:53:54.000Z | """
Calculates interpolation tables for elliptical integral of the third kind.
"""
import math
import numpy as np
from math import sin, cos, sqrt
from scipy import integrate
from scipy.interpolate import interp1d, interp2d
from sympy.functions.special.elliptic_integrals import elliptic_pi as ellip3
accuracy = 1.e-4
n_divide = 3 + 1
x_start = 1.e-4
x_stop = 1.-1.e-6
y_start = 1.e-4
y_stop = 1.-1.e-6
n_start = 10
file_out_name = "interpolate_elliptic_integral_3.dat"
# Settings end here.
def get_ellip(x, y):
    """Evaluates the elliptic integral of the third kind on the grid x × y.

    Results are memoized in the function attribute ``get_ellip.p`` keyed by
    (x_, y_) tuples, so repeated grid refinements reuse earlier evaluations.

    Parameters :
        x, y - 1-D sequences of evaluation points.

    Returns :
        2-D numpy array of shape (len(x), len(y)).
    """
    # NOTE: removed an unused local (`p = []`) that shadowed nothing and
    # was never read.
    z = np.zeros((len(x), len(y)))
    for (i, x_) in enumerate(x):
        for (j, y_) in enumerate(y):
            index = (x_, y_)
            if index not in get_ellip.p:
                # Cache miss: evaluate sympy's elliptic_pi once per point.
                get_ellip.p[index] = ellip3(x_, y_)
            z[i, j] = get_ellip.p[index]
    return z
# Persistent memoization cache for get_ellip.
get_ellip.p = dict()
# Initial logarithmic grids in x and y.
x = np.logspace(np.log10(x_start), np.log10(x_stop), n_start)
y = np.logspace(np.log10(y_start), np.log10(y_stop), n_start)

iteration = 0
add_x = [None]
add_y = [None]
# Adaptive refinement loop: keep inserting the worst-interpolated check
# point into the grids until the relative interpolation error everywhere
# drops below `accuracy`.
while len(add_x) > 0 or len(add_y) > 0:
    iteration += 1
    add_x = []
    add_y = []
    p = get_ellip(x, y)
    # Cubic 2-D interpolant built on the current grid.
    interp_p = interp2d(x, y, p.T, kind='cubic')
    # Candidate check points: log-spaced subdivisions of each grid interval,
    # excluding the interval endpoints themselves.
    check_x = []
    for i in range(len(x)-1):
        check_ = np.logspace(np.log10(x[i]), np.log10(x[i+1]), n_divide)
        check_x += check_[1:-1].tolist()
    check_y = []
    for i in range(len(y)-1):
        check_ = np.logspace(np.log10(y[i]), np.log10(y[i+1]), n_divide)
        check_y += check_[1: -1].tolist()
    check_true_p = get_ellip(check_x, check_y)
    check_p = np.zeros((len(check_x), len(check_y)))
    for (ix, cx) in enumerate(check_x):
        for (iy, cy) in enumerate(check_y):
            if cy > cx:
                # Region y > x is masked with 1 in both arrays so its
                # relative difference is exactly zero (presumably outside
                # the domain of interest — confirm).
                check_p[ix, iy] = 1.
                check_true_p[ix, iy] = 1.
            else:
                check_p[ix, iy] = interp_p(cx, cy)[0]
    relative_diff_p = np.abs(check_p - check_true_p) / check_true_p
    index = np.unravel_index(relative_diff_p.argmax(), relative_diff_p.shape)
    if np.max(relative_diff_p) < accuracy:
        # Converged: add_x/add_y stay empty, so the while condition ends the loop.
        continue
    # Insert the worst check point into both grids and iterate again.
    add_x.append(check_x[index[0]])
    add_y.append(check_y[index[1]])
    new_x = np.sort(add_x + x.tolist())
    new_y = np.sort(add_y + y.tolist())
    x = new_x
    y = new_y

# Write to output file.
p = get_ellip(x, y)
with open(file_out_name, "w") as f_out:
    f_out.write(" ".join(["# X"] + [str(x_) for x_ in x] + ["\n"]))
    f_out.write(" ".join(["# Y"] + [str(y_) for y_ in y] + ["\n"]))
    for (i, x_) in enumerate(x):
        f_out.write(
            " ".join([str(p[i, j]) for (j, y_) in enumerate(y)] + ["\n"]))

# Read the output file and test it.
with open(file_out_name) as file_in:
    for line in file_in.readlines():
        if line[:3] == "# X":
            xx = np.array([float(t) for t in line.split()[2:]])
        if line[:3] == "# Y":
            yy = np.array([float(t) for t in line.split()[2:]])
pp = np.loadtxt(file_out_name)
print(np.all(x == xx))
print(np.all(y == yy))
print(np.all(p == pp))
31e50d182ac5a01e9c3dc2afb147fa9018f381e7 | 2,536 | py | Python | 07_Septuple/septuple_pelirrojo_2_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | 17 | 2020-01-14T06:25:10.000Z | 2022-01-25T18:02:37.000Z | 07_Septuple/septuple_pelirrojo_2_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | null | null | null | 07_Septuple/septuple_pelirrojo_2_v1.py | Machine-Learning-Labs/DeepRacerRewardFunctionsCollection | f6addf4654de90f9d1669fd5de67331add93ab2f | [
"MIT"
] | 5 | 2020-05-30T18:49:18.000Z | 2021-09-03T19:38:39.000Z | '''
@author: Manuel Eusebio de Paz Carmona // Pelirrojo
@Link: https://github.com/Pelirrojo/DeepRacerRewardFunctionsCollection
@License: MIT
Inspired on sextuple_tiboonn_v1.py
'''
def reward_function(params):
    """AWS DeepRacer reward function.

    Rewards progress milestones and staying close to the track centre at
    speed; being left of centre applies extra (quadratic-style) boosts, and
    heavy steering is penalized. Failure conditions (wheels off track, too
    far from centre) yield the maximum penalty.

    @param params: telemetry dict provided by the DeepRacer environment.
    @return: float reward clamped to [-100000.0, 100000.0].
    """
    # Clamp bounds for the final reward.
    PENALTY = -100000.0
    JACKPOT = 100000.0
    # Absolute steering angles above this many degrees are penalized.
    MAX_STEERING = 15

    # Telemetry inputs.
    speed = params['speed']
    progress = params['progress']
    track_width = params['track_width']
    distance_from_center = params['distance_from_center']
    all_wheels_on_track = params['all_wheels_on_track']
    steering = abs(params['steering_angle'])
    is_left_of_center = params['is_left_of_center']

    # Any wheel off the track fails immediately.
    if not all_wheels_on_track:
        return float(PENALTY)
    # Lap essentially complete: maximum reward, regardless of position.
    if progress >= 99:
        return float(JACKPOT)

    # Base reward scales with the progress milestone reached.
    reward = 1
    if progress >= 75:
        reward = (JACKPOT / 2) * (progress / 100)
    elif progress >= 50:
        reward = (JACKPOT / 4) * (progress / 100)
    elif progress >= 25:
        reward = (JACKPOT / 8) * (progress / 100)
    elif progress >= 1:
        reward = (JACKPOT / 10) * (progress / 100)

    # Distance bands from the centre line, as fractions of the track width.
    band_1 = 0.1 * track_width
    band_2 = 0.15 * track_width
    band_3 = 0.25 * track_width
    band_4 = 0.5 * track_width

    # Closer to the centre line earns a larger speed multiplier; driving
    # left of centre applies an additional band-specific adjustment.
    if distance_from_center <= band_1:
        reward *= 1.0 * speed
        if is_left_of_center:
            reward *= reward * 0.1
    elif distance_from_center <= band_2:
        reward *= 0.8 * speed
        if is_left_of_center:
            reward *= reward + 0.1
    elif distance_from_center <= band_3:
        reward *= 0.3 * speed
        if is_left_of_center:
            reward *= reward + 0.1
    elif distance_from_center <= band_4:
        reward *= 0.1 * speed
        if is_left_of_center:
            reward = reward + 0.1
    else:
        # Too far from the centre: likely crashed / close to off track.
        return float(PENALTY)

    # Penalize excessive steering.
    if steering > MAX_STEERING:
        reward *= 0.8

    # Clamp the result into the prescribed range.
    return float(min(max(reward, PENALTY), JACKPOT))
| 31.308642 | 79 | 0.627366 |
ac86732f806514e916a01836105ec625898d67e9 | 41,183 | py | Python | localstack/services/sns/sns_listener.py | omps/localstack | 81e7e99eb04d9f0bc2eed9f38a5b70edd9c31675 | [
"Apache-2.0"
] | null | null | null | localstack/services/sns/sns_listener.py | omps/localstack | 81e7e99eb04d9f0bc2eed9f38a5b70edd9c31675 | [
"Apache-2.0"
] | null | null | null | localstack/services/sns/sns_listener.py | omps/localstack | 81e7e99eb04d9f0bc2eed9f38a5b70edd9c31675 | [
"Apache-2.0"
] | null | null | null | import ast
import asyncio
import base64
import datetime
import json
import logging
import time
import traceback
import uuid
from typing import Dict, List
from urllib.parse import parse_qs, urlparse
import requests
import six
import xmltodict
from flask import Response as FlaskResponse
from moto.sns.exceptions import DuplicateSnsEndpointError
from moto.sns.models import SNSBackend as MotoSNSBackend
from requests.models import Request, Response
from localstack.config import external_service_url
from localstack.constants import MOTO_ACCOUNT_ID, TEST_AWS_ACCOUNT_ID
from localstack.services.awslambda import lambda_api
from localstack.services.generic_proxy import RegionBackend
from localstack.utils.analytics import event_publisher
from localstack.utils.aws import aws_stack
from localstack.utils.aws.aws_responses import (
create_sqs_system_attributes,
parse_urlencoded_data,
requests_response_xml,
response_regex_replace,
)
from localstack.utils.aws.dead_letter_queue import sns_error_to_dead_letter_queue
from localstack.utils.cloudwatch.cloudwatch_util import store_cloudwatch_logs
from localstack.utils.common import (
json_safe,
long_uid,
md5,
not_none_or,
parse_request_data,
short_uid,
start_thread,
timestamp_millis,
to_bytes,
to_str,
)
from localstack.utils.persistence import PersistingProxyListener
# set up logger
LOG = logging.getLogger(__name__)

# additional attributes used for HTTP subscriptions
HTTP_SUBSCRIPTION_ATTRIBUTES = ["UnsubscribeURL"]

# actions to be skipped from persistence
SKIP_PERSISTENCE_ACTIONS = [
    "Subscribe",
    "ConfirmSubscription",
    "Unsubscribe",
]

# subscription protocols accepted by the Subscribe action
SNS_PROTOCOLS = [
    "http",
    "https",
    "email",
    "email-json",
    "sms",
    "sqs",
    "application",
    "lambda",
    "firehose",
]
class SNSBackend(RegionBackend):
    """Region-scoped in-memory state of the SNS provider."""

    # maps topic ARN to list of subscriptions
    sns_subscriptions: Dict[str, List[Dict]]
    # maps subscription ARN to subscription status
    subscription_status: Dict[str, Dict]
    # maps topic ARN to list of tags
    sns_tags: Dict[str, List[Dict]]
    # cache of topic ARN to platform endpoint messages (used primarily for testing)
    platform_endpoint_messages: Dict[str, List[Dict]]
    # list of sent SMS messages - TODO: expose via internal API
    sms_messages: List[Dict]

    def __init__(self):
        # Each region starts with empty state; entries are created lazily.
        self.sns_subscriptions = {}
        self.subscription_status = {}
        self.sns_tags = {}
        self.platform_endpoint_messages = {}
        self.sms_messages = []
class ProxyListenerSNS(PersistingProxyListener):
    """Proxy listener that intercepts SNS API calls: several actions are
    implemented locally (subscriptions, publishing, tagging), the rest are
    forwarded to the moto SNS backend after account-ID rewriting."""

    def api_name(self):
        # Service name used by the persistence machinery.
        return "sns"

    def forward_request(self, method, path, data, headers):
        """Intercepts an incoming SNS request.
        Returns an int/Response to short-circuit, a Request to forward
        modified data to the backend, or True to forward unchanged."""
        # CORS preflight requests are acknowledged without forwarding.
        if method == "OPTIONS":
            return 200

        # check region
        try:
            aws_stack.check_valid_region(headers)
            aws_stack.set_default_region_in_headers(headers)
        except Exception as e:
            return make_error(message=str(e), code=400)

        if method == "POST":
            # parse payload and extract fields
            req_data = parse_qs(to_str(data), keep_blank_values=True)

            # parse data from query path
            if not req_data:
                parsed_path = urlparse(path)
                req_data = parse_qs(parsed_path.query, keep_blank_values=True)

            req_action = req_data["Action"][0]
            # The topic ARN may arrive under several parameter names.
            topic_arn = (
                req_data.get("TargetArn") or req_data.get("TopicArn") or req_data.get("ResourceArn")
            )
            if topic_arn:
                topic_arn = topic_arn[0]
                topic_arn = aws_stack.fix_account_id_in_arns(topic_arn)

            if req_action == "SetSubscriptionAttributes":
                sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
                if not sub:
                    return make_error(message="Unable to find subscription for given ARN", code=400)

                attr_name = req_data["AttributeName"][0]
                attr_value = req_data["AttributeValue"][0]
                sub[attr_name] = attr_value
                return make_response(req_action)

            elif req_action == "GetSubscriptionAttributes":
                sub = get_subscription_by_arn(req_data["SubscriptionArn"][0])
                if not sub:
                    return make_error(
                        message="Subscription with arn {0} not found".format(
                            req_data["SubscriptionArn"][0]
                        ),
                        code=404,
                        code_string="NotFound",
                    )

                content = "<Attributes>"
                for key, value in sub.items():
                    # Internal HTTP bookkeeping attributes are not exposed.
                    if key in HTTP_SUBSCRIPTION_ATTRIBUTES:
                        continue
                    content += "<entry><key>%s</key><value>%s</value></entry>\n" % (
                        key,
                        value,
                    )
                content += "</Attributes>"
                return make_response(req_action, content=content)

            elif req_action == "Subscribe":
                # Validate the request; the subscription itself is recorded
                # in return_response() once moto has assigned an ARN.
                if "Endpoint" not in req_data:
                    return make_error(message="Endpoint not specified in subscription", code=400)
                if req_data["Protocol"][0] not in SNS_PROTOCOLS:
                    return make_error(
                        message=f"Invalid parameter: Amazon SNS does not support this protocol string: "
                        f"{req_data['Protocol'][0]}",
                        code=400,
                    )
                if ".fifo" in req_data["Endpoint"][0] and ".fifo" not in topic_arn:
                    return make_error(
                        message="FIFO SQS Queues can not be subscribed to standard SNS topics",
                        code=400,
                        code_string="InvalidParameter",
                    )

            elif req_action == "ConfirmSubscription":
                if "TopicArn" not in req_data:
                    return make_error(
                        message="TopicArn not specified in confirm subscription request",
                        code=400,
                    )
                if "Token" not in req_data:
                    return make_error(
                        message="Token not specified in confirm subscription request",
                        code=400,
                    )
                do_confirm_subscription(req_data.get("TopicArn")[0], req_data.get("Token")[0])

            elif req_action == "Unsubscribe":
                if "SubscriptionArn" not in req_data:
                    return make_error(
                        message="SubscriptionArn not specified in unsubscribe request",
                        code=400,
                    )
                do_unsubscribe(req_data.get("SubscriptionArn")[0])

            elif req_action == "DeleteTopic":
                # Clean up local state; the request is still forwarded to moto.
                do_delete_topic(topic_arn)

            elif req_action == "Publish":
                if req_data.get("Subject") == [""]:
                    return make_error(code=400, code_string="InvalidParameter", message="Subject")
                if not req_data.get("Message") or all(
                    not message for message in req_data.get("Message")
                ):
                    return make_error(
                        code=400, code_string="InvalidParameter", message="Empty message"
                    )

                if topic_arn and ".fifo" in topic_arn and not req_data.get("MessageGroupId"):
                    return make_error(
                        code=400,
                        code_string="InvalidParameter",
                        message="The MessageGroupId parameter is required for FIFO topics",
                    )
                sns_backend = SNSBackend.get()
                # No need to create a topic to send SMS or single push notifications with SNS
                # but we can't mock a sending so we only return that it went well
                if "PhoneNumber" not in req_data and "TargetArn" not in req_data:
                    if topic_arn not in sns_backend.sns_subscriptions:
                        return make_error(
                            code=404,
                            code_string="NotFound",
                            message="Topic does not exist",
                        )

                message_id = publish_message(topic_arn, req_data, headers)

                # return response here because we do not want the request to be forwarded to SNS backend
                return make_response(req_action, message_id=message_id)

            elif req_action == "PublishBatch":
                response = publish_batch(topic_arn, req_data, headers)
                return requests_response_xml(
                    req_action, response, xmlns="http://sns.amazonaws.com/doc/2010-03-31/"
                )

            elif req_action == "ListTagsForResource":
                tags = do_list_tags_for_resource(topic_arn)
                content = "<Tags/>"
                if len(tags) > 0:
                    content = "<Tags>"
                    for tag in tags:
                        content += "<member>"
                        content += "<Key>%s</Key>" % tag["Key"]
                        content += "<Value>%s</Value>" % tag["Value"]
                        content += "</member>"
                    content += "</Tags>"
                return make_response(req_action, content=content)

            elif req_action == "CreateTopic":
                sns_backend = SNSBackend.get()
                topic_arn = aws_stack.sns_topic_arn(req_data["Name"][0])
                tag_resource_success = self._extract_tags(topic_arn, req_data, True, sns_backend)
                sns_backend.sns_subscriptions[topic_arn] = (
                    sns_backend.sns_subscriptions.get(topic_arn) or []
                )
                # Return an error if tagging failed (topic already exists
                # with different tags); otherwise forward to the backend.
                if not tag_resource_success:
                    return make_error(
                        code=400,
                        code_string="InvalidParameter",
                        message="Topic already exists with different tags",
                    )

            elif req_action == "TagResource":
                sns_backend = SNSBackend.get()
                self._extract_tags(topic_arn, req_data, False, sns_backend)
                return make_response(req_action)

            elif req_action == "UntagResource":
                tags_to_remove = []
                # Collect the "TagKeys.member.N" values from the query payload.
                req_tags = {k: v for k, v in req_data.items() if k.startswith("TagKeys.member.")}
                req_tags = req_tags.values()
                for tag in req_tags:
                    tags_to_remove.append(tag[0])
                do_untag_resource(topic_arn, tags_to_remove)
                return make_response(req_action)

            # Rewrite the external account ID to moto's before forwarding.
            data = self._reset_account_id(data)
            return Request(data=data, headers=headers, method=method)

        return True

    @staticmethod
    def _extract_tags(topic_arn, req_data, is_create_topic_request, sns_backend):
        """Extracts "Tags.member.N.*" parameters from the request and applies
        them to the topic. Returns False when a CreateTopic request carries
        tags conflicting with an already existing topic."""
        tags = []
        req_tags = {k: v for k, v in req_data.items() if k.startswith("Tags.member.")}
        existing_tags = sns_backend.sns_tags.get(topic_arn, None)
        # TODO: use aws_responses.extract_tags(...) here!
        # Each tag occupies two entries (".Key" and ".Value"), hence len/2.
        for i in range(int(len(req_tags.keys()) / 2)):
            key = req_tags["Tags.member." + str(i + 1) + ".Key"][0]
            value = req_tags["Tags.member." + str(i + 1) + ".Value"][0]
            tag = {"Key": key, "Value": value}
            tags.append(tag)
            # this means topic already created with empty tags and when we try to create it
            # again with other tag value then it should fail according to aws documentation.
            if is_create_topic_request and existing_tags is not None and tag not in existing_tags:
                return False
        do_tag_resource(topic_arn, tags)
        return True

    @staticmethod
    def _reset_account_id(data):
        """Fix account ID in request payload. All external-facing responses contain our
        predefined account ID (defaults to 000000000000), whereas the backend endpoint
        from moto expects a different hardcoded account ID (123456789012)."""
        return aws_stack.fix_account_id_in_arns(
            data,
            colon_delimiter="%3A",
            existing=TEST_AWS_ACCOUNT_ID,
            replace=MOTO_ACCOUNT_ID,
        )

    def return_response(self, method, path, data, headers, response):
        """Post-processes backend responses: rewrites account IDs, records
        new subscriptions, and fires analytics events for topic lifecycle."""
        # persist requests to disk
        super(ProxyListenerSNS, self).return_response(method, path, data, headers, response)

        if method == "POST" and path == "/":
            # convert account IDs in ARNs
            data = aws_stack.fix_account_id_in_arns(data, colon_delimiter="%3A")
            aws_stack.fix_account_id_in_arns(response)

            # remove "None" strings from result
            search = r"<entry><key>[^<]+</key>\s*<value>\s*None\s*</[^>]+>\s*</entry>"
            response_regex_replace(response, search, "")

            # parse request and extract data
            req_data = parse_qs(to_str(data))
            req_action = req_data["Action"][0]
            if req_action == "Subscribe" and response.status_code < 400:
                # Record the subscription locally with the ARN moto assigned.
                response_data = xmltodict.parse(response.content)
                topic_arn = (req_data.get("TargetArn") or req_data.get("TopicArn"))[0]
                filter_policy = (req_data.get("FilterPolicy") or [None])[0]
                attributes = get_subscribe_attributes(req_data)
                sub_arn = response_data["SubscribeResponse"]["SubscribeResult"]["SubscriptionArn"]
                do_subscribe(
                    topic_arn,
                    req_data["Endpoint"][0],
                    req_data["Protocol"][0],
                    sub_arn,
                    attributes,
                    filter_policy,
                )
            if req_action == "CreateTopic" and response.status_code < 400:
                response_data = xmltodict.parse(response.content)
                topic_arn = response_data["CreateTopicResponse"]["CreateTopicResult"]["TopicArn"]
                # publish event
                event_publisher.fire_event(
                    event_publisher.EVENT_SNS_CREATE_TOPIC,
                    payload={"t": event_publisher.get_hash(topic_arn)},
                )
            if req_action == "DeleteTopic" and response.status_code < 400:
                # publish event
                topic_arn = (req_data.get("TargetArn") or req_data.get("TopicArn"))[0]
                event_publisher.fire_event(
                    event_publisher.EVENT_SNS_DELETE_TOPIC,
                    payload={"t": event_publisher.get_hash(topic_arn)},
                )

    def should_persist(self, method, path, data, headers, response):
        """Excludes subscription-lifecycle actions from persistence."""
        req_params = parse_request_data(method, path, data)
        action = req_params.get("Action", "")
        if action in SKIP_PERSISTENCE_ACTIONS:
            return False
        return super(ProxyListenerSNS, self).should_persist(method, path, data, headers, response)
def patch_moto():
    """Applies runtime patches to the moto SNS backend."""
    create_platform_endpoint_orig = MotoSNSBackend.create_platform_endpoint

    def patch_create_platform_endpoint(self, *args):
        # Make endpoint creation idempotent: when moto reports a duplicate,
        # return the existing endpoint with the same token instead of
        # failing — unless the custom user data differs.
        try:
            return create_platform_endpoint_orig(self, *args)
        except DuplicateSnsEndpointError:
            custom_user_data, token = args[2], args[3]
            for existing in self.platform_endpoints.values():
                if existing.token != token:
                    continue
                if custom_user_data and custom_user_data != existing.custom_user_data:
                    raise DuplicateSnsEndpointError(
                        "Endpoint already exist for token: %s with different attributes" % token
                    )
                return existing
            # No endpoint with a matching token: the error is swallowed and
            # None is returned implicitly (same as the original behavior).

    MotoSNSBackend.create_platform_endpoint = patch_create_platform_endpoint
# Apply the moto patches once at import time.
patch_moto()

# instantiate listener
UPDATE_SNS = ProxyListenerSNS()
def unsubscribe_sqs_queue(queue_url):
    """Called upon deletion of an SQS queue, to remove the queue from subscriptions.

    :param queue_url: URL of the deleted SQS queue; subscribers whose cached
        queue URL or raw Endpoint matches it are removed from every topic.
    """
    sns_backend = SNSBackend.get()
    # NOTE: the previous version redundantly re-fetched each topic's
    # subscription list inside the loop; iterating the values is enough.
    for subscriptions in sns_backend.sns_subscriptions.values():
        # Iterate over a copy so we can remove entries while looping.
        for subscriber in list(subscriptions):
            sub_url = subscriber.get("sqs_queue_url") or subscriber["Endpoint"]
            if queue_url == sub_url:
                subscriptions.remove(subscriber)
def message_to_subscribers(
    message_id,
    message,
    topic_arn,
    req_data,
    headers,
    subscription_arn=None,
    skip_checks=False,
    message_attributes=None,
):
    """Delivers a published message to all matching subscribers of a topic.

    Spawns one delivery coroutine per subscriber and blocks until all of
    them have completed.

    :param message_id: ID assigned to the published SNS message
    :param message: message payload
    :param topic_arn: ARN of the topic the message was published to
    :param req_data: parsed request parameters of the Publish call
    :param headers: original HTTP headers of the Publish call
    :param subscription_arn: if set, deliver only to this subscription
    :param skip_checks: if True, bypass filter-policy evaluation
    :param message_attributes: pre-parsed message attributes (otherwise
        extracted from req_data by the subscriber coroutine)
    """
    sns_backend = SNSBackend.get()
    subscriptions = sns_backend.sns_subscriptions.get(topic_arn, [])

    async def wait_for_messages_sent():
        # BUGFIX: wrap each coroutine in a Task - passing bare coroutines to
        # asyncio.wait() is deprecated since Python 3.8 and removed in 3.11.
        subs = [
            asyncio.ensure_future(
                message_to_subscriber(
                    message_id,
                    message,
                    topic_arn,
                    req_data,
                    headers,
                    subscription_arn,
                    skip_checks,
                    sns_backend,
                    subscriber,
                    subscriptions,
                    message_attributes,
                )
            )
            for subscriber in list(subscriptions)
        ]
        if subs:
            await asyncio.wait(subs)

    asyncio.run(wait_for_messages_sent())
async def message_to_subscriber(
    message_id,
    message,
    topic_arn,
    req_data,
    headers,
    subscription_arn,
    skip_checks,
    sns_backend,
    subscriber,
    subscriptions,
    message_attributes,
):
    """Deliver one message to one subscriber, dispatching on the subscription
    protocol (sms, sqs, lambda, http/https, application, email/email-json).

    Delivery failures are logged, written to the delivery log and forwarded
    to the subscription's dead-letter queue (if configured); they are never
    re-raised to the caller.
    """
    # When a specific subscription ARN is targeted, skip all others.
    if subscription_arn not in [None, subscriber["SubscriptionArn"]]:
        return

    filter_policy = json.loads(subscriber.get("FilterPolicy") or "{}")
    if not message_attributes:
        message_attributes = get_message_attributes(req_data)
    if not skip_checks and not check_filter_policy(filter_policy, message_attributes):
        LOG.info(
            "SNS filter policy %s does not match attributes %s", filter_policy, message_attributes
        )
        return

    if subscriber["Protocol"] == "sms":
        event = {
            "topic_arn": topic_arn,
            "endpoint": subscriber["Endpoint"],
            "message_content": req_data["Message"][0],
        }
        sns_backend.sms_messages.append(event)
        LOG.info(
            "Delivering SMS message to %s: %s",
            subscriber["Endpoint"],
            req_data["Message"][0],
        )

        # MOCK DATA
        delivery = {
            "phoneCarrier": "Mock Carrier",
            "mnc": 270,
            "priceInUSD": 0.00645,
            "smsType": "Transactional",
            "mcc": 310,
            "providerResponse": "Message has been accepted by phone carrier",
            "dwellTimeMsUntilDeviceAck": 200,
        }
        store_delivery_log(subscriber, True, message, message_id, delivery)
        return

    elif subscriber["Protocol"] == "sqs":
        queue_url = None
        try:
            endpoint = subscriber["Endpoint"]
            # Resolve and cache the queue URL for this subscription.
            if "sqs_queue_url" in subscriber:
                queue_url = subscriber.get("sqs_queue_url")
            elif "://" in endpoint:
                queue_url = endpoint
            else:
                queue_name = endpoint.split(":")[5]
                queue_url = aws_stack.get_sqs_queue_url(queue_name)
                subscriber["sqs_queue_url"] = queue_url

            message_group_id = (
                req_data.get("MessageGroupId")[0] if req_data.get("MessageGroupId") else ""
            )
            message_deduplication_id = (
                req_data.get("MessageDeduplicationId")[0]
                if req_data.get("MessageDeduplicationId")
                else ""
            )

            sqs_client = aws_stack.connect_to_service("sqs")

            # FIFO-only parameters are passed through only when present.
            kwargs = {}
            if message_group_id:
                kwargs["MessageGroupId"] = message_group_id
            if message_deduplication_id:
                kwargs["MessageDeduplicationId"] = message_deduplication_id

            sqs_client.send_message(
                QueueUrl=queue_url,
                MessageBody=create_sns_message_body(subscriber, req_data, message_id),
                MessageAttributes=create_sqs_message_attributes(subscriber, message_attributes),
                MessageSystemAttributes=create_sqs_system_attributes(headers),
                **kwargs,
            )
            store_delivery_log(subscriber, True, message, message_id)
        except Exception as exc:
            LOG.info("Unable to forward SNS message to SQS: %s %s", exc, traceback.format_exc())
            store_delivery_log(subscriber, False, message, message_id)
            sns_error_to_dead_letter_queue(subscriber["SubscriptionArn"], req_data, str(exc))
            if "NonExistentQueue" in str(exc):
                # Auto-unsubscribe deleted queues to stop retrying forever.
                LOG.info(
                    'Removing non-existent queue "%s" subscribed to topic "%s"',
                    queue_url,
                    topic_arn,
                )
                subscriptions.remove(subscriber)
        return

    elif subscriber["Protocol"] == "lambda":
        try:
            external_url = external_service_url("sns")
            unsubscribe_url = "%s/?Action=Unsubscribe&SubscriptionArn=%s" % (
                external_url,
                subscriber["SubscriptionArn"],
            )
            response = lambda_api.process_sns_notification(
                subscriber["Endpoint"],
                topic_arn,
                subscriber["SubscriptionArn"],
                message,
                message_id,
                message_attributes,
                unsubscribe_url,
                subject=req_data.get("Subject", [None])[0],
            )
            if response is not None:
                delivery = {
                    "statusCode": response.status_code,
                    # NOTE(review): assumes the Lambda API returns a
                    # Flask/Werkzeug-style response exposing get_data() -
                    # confirm against lambda_api.process_sns_notification.
                    "providerResponse": response.get_data(),
                }
                store_delivery_log(subscriber, True, message, message_id, delivery)

            if isinstance(response, Response):
                response.raise_for_status()
            elif isinstance(response, FlaskResponse):
                if response.status_code >= 400:
                    raise Exception(
                        "Error response (code %s): %s" % (response.status_code, response.data)
                    )
        except Exception as exc:
            LOG.info(
                "Unable to run Lambda function on SNS message: %s %s", exc, traceback.format_exc()
            )
            store_delivery_log(subscriber, False, message, message_id)
            sns_error_to_dead_letter_queue(subscriber["SubscriptionArn"], req_data, str(exc))
        return

    elif subscriber["Protocol"] in ["http", "https"]:
        msg_type = (req_data.get("Type") or ["Notification"])[0]
        try:
            message_body = create_sns_message_body(subscriber, req_data, message_id)
        except Exception:
            return
        try:
            response = requests.post(
                subscriber["Endpoint"],
                headers={
                    "Content-Type": "text/plain",
                    # AWS headers according to
                    # https://docs.aws.amazon.com/sns/latest/dg/sns-message-and-json-formats.html#http-header
                    "x-amz-sns-message-type": msg_type,
                    "x-amz-sns-topic-arn": subscriber["TopicArn"],
                    "x-amz-sns-subscription-arn": subscriber["SubscriptionArn"],
                    "User-Agent": "Amazon Simple Notification Service Agent",
                },
                data=message_body,
                verify=False,
            )

            delivery = {
                "statusCode": response.status_code,
                # FIX: requests.Response has no get_data() (that is a
                # Flask/Werkzeug API) - accessing it raised AttributeError,
                # which made every successful HTTP(S) delivery get logged as
                # FAILURE and sent to the DLQ. Use the response body instead.
                "providerResponse": response.text,
            }
            store_delivery_log(subscriber, True, message, message_id, delivery)

            response.raise_for_status()
        except Exception as exc:
            LOG.info(
                "Received error on sending SNS message, putting to DLQ (if configured): %s", exc
            )
            store_delivery_log(subscriber, False, message, message_id)
            sns_error_to_dead_letter_queue(subscriber["SubscriptionArn"], req_data, str(exc))
        return

    elif subscriber["Protocol"] == "application":
        try:
            sns_client = aws_stack.connect_to_service("sns")
            sns_client.publish(TargetArn=subscriber["Endpoint"], Message=message)
            store_delivery_log(subscriber, True, message, message_id)
        except Exception as exc:
            LOG.warning(
                "Unable to forward SNS message to SNS platform app: %s %s",
                exc,
                traceback.format_exc(),
            )
            store_delivery_log(subscriber, False, message, message_id)
            sns_error_to_dead_letter_queue(subscriber["SubscriptionArn"], req_data, str(exc))
        return

    elif subscriber["Protocol"] in ["email", "email-json"]:
        ses_client = aws_stack.connect_to_service("ses")
        if subscriber.get("Endpoint"):
            # Addresses must be verified before local SES accepts the send.
            ses_client.verify_email_address(EmailAddress=subscriber.get("Endpoint"))
            ses_client.verify_email_address(EmailAddress="admin@localstack.com")

            ses_client.send_email(
                Source="admin@localstack.com",
                Message={
                    "Body": {
                        "Text": {
                            "Data": create_sns_message_body(
                                subscriber=subscriber, req_data=req_data, message_id=message_id
                            )
                            if subscriber["Protocol"] == "email-json"
                            else message
                        }
                    },
                    "Subject": {"Data": "SNS-Subscriber-Endpoint"},
                },
                Destination={"ToAddresses": [subscriber.get("Endpoint")]},
            )
            store_delivery_log(subscriber, True, message, message_id)
    else:
        LOG.warning('Unexpected protocol "%s" for SNS subscription', subscriber["Protocol"])
def publish_message(topic_arn, req_data, headers, subscription_arn=None, skip_checks=False):
    """Publish a message to a topic (or platform endpoint) and fan it out to
    subscribers on a background thread; returns the generated message id."""
    sns_backend = SNSBackend.get()
    message = req_data["Message"][0]
    message_id = str(uuid.uuid4())

    if topic_arn and ":endpoint/" in topic_arn:
        # cache messages published to platform endpoints
        cached = sns_backend.platform_endpoint_messages.get(topic_arn) or []
        cached.append(req_data)
        sns_backend.platform_endpoint_messages[topic_arn] = cached

    LOG.debug("Publishing message to TopicArn: %s | Message: %s", topic_arn, message)

    def _deliver(_):
        message_to_subscribers(
            message_id,
            message,
            topic_arn,
            req_data,
            headers,
            subscription_arn,
            skip_checks,
        )

    start_thread(_deliver)
    return message_id
def publish_batch(topic_arn, req_data, headers):
    """Publish each entry of a PublishBatch request to the topic.

    Returns an AWS-shaped result dict with "Successful" and "Failed" entry
    lists; a failure of one entry does not abort the remaining ones.
    """
    response = {"Successful": [], "Failed": []}
    messages = parse_urlencoded_data(
        req_data, "PublishBatchRequestEntries.member", "MessageAttributes.entry"
    )
    for message in messages:
        message_id = str(uuid.uuid4())
        data = {}
        data["TopicArn"] = [topic_arn]
        data["Message"] = [message["Message"]]
        # "Subject" is optional in a PublishBatch entry; the previous plain
        # key lookup raised KeyError (outside the try below) and aborted the
        # whole batch request.
        if message.get("Subject") is not None:
            data["Subject"] = [message["Subject"]]
        message_attributes = prepare_message_attributes(message.get("MessageAttributes", []))
        try:
            message_to_subscribers(
                message_id,
                message["Message"],
                topic_arn,
                data,
                headers,
                message_attributes=message_attributes,
            )
            response["Successful"].append({"Id": message["Id"], "MessageId": message_id})
        except Exception:
            response["Failed"].append({"Id": message["Id"]})
    return response
def do_delete_topic(topic_arn):
    """Drop all state (subscriptions and tags) kept for a deleted topic."""
    backend = SNSBackend.get()
    for store in (backend.sns_subscriptions, backend.sns_tags):
        store.pop(topic_arn, None)
def do_confirm_subscription(topic_arn, token):
    """Mark the pending subscription matching this topic/token as confirmed."""
    sns_backend = SNSBackend.get()
    for status in sns_backend.subscription_status.values():
        if status["Token"] == token and status["TopicArn"] == topic_arn:
            status["Status"] = "Subscribed"
def do_subscribe(topic_arn, endpoint, protocol, subscription_arn, attributes, filter_policy=None):
    """Register a subscription of `endpoint` (via `protocol`) to a topic.

    Idempotent per endpoint: re-subscribing an already-subscribed endpoint is
    a no-op. For HTTP(S) endpoints a SubscriptionConfirmation message is
    published after registration.
    """
    sns_backend = SNSBackend.get()
    # Create the topic's subscription list on first use.
    topic_subs = sns_backend.sns_subscriptions[topic_arn] = (
        sns_backend.sns_subscriptions.get(topic_arn) or []
    )
    # An endpoint may only be subscribed to a topic once. Subsequent
    # subscribe calls do nothing (subscribe is idempotent).
    for existing_topic_subscription in topic_subs:
        if existing_topic_subscription.get("Endpoint") == endpoint:
            return

    subscription = {
        # http://docs.aws.amazon.com/cli/latest/reference/sns/get-subscription-attributes.html
        "TopicArn": topic_arn,
        "Endpoint": endpoint,
        "Protocol": protocol,
        "SubscriptionArn": subscription_arn,
        "FilterPolicy": filter_policy,
    }
    # Caller-provided attributes may override the defaults above.
    subscription.update(attributes)
    topic_subs.append(subscription)

    # Track confirmation state; a fresh token is generated per subscription.
    if subscription_arn not in sns_backend.subscription_status:
        sns_backend.subscription_status[subscription_arn] = {}

    sns_backend.subscription_status[subscription_arn].update(
        {"TopicArn": topic_arn, "Token": short_uid(), "Status": "Not Subscribed"}
    )
    # Send out confirmation message for HTTP(S), fix for https://github.com/localstack/localstack/issues/881
    if protocol in ["http", "https"]:
        token = short_uid()
        external_url = external_service_url("sns")
        subscription["UnsubscribeURL"] = "%s/?Action=Unsubscribe&SubscriptionArn=%s" % (
            external_url,
            subscription_arn,
        )
        confirmation = {
            "Type": ["SubscriptionConfirmation"],
            "Token": [token],
            "Message": [
                ("You have chosen to subscribe to the topic %s.\n" % topic_arn)
                + "To confirm the subscription, visit the SubscribeURL included in this message."
            ],
            "SubscribeURL": [
                "%s/?Action=ConfirmSubscription&TopicArn=%s&Token=%s"
                % (external_url, topic_arn, token)
            ],
        }
        # skip_checks=True: confirmation messages bypass the filter policy.
        publish_message(topic_arn, confirmation, {}, subscription_arn, skip_checks=True)
def do_unsubscribe(subscription_arn):
    """Remove the subscription with the given ARN from every topic."""
    sns_backend = SNSBackend.get()
    all_subs = sns_backend.sns_subscriptions
    for topic_arn in list(all_subs.keys()):
        remaining = []
        for sub in all_subs[topic_arn]:
            if sub["SubscriptionArn"] != subscription_arn:
                remaining.append(sub)
        all_subs[topic_arn] = remaining
def _get_tags(topic_arn):
    """Return the live (mutable) tag list for a topic, creating it on first use."""
    sns_backend = SNSBackend.get()
    return sns_backend.sns_tags.setdefault(topic_arn, [])
def do_list_tags_for_resource(topic_arn):
    # Thin wrapper: returns the live tag list tracked for this topic.
    return _get_tags(topic_arn)
def do_tag_resource(topic_arn, tags):
    """Add or update tags on a topic.

    Exact duplicate entries in `tags` are dropped (first occurrence wins);
    a tag whose Key already exists replaces the previous entry in place,
    otherwise it is appended.
    """
    sns_backend = SNSBackend.get()
    existing_tags = sns_backend.sns_tags.get(topic_arn, [])

    # de-duplicate identical entries, keeping the first occurrence
    unique_tags = []
    for tag in tags:
        if tag not in unique_tags:
            unique_tags.append(tag)

    # map each existing key to the position of its first occurrence
    key_positions = {}
    for idx, tag in enumerate(existing_tags):
        key_positions.setdefault(tag["Key"], idx)

    for tag in unique_tags:
        idx = key_positions.get(tag["Key"])
        if idx is None:
            key_positions[tag["Key"]] = len(existing_tags)
            existing_tags.append(tag)
        else:
            existing_tags[idx] = tag

    sns_backend.sns_tags[topic_arn] = existing_tags
def do_untag_resource(topic_arn, tag_keys):
    """Remove every tag whose key appears in `tag_keys` from the topic."""
    sns_backend = SNSBackend.get()
    kept = [tag for tag in _get_tags(topic_arn) if tag["Key"] not in tag_keys]
    sns_backend.sns_tags[topic_arn] = kept
# ---------------
# HELPER METHODS
# ---------------
def get_subscription_by_arn(sub_arn):
    """Return the subscription dict with the given ARN, or None if not found."""
    sns_backend = SNSBackend.get()
    # TODO maintain separate map instead of traversing all items
    # (iterate values() - the topic key was previously fetched but unused)
    for subscriptions in sns_backend.sns_subscriptions.values():
        for sub in subscriptions:
            if sub["SubscriptionArn"] == sub_arn:
                return sub
    return None
def make_response(op_name, content="", message_id=None):
    """Build a successful SNS XML response for the given operation name.

    If no `content` is given, a `<MessageId>` element with a fresh (or the
    provided) message id is used as the result payload.
    """
    response = Response()
    if not content:
        message_id = message_id or str(uuid.uuid4())
        content = "<MessageId>%s</MessageId>" % message_id
    # Response body follows the SNS query-API XML envelope.
    response._content = """<{op_name}Response xmlns="http://sns.amazonaws.com/doc/2010-03-31/">
<{op_name}Result>
{content}
</{op_name}Result>
<ResponseMetadata><RequestId>{req_id}</RequestId></ResponseMetadata>
</{op_name}Response>""".format(
        op_name=op_name, content=content, req_id=short_uid()
    )
    response.status_code = 200
    return response
# TODO move to utils!
def make_error(message, code=400, code_string="InvalidParameter"):
    """Build an SNS XML error response with the given message and status code."""
    response = Response()
    # Error body follows the SNS query-API ErrorResponse envelope.
    response._content = """<ErrorResponse xmlns="http://sns.amazonaws.com/doc/2010-03-31/"><Error>
<Type>Sender</Type>
<Code>{code_string}</Code>
<Message>{message}</Message>
</Error><RequestId>{req_id}</RequestId>
</ErrorResponse>""".format(
        message=message, code_string=code_string, req_id=short_uid()
    )
    response.status_code = code
    return response
def create_sns_message_body(subscriber, req_data, message_id=None):
    """Build the notification payload delivered to a subscriber.

    For raw message delivery the plain message string is returned unchanged;
    otherwise the standard SNS notification envelope (Type, MessageId,
    TopicArn, Timestamp, ...) is serialized to JSON.

    Raises an exception if MessageStructure=json was requested but the
    payload contains neither a protocol-specific key nor a 'default' key.
    """
    message = req_data["Message"][0]
    protocol = subscriber["Protocol"]
    if six.PY2 and type(message).__name__ == "unicode":
        # fix non-ascii unicode characters under Python 2
        message = message.encode("raw-unicode-escape")

    if req_data.get("MessageStructure") == ["json"]:
        message = json.loads(message)
        try:
            # prefer the protocol-specific payload, fall back to "default"
            message = message.get(protocol, message["default"])
        except KeyError:
            raise Exception("Unable to find 'default' key in message payload")

    if is_raw_message_delivery(subscriber):
        return message

    data = {
        "Type": req_data.get("Type", ["Notification"])[0],
        "MessageId": message_id,
        "TopicArn": subscriber["TopicArn"],
        "Message": message,
        "Timestamp": timestamp_millis(),
        "SignatureVersion": "1",
        # TODO Add a more sophisticated solution with an actual signature
        # Hardcoded
        "Signature": "EXAMPLEpH+..",
        "SigningCertURL": "https://sns.us-east-1.amazonaws.com/SimpleNotificationService-0000000000000000000000.pem",
    }

    # Optional request fields, stored as single-element lists in req_data.
    for key in ["Subject", "SubscribeURL", "Token"]:
        if req_data.get(key):
            data[key] = req_data[key][0]

    # Copy selected subscription attributes (e.g. UnsubscribeURL) through.
    for key in HTTP_SUBSCRIPTION_ATTRIBUTES:
        if key in subscriber:
            data[key] = subscriber[key]

    attributes = get_message_attributes(req_data)
    if attributes:
        data["MessageAttributes"] = attributes

    return json.dumps(data)
def create_sqs_message_attributes(subscriber, attributes):
    """Convert SNS message attributes to the SQS send_message() format.

    Attributes are only forwarded for raw-message-delivery subscriptions;
    otherwise they travel inside the JSON notification envelope instead.
    """
    if not is_raw_message_delivery(subscriber):
        return {}

    result = {}
    for name, value in attributes.items():
        data_type = value.get("Type")
        if not data_type:
            continue
        entry = {"DataType": data_type}
        if data_type == "Binary":
            entry["BinaryValue"] = base64.decodebytes(to_bytes(value["Value"]))
        else:
            entry["StringValue"] = str(value.get("Value", ""))
        result[name] = entry
    return result
def prepare_message_attributes(message_attributes):
    """Normalize parsed MessageAttributes entries into a name->attribute map.

    Each input entry has the shape
    ``{"Name": ..., "Value": {"DataType": ..., "StringValue"/"BinaryValue": ...}}``
    and is converted to ``{name: {"Type": ..., "Value": ...}}``.
    """
    attributes = {}
    for attr in message_attributes:
        value = attr["Value"]
        # Explicit presence check: the previous truthiness test sent
        # empty-string StringValues down the BinaryValue path, raising
        # KeyError when no BinaryValue was given.
        if value.get("StringValue") is not None:
            attr_value = value["StringValue"]
        else:
            attr_value = value["BinaryValue"]
        attributes[attr["Name"]] = {"Type": value["DataType"], "Value": attr_value}
    return attributes
def get_message_attributes(req_data):
    # Extract the "MessageAttributes.entry.N.*" request parameters and
    # normalize them via prepare_message_attributes().
    extracted_msg_attrs = parse_urlencoded_data(req_data, "MessageAttributes.entry")
    return prepare_message_attributes(extracted_msg_attrs)
def get_subscribe_attributes(req_data):
    """Collect the Attributes.entry.N.key/value pairs of a Subscribe request
    into a flat dict, filling in default attributes where absent."""
    attributes = {}
    for key in req_data.keys():
        if ".key" in key:
            value_key = key.replace("key", "value")
            attributes[req_data[key][0]] = req_data[value_key][0]

    defaults = {
        # TODO: this is required to get TF "aws_sns_topic_subscription" working, but this should
        # be revisited (e.g., cross-account subscriptions should not be confirmed automatically)
        "PendingConfirmation": "false"
    }
    for name, default in defaults.items():
        attributes.setdefault(name, default)
    return attributes
def is_number(x):
    """Return True if `x` can be interpreted as a float (numbers or numeric
    strings), False otherwise."""
    try:
        float(x)
        return True
    except (TypeError, ValueError):
        # TypeError covers non-numeric types such as None or lists, which the
        # previous ValueError-only handler let propagate as a crash.
        return False
def evaluate_numeric_condition(conditions, value):
    """Evaluate an SNS "numeric" filter condition.

    `conditions` is a flat list of alternating operator/operand entries,
    e.g. [">=", 100, "<", 200]; all operator/operand pairs must hold for
    `value` to match. Non-numeric values never match; unrecognized
    operators are ignored.
    """
    if not is_number(value):
        return False

    for i in range(0, len(conditions), 2):
        value = float(value)
        op = conditions[i]
        operand = float(conditions[i + 1])
        satisfied = True
        if op == "=":
            satisfied = value == operand
        elif op == ">":
            satisfied = value > operand
        elif op == "<":
            satisfied = value < operand
        elif op == ">=":
            satisfied = value >= operand
        elif op == "<=":
            satisfied = value <= operand
        if not satisfied:
            return False
    return True
def evaluate_exists_condition(conditions, message_attributes, criteria):
    """Evaluate an SNS "exists" filter condition: a truthy condition requires
    the attribute `criteria` to be present, a falsy one requires it absent."""
    # support for exists: false was added in april 2021
    # https://aws.amazon.com/about-aws/whats-new/2021/04/amazon-sns-grows-the-set-of-message-filtering-operators/
    attribute_exists = message_attributes.get(criteria) is not None
    return attribute_exists if conditions else not attribute_exists
def evaluate_condition(value, condition, message_attributes, criteria):
    """Evaluate a single filter-policy condition against an attribute value.

    A non-dict condition is a scalar equality match; dict conditions support
    the "exists", "anything-but", "prefix" and "numeric" operators.
    """
    if type(condition) is not dict:
        # scalar condition: plain equality
        return value == condition
    if condition.get("exists") is not None:
        return evaluate_exists_condition(condition.get("exists"), message_attributes, criteria)
    if value is None:
        # the remaining conditions require the value to not be None
        return False
    if condition.get("anything-but"):
        return value not in condition.get("anything-but")
    if condition.get("prefix"):
        return value.startswith(condition.get("prefix"))
    if condition.get("numeric"):
        return evaluate_numeric_condition(condition.get("numeric"), value)
    return False
def evaluate_filter_policy_conditions(conditions, attribute, message_attributes, criteria):
    """Return True if any condition matches the attribute value.

    A "String.Array" attribute carries a serialized list; each of its
    elements may satisfy any of the conditions. A missing attribute is
    evaluated with value None (relevant for "exists" conditions).
    """
    if type(conditions) is not list:
        conditions = [conditions]

    if attribute is not None and attribute["Type"] == "String.Array":
        candidate_values = ast.literal_eval(attribute["Value"])
    else:
        candidate_values = [attribute["Value"] if attribute is not None else None]

    for candidate in candidate_values:
        for condition in conditions:
            if evaluate_condition(candidate, condition, message_attributes, criteria):
                return True
    return False
def check_filter_policy(filter_policy, message_attributes):
    """Return True if the message attributes satisfy every criterion of the
    subscription's filter policy (an empty/missing policy matches all)."""
    if not filter_policy:
        return True

    for criteria, conditions in filter_policy.items():
        attribute = message_attributes.get(criteria)
        if not evaluate_filter_policy_conditions(
            conditions, attribute, message_attributes, criteria
        ):
            return False
    return True
def is_raw_message_delivery(susbcriber):
    """Return True if the subscription requests raw message delivery.

    The attribute may be stored as the string "true"/"True" or boolean True.
    NOTE: the parameter keeps its historical spelling ("susbcriber") so that
    keyword callers remain compatible.
    """
    raw_flag = susbcriber.get("RawMessageDelivery")
    return raw_flag in ("true", True, "True")
def store_delivery_log(
    subscriber: dict, success: bool, message: str, message_id: str, delivery: dict = None
):
    """Write a delivery-status entry for one message delivery to CloudWatch Logs.

    The log group is derived from the subscription's topic ARN; `delivery`
    carries optional protocol-specific details and is enriched in place.
    """
    log_group_name = subscriber.get("TopicArn", "").replace("arn:aws:", "").replace(":", "/")
    log_stream_name = long_uid()
    invocation_time = int(time.time() * 1000)

    delivery = not_none_or(delivery, {})
    # FIX: the previous trailing commas turned these two values into
    # one-element tuples in the serialized log entry.
    delivery["deliveryId"] = long_uid()
    delivery["destination"] = subscriber.get("Endpoint", "")
    delivery["dwellTimeMs"] = 200
    if not success:
        # NOTE(review): key kept as "attemps" (sic) for output compatibility;
        # AWS delivery logs spell it "attempts" - confirm before renaming.
        delivery["attemps"] = 1

    delivery_log = {
        "notification": {
            "messageMD5Sum": md5(message),
            "messageId": message_id,
            "topicArn": subscriber.get("TopicArn"),
            "timestamp": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f%z"),
        },
        "delivery": delivery,
        "status": "SUCCESS" if success else "FAILURE",
    }

    log_output = json.dumps(json_safe(delivery_log))
    return store_cloudwatch_logs(log_group_name, log_stream_name, log_output, invocation_time)
| 37.202349 | 117 | 0.595416 |
58d255c3eeea31c9008728ca883ce47975a16609 | 7,317 | py | Python | tests/test_plugin.py | jairhenrique/pytest-testdox | ea8796fe09cfb10b7b00786453c690d4630f9076 | [
"MIT"
] | 42 | 2016-11-04T15:15:22.000Z | 2022-03-06T11:02:34.000Z | tests/test_plugin.py | renanivo/pytest-testdox | 86f8224b416a74ccc5b102b49c24a918c42d87d4 | [
"MIT"
] | 68 | 2017-03-08T16:15:23.000Z | 2022-03-10T18:35:48.000Z | tests/test_plugin.py | jairhenrique/pytest-testdox | ea8796fe09cfb10b7b00786453c690d4630f9076 | [
"MIT"
] | 9 | 2018-01-21T06:21:53.000Z | 2022-03-06T11:16:27.000Z | from collections import Counter
import pytest
from pytest_testdox import constants
class TestReport:
    """End-to-end checks of the pytest-testdox console reporter.

    Each test drives an inner pytest session via the `testdir` fixture and
    asserts on the captured terminal output (including ANSI color codes).
    """

    @pytest.fixture
    def testdir(self, testdir):
        # Shadow pytest's builtin testdir fixture: enable the plugin in the
        # inner session's conftest before handing the fixture back.
        testdir.makeconftest("""
            pytest_plugins = 'pytest_testdox.plugin'
        """)
        return testdir

    def test_should_print_a_green_passing_test(self, testdir):
        testdir.makepyfile("""
            def test_a_feature_is_working():
                assert True
        """)

        result = testdir.runpytest('--force-testdox')

        # \033[92m / \033[0m delimit green-colored output
        expected = '\033[92m ✓ a feature is working\033[0m'
        assert expected in result.stdout.str()

    def test_should_print_a_red_failing_test(self, testdir):
        testdir.makepyfile("""
            def test_a_failed_test_of_a_feature():
                assert False
        """)

        result = testdir.runpytest('--force-testdox')

        expected = '\033[91m ✗ a failed test of a feature\033[0m'
        assert expected in result.stdout.str()

    def test_should_print_a_yellow_skipped_test(self, testdir):
        testdir.makepyfile("""
            import pytest

            @pytest.mark.skip
            def test_a_skipped_test():
                pass
        """)

        result = testdir.runpytest('--force-testdox')

        expected = '\033[93m » a skipped test\033[0m'
        assert expected in result.stdout.str()

    def test_should_not_print_colors_when_disabled_by_parameter(self, testdir):
        testdir.makepyfile("""
            def test_a_feature_is_working():
                assert True
        """)

        result = testdir.runpytest(
            '--color=no',
            '--force-testdox'
        )

        assert '\033[92m' not in result.stdout.str()

    def test_should_output_plaintext_using_a_config_option(self, testdir):
        testdir.makeini("""
            [pytest]
            testdox_format=plaintext
        """)
        testdir.makepyfile("""
            def test_a_feature_is_working():
                assert True
        """)

        result = testdir.runpytest('--force-testdox')

        expected = '\033[92m [x] a feature is working\033[0m'
        assert expected in result.stdout.str()

    def test_should_print_the_test_class_name(self, testdir):
        testdir.makepyfile("""
            class TestFoo:
                def test_foo(self):
                    pass

            class TestBar:
                def test_bar(self):
                    pass
        """)

        result = testdir.runpytest('--force-testdox')

        lines = result.stdout.get_lines_after('Foo')
        assert '✓ foo' in lines[0]

        lines = result.stdout.get_lines_after('Bar')
        assert '✓ bar' in lines[0]

    def test_should_print_the_module_name_of_a_test_without_class(
        self,
        testdir
    ):
        testdir.makefile('.py', test_module_name="""
            def test_a_failed_test_of_a_feature():
                assert False
        """)

        result = testdir.runpytest('--force-testdox')

        result.stdout.fnmatch_lines(['module name'])

    def test_should_print_test_summary(self, testdir):
        testdir.makefile('.py', test_module_name="""
            def test_a_passing_test():
                assert True
        """)

        result = testdir.runpytest('--force-testdox')

        assert '1 passed' in result.stdout.str()

    def test_should_use_python_patterns_configuration(self, testdir):
        # The plugin must honor custom python_classes/files/functions globs.
        testdir.makeini("""
            [pytest]
            python_classes=Describe*
            python_files=*spec.py
            python_functions=it*
        """)
        testdir.makefile('.py', module_spec="""
            class DescribeTest:
                def it_runs(self):
                    pass
        """)

        result = testdir.runpytest('--force-testdox')

        lines = result.stdout.get_lines_after('Test')
        assert '✓ runs' in lines[0]

    def test_should_override_test_titles_with_title_mark(
        self,
        testdir
    ):
        testdir.makefile('.py', test_module_name="""
            import pytest

            @pytest.mark.{}('''
                My Title
                My precious title
            ''')
            def test_a_passing_test():
                assert True
        """.format(
            constants.TITLE_MARK
        ))

        result = testdir.runpytest('--force-testdox')

        assert 'My Title\n My precious title' in result.stdout.str()

    def test_should_override_class_names_with_class_name_mark(
        self,
        testdir
    ):
        testdir.makefile('.py', test_module_name="""
            import pytest

            @pytest.mark.{}('''
                My Class
                My precious class
            ''')
            class TestClass:
                def test_foo(self):
                    pass
        """.format(
            constants.CLASS_NAME_MARK
        ))

        result = testdir.runpytest('--force-testdox')

        assert 'My Class\nMy precious class' in result.stdout.str()

    def test_should_override_test_titles_with_title_mark_parametrize(
        self,
        testdir
    ):
        testdir.makefile('.py', test_module_name="""
            import pytest

            @pytest.mark.parametrize('par', ['param1', 'param2'])
            @pytest.mark.{}('should pass with parameters')
            def test_a_passing_test(par):
                assert True
        """.format(
            constants.TITLE_MARK
        ))

        result = testdir.runpytest('--force-testdox')

        assert 'should pass with parameters[param1]' in result.stdout.str()
        assert 'should pass with parameters[param2]' in result.stdout.str()

    def test_decorator_order_should_not_affect_parametrize(
        self,
        testdir
    ):
        # Same as above, but with the mark decorators applied in reverse order.
        testdir.makefile('.py', test_module_name="""
            import pytest

            @pytest.mark.{}('should pass with parameters')
            @pytest.mark.parametrize('par', ['param1', 'param2'])
            def test_a_passing_test(par):
                assert True
        """.format(
            constants.TITLE_MARK
        ))

        result = testdir.runpytest('--force-testdox')

        assert 'should pass with parameters[param1]' in result.stdout.str()
        assert 'should pass with parameters[param2]' in result.stdout.str()

    def test_should_not_enable_plugin_when_test_run_out_of_tty(self, testdir):
        # Without --force-testdox the plugin stays off when stdout is not a TTY.
        testdir.makepyfile("""
            def test_a_feature_is_working():
                assert True
        """)

        result = testdir.runpytest('--testdox')

        expected_testdox_output = '\033[92m ✓ a feature is working\033[0m'
        assert expected_testdox_output not in result.stdout.str()

    def test_should_not_aggregate_tests_under_same_class_in_different_modules(
        self, testdir
    ):
        testdir.makepyfile(
            test_first="""
                class TestFoo(object):
                    def test_a_feature_is_working(self):
                        assert True
            """,
            test_second="""
                class TestFoo(object):
                    def test_a_feature_is_working_in_another_module(self):
                        assert True
            """
        )

        result = testdir.runpytest('--force-testdox')

        # The "Foo" header must appear once per module, not be merged.
        word_count = Counter(result.stdout.lines)
        assert word_count['Foo'] == 2
| 29.035714 | 79 | 0.569769 |
32913606e5cbb2287150be187eadc93854106ae1 | 400 | py | Python | Homework 1/data_cleaning.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | 4 | 2020-04-29T04:34:50.000Z | 2021-11-11T07:49:08.000Z | Homework 1/data_cleaning.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | null | null | null | Homework 1/data_cleaning.py | rukmal/FE-621-Homework | 9c7cef7931b58aed54867acd8e8cf1928bc6d2dd | [
"MIT"
] | 1 | 2020-04-23T07:32:44.000Z | 2020-04-23T07:32:44.000Z | # Script to rename option files, from the R data download script format to
# OOC-compliant names.
from context import fe621
import os
# Build absolute paths to the per-ticker option data directories, then rename
# the files in each via the fe621 utility.
option_file_paths = [os.getcwd() + i for i in ['/Homework 1/data/DATA1/AMZN',
                                               '/Homework 1/data/DATA1/SPY']]

for option_file_path in option_file_paths:
    fe621.util.renameOptionFiles(folder_path=option_file_path)
| 30.769231 | 77 | 0.68 |
ca0915a95f740966ebf1bcbce096a3484626cefa | 4,108 | py | Python | qiskit/aqua/algorithms/classifiers/qsvm/_qsvm_multiclass.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | 2 | 2020-06-29T16:08:12.000Z | 2020-08-07T22:42:13.000Z | qiskit/aqua/algorithms/classifiers/qsvm/_qsvm_multiclass.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | null | null | null | qiskit/aqua/algorithms/classifiers/qsvm/_qsvm_multiclass.py | hushaohan/aqua | 8512bc6ce246a8b3cca1e5edb1703b6885aa7c5d | [
"Apache-2.0"
] | 1 | 2022-01-25T07:09:10.000Z | 2022-01-25T07:09:10.000Z | # -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""The multiclass classifier."""
import logging
import numpy as np
from qiskit.aqua.utils import map_label_to_class_name
from ._qsvm_abc import _QSVM_ABC
logger = logging.getLogger(__name__)
# pylint: disable=invalid-name
class _QSVM_Multiclass(_QSVM_ABC):
    """
    The multiclass classifier.

    the classifier is built by wrapping the estimator
    (for binary classification) with the multiclass extensions
    """

    def __init__(self, qalgo, multiclass_classifier):
        super().__init__(qalgo)
        self.multiclass_classifier = multiclass_classifier
        self.multiclass_classifier.params.append(qalgo)

    def train(self, data, labels):
        """Train the wrapped multiclass classifier on the given data/labels."""
        self.multiclass_classifier.train(data, labels)

    def test(self, data, labels):
        """Test the classifier; records and returns the accuracy."""
        accuracy = self.multiclass_classifier.test(data, labels)
        self._ret['testing_accuracy'] = accuracy
        self._ret['test_success_ratio'] = accuracy
        return accuracy

    def predict(self, data):
        """Predict labels for the given data; records and returns them."""
        predicted_labels = self.multiclass_classifier.predict(data)
        self._ret['predicted_labels'] = predicted_labels
        return predicted_labels

    def run(self):
        """
        put the train, test, predict together
        """
        self.train(self._qalgo.training_dataset[0], self._qalgo.training_dataset[1])
        if self._qalgo.test_dataset is not None:
            self.test(self._qalgo.test_dataset[0], self._qalgo.test_dataset[1])
        if self._qalgo.datapoints is not None:
            predicted_labels = self.predict(self._qalgo.datapoints)
            predicted_classes = \
                map_label_to_class_name(predicted_labels, self._qalgo.label_to_class)
            self._ret['predicted_classes'] = predicted_classes
        return self._ret

    def load_model(self, file_path):
        """Load the per-estimator SVM parameters from a saved .npz model."""
        model_npz = np.load(file_path, allow_pickle=True)
        for i, estimator in enumerate(self.multiclass_classifier.estimators):
            # FIX: the previous code accessed `estimators.ret` (an attribute
            # of the list itself) instead of the i-th estimator's `ret`,
            # raising AttributeError for any saved model.
            estimator.ret['svm']['alphas'] = model_npz['alphas_{}'.format(i)]
            estimator.ret['svm']['bias'] = model_npz['bias_{}'.format(i)]
            estimator.ret['svm']['support_vectors'] = \
                model_npz['support_vectors_{}'.format(i)]
            estimator.ret['svm']['yin'] = model_npz['yin_{}'.format(i)]
        try:
            self._qalgo.class_to_label = model_npz['class_to_label']
            self._qalgo.label_to_class = model_npz['label_to_class']
        except KeyError as ex:
            logger.warning("The model saved in Aqua 0.5 does not contain the mapping "
                           "between class names and labels. "
                           "Please setup them and save the model again "
                           "for further use. Error: %s", str(ex))

    def save_model(self, file_path):
        """Save each estimator's SVM parameters and the label mappings as .npz."""
        model = {}
        for i, estimator in enumerate(self.multiclass_classifier.estimators):
            model['alphas_{}'.format(i)] = estimator.ret['svm']['alphas']
            model['bias_{}'.format(i)] = estimator.ret['svm']['bias']
            model['support_vectors_{}'.format(i)] = estimator.ret['svm']['support_vectors']
            model['yin_{}'.format(i)] = estimator.ret['svm']['yin']
        model['class_to_label'] = self._qalgo.class_to_label
        model['label_to_class'] = self._qalgo.label_to_class
        np.savez(file_path, **model)
| 39.5 | 99 | 0.6463 |
ea543c1a37635daac456f0f79666f376baac2d4b | 604 | py | Python | setup.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | 1 | 2019-06-07T10:45:58.000Z | 2019-06-07T10:45:58.000Z | setup.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | null | null | null | setup.py | IIKovalenko/python-sdk | 980e2c5d848eadb42799132b35a9f58ab7b27157 | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Collect only the SDK packages (yandexcloud* / yandex*) for distribution.
packages = find_packages('.', include=['yandexcloud*', 'yandex*'])

setup(name='yandexcloud',
      version='0.9.0',
      description='The Yandex.Cloud official SDK',
      url='https://github.com/yandex-cloud/python-sdk',
      author='Yandex LLC',
      author_email='cloud@support.yandex.ru',
      license='MIT',
      install_requires=[
          'cryptography',
          'grpcio>=0.17.0',
          'googleapis-common-protos',
          'pyjwt',
          'six',
      ],
      # NOTE(review): tests_require is deprecated in modern setuptools -
      # consider moving to an extras_require/test dependency instead.
      tests_require=['pytest'],
      packages=packages,
      zip_safe=False)
8d5707735402ded3d3375eaed1c117c95ae7c53e | 4,155 | py | Python | scripts/generate_gan_data.py | ssundaram21/6.819FinalProjectRAMP | ff7ec9020f480b0cd966c283a80d1871998d5a06 | [
"MIT"
] | 2 | 2021-10-07T20:30:22.000Z | 2022-03-20T18:48:14.000Z | scripts/generate_gan_data.py | ssundaram21/6.819FinalProjectRAMP | ff7ec9020f480b0cd966c283a80d1871998d5a06 | [
"MIT"
] | null | null | null | scripts/generate_gan_data.py | ssundaram21/6.819FinalProjectRAMP | ff7ec9020f480b0cd966c283a80d1871998d5a06 | [
"MIT"
] | null | null | null | import pickle
import numpy as np
import pandas as pd
import tensorflow as tf
import PIL.Image
import imageio
# import tfutils
import matplotlib.pyplot as plt
import os
import sys
from IPython.core.interactiveshell import InteractiveShell
# Echo every expression result (notebook-style); suggests this script was
# exported from an interactive session.
InteractiveShell.ast_node_interactivity = "all"
network_path = "/om/user/shobhita/src/chexpert/CheXpert GAN/"
output_data_path = "/om/user/shobhita/src/chexpert/gan_fake_data/"
real_data_path = "/om/user/shobhita/src/chexpert/data/"
# CheXpert observation columns; presumably the GAN's label/conditioning order
# -- TODO confirm against the GAN training code.
names = ['No Finding', 'Enlarged Cardiomediastinum', 'Cardiomegaly', 'Lung Opacity',
         'Lung Lesion', 'Edema', 'Consolidation', 'Pneumonia', 'Atelectasis',
         'Pneumothorax', 'Pleural Effusion', 'Pleural Other', 'Fracture',
         'Support Devices']
tf.InteractiveSession()
# Import pretrained Chexpert GAN.
with open(network_path + "network-final.pkl", 'rb') as file:
    # NOTE(review): unpickling fully trusts this file's contents.
    G, D, Gs = pickle.load(file)
real_labels = pd.read_csv(real_data_path + "CheXpert-v1.0-small/train_preprocessed.csv")
# Minority classes to augment with GAN-generated images.
classes_to_generate = ["Lung Lesion", "Pleural Other", "Fracture"]
total = len(real_labels)
lesion = sum(real_labels["Lung Lesion"])
pleural = sum(real_labels["Pleural Other"])
fracture = sum(real_labels["Fracture"])
# Number of synthetic images per class (scale factors presumably hand-tuned).
lesion_n, pleural_n, fracture_n = int(lesion*1.65), int(pleural*3.65), int(fracture*1.95)
total_gen = total + lesion_n + pleural_n + fracture_n
# NOTE(review): `total_gen` already includes `total`, so the denominators
# below double-count the real images -- confirm whether that is intended.
print("Lesion: {}/{} + {} --> {}/{}".format(lesion, lesion/total, lesion_n, lesion+lesion_n, (lesion+lesion_n)/(total+total_gen)))
print("Pleural: {}/{} + {} --> {}/{}".format(pleural, pleural/total, pleural_n, pleural+pleural_n, (pleural+pleural_n)/(total + total_gen)))
print("Fracture: {}/{} + {} --> {}/{}".format(fracture, fracture/total, fracture_n, fracture+fracture_n, (fracture+fracture_n)/(total + total_gen)))
sys.stdout.flush()
# Sample, with replacement, the new per-class label rows from the real rows
# that actually contain that class.
label_vectors = {}
for cat, n in zip(classes_to_generate, [lesion_n, pleural_n, fracture_n]):
    relevant_labels = real_labels[real_labels[cat] == 1]
    new_labels = relevant_labels.sample(n, replace=True)[names].to_numpy()
    label_vectors[cat] = new_labels
for cat, arr in label_vectors.items():
    print("{}: {}".format(cat, arr.shape))
# Fixed: this whole sampling block used to appear twice verbatim; the second
# pass discarded the first samples, resampled, and reprinted the shapes.
labels_save = {}  # maps "<class>_<index>" -> label row; pickled at the end
for cat in classes_to_generate:
    labels = label_vectors[cat]
    batch = 1  # images generated per Gs.run call
    used_labels = []
    used_imgname = []
    # Fixed seed -> reproducible latents; one latent row per sampled label.
    latents_raw = np.random.RandomState(1000).randn(labels.shape[0], *Gs.input_shapes[0][1:])
    total_num = latents_raw.shape[0]
    print("Generating {}".format(cat))
    sys.stdout.flush()
    for n in range(int(total_num / batch)):
        if n % 1000 == 0:
            print("{}/{}".format(n, total_num))
        latent_vec = latents_raw[n * batch: (n + 1) * batch, :]
        label_vec = labels[n * batch: (n + 1) * batch, :]
        used_labels.append(label_vec)
        images = Gs.run(latent_vec, label_vec)
        images = np.clip(np.rint((images + 1.0) / 2.0 * 255.0), 0.0, 255.0).astype(np.uint8)  # [-1,1] => [0,255]
        images = images.transpose(0, 2, 3, 1)  # NCHW => NHWC
        # Drop the trailing channel axis: np.squeeze(axis=-1) raises unless
        # the output really is single-channel.
        save_images = np.squeeze(images, axis=-1)
        data_dir = output_data_path
        if not os.path.exists(data_dir):
            os.makedirs(data_dir)
        for idx in range(save_images.shape[0]):
            image_idx = idx + batch * n
            labels_save["{}_{}".format(cat, image_idx)] = labels[image_idx, :]
            store_name = 'fake_{}_{}.png'.format(cat, image_idx)
            used_imgname.append(store_name)
            store_path = os.path.join(data_dir, store_name)
            imageio.imwrite(store_path, save_images[idx])
    print("Done with {}".format(cat))
    print(len(labels))
    print(len(used_labels))
    print(len(used_imgname))
    sys.stdout.flush()
with open(output_data_path + "gan_image_labels.pkl", "wb") as handle:
    pickle.dump(labels_save, handle)
sys.stdout.flush()
print("Done :)")
94cc323e3338ad2dbbf5bfc5efcc5caa570b9651 | 1,824 | py | Python | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ResolveETLJobSqlSchemaRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ResolveETLJobSqlSchemaRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | aliyun-python-sdk-emr/aliyunsdkemr/request/v20160408/ResolveETLJobSqlSchemaRequest.py | sdk-team/aliyun-openapi-python-sdk | 384730d707e6720d1676ccb8f552e6a7b330ec86 | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class ResolveETLJobSqlSchemaRequest(RpcRequest):
    """EMR `ResolveETLJobSqlSchema` RPC request (product 'Emr', version 2016-04-08).

    Auto-generated style wrapper: every accessor below reads or writes a
    single query parameter on the underlying RpcRequest.
    """
    def __init__(self):
        RpcRequest.__init__(self, 'Emr', '2016-04-08', 'ResolveETLJobSqlSchema')
    # StageName query parameter.
    def get_StageName(self):
        return self.get_query_params().get('StageName')
    def set_StageName(self,StageName):
        self.add_query_param('StageName',StageName)
    # ResourceOwnerId query parameter.
    def get_ResourceOwnerId(self):
        return self.get_query_params().get('ResourceOwnerId')
    def set_ResourceOwnerId(self,ResourceOwnerId):
        self.add_query_param('ResourceOwnerId',ResourceOwnerId)
    # EtlJobId query parameter.
    def get_EtlJobId(self):
        return self.get_query_params().get('EtlJobId')
    def set_EtlJobId(self,EtlJobId):
        self.add_query_param('EtlJobId',EtlJobId)
    # DataSourceId query parameter.
    def get_DataSourceId(self):
        return self.get_query_params().get('DataSourceId')
    def set_DataSourceId(self,DataSourceId):
        self.add_query_param('DataSourceId',DataSourceId)
    # Sql query parameter.
    def get_Sql(self):
        return self.get_query_params().get('Sql')
    def set_Sql(self,Sql):
        self.add_query_param('Sql',Sql)
ad21025667490d00e09808810c9e38ce771e5cb6 | 1,396 | py | Python | graph/Ratndeep/Distance of nearest cell having 1.py | ayroti-18/Competitive-Programming | d1ea08a91c63e54f0bba2365e56f98db71eb9054 | [
"MIT"
] | 3 | 2020-12-20T10:23:11.000Z | 2021-06-16T10:34:18.000Z | graph/Ratndeep/Distance of nearest cell having 1.py | Spring-dot/Competitive-Programming | 98add277a8b029710c749d1082de25c524e12408 | [
"MIT"
] | null | null | null | graph/Ratndeep/Distance of nearest cell having 1.py | Spring-dot/Competitive-Programming | 98add277a8b029710c749d1082de25c524e12408 | [
"MIT"
] | null | null | null | from collections import defaultdict
def BFS(self, s):
    """Breadth-first traversal from vertex `s`, printing vertices in visit order.

    NOTE(review): written like a method (takes `self` and reads `self.graph`)
    but defined at module level; nothing in this file supplies such an object.
    Assumes vertices are labelled 0..len(self.graph)-1 -- TODO confirm.
    """
    visited = [False] * (len(self.graph))
    queue = []
    queue.append(s)
    visited[s] = True
    while queue:
        # FIFO pop; list.pop(0) is O(n) -- collections.deque would be O(1).
        s = queue.pop(0)
        print (s, end = " ")
        for i in self.graph[s]:
            if visited[i] == False:
                queue.append(i)
                visited[i] = True
def DFSutil(matrix, visited, row, col, x, y):
    """Scan the 8 neighbours of (x, y); return (nx, ny) of the first cell == 1.

    Every in-bounds, unvisited neighbour examined on the way is marked
    visited in-place. Returns None when no such neighbour contains a 1.
    """
    # Fixed: the original offset list had `-1-1` (evaluates to -2) which left
    # only 7 entries, so `range(8)` raised IndexError at i == 7.
    x_axis = [0, 0, 1, -1, 1, -1, -1, 1]
    y_axis = [1, -1, 0, 0, 1, -1, 1, -1]

    def safe(px, py):
        # In-bounds and not yet visited. The original called an undefined
        # global `safe`; this local helper supplies the presumable intent.
        return 0 <= px < row and 0 <= py < col and not visited[px][py]

    for i in range(8):
        new_x = x + x_axis[i]
        new_y = y + y_axis[i]
        if safe(new_x, new_y):
            visited[new_x][new_y] = True
            if matrix[new_x][new_y] == 1:
                return new_x, new_y
def DFS(matrix,row,col):
    """For each cell, print the Manhattan distance to the 1 found by DFSutil.

    NOTE(review): `visited` is shared across all start cells, so later cells
    see marks left by earlier scans; DFSutil can also return None, in which
    case the tuple unpacking below raises TypeError. The `graph` defaultdict
    is built but never used afterwards.
    """
    visited = [[False for i in range(col)]for i in range(row)]
    graph = defaultdict(list)
    for i in range(row):
        for j in range(col):
            graph[i].append(j)
            # x, y: coordinates of a neighbouring cell containing 1.
            x,y=DFSutil(matrix,visited,row,col,i,j)
            print(abs(i-x)+abs(j-y),end=" ")
    print()
# Driver: for each test case read a flat row*col matrix and run DFS on it.
for _ in range(int(input())):
    row, col = map(int, input().split())
    # Fixed: the original line had an extra ')' (SyntaxError).
    list_matrix = list(map(int, input().split()))
    matrix = []  # Fixed: `matrix` was never initialised before being appended to.
    index = 0
    # Re-shape the flat input list into a row x col grid.
    for i in range(row):
        temp_matrix = []
        for j in range(col):
            temp_matrix.append(list_matrix[index])
            index += 1
        matrix.append(temp_matrix)
    DFS(matrix, row, col)  # Fixed: DFS is defined as DFS(matrix, row, col).
| 26.339623 | 62 | 0.517192 |
3d251ae53a2e3c507f00e71685bd921ffb37b099 | 371 | py | Python | Scrapy_CrawlSpider/pipelines.py | SunDevilThor/Scrapy_CrawlSpider | 5472f31b4974a05999fb21f979ea22765165ff66 | [
"MIT"
] | null | null | null | Scrapy_CrawlSpider/pipelines.py | SunDevilThor/Scrapy_CrawlSpider | 5472f31b4974a05999fb21f979ea22765165ff66 | [
"MIT"
] | null | null | null | Scrapy_CrawlSpider/pipelines.py | SunDevilThor/Scrapy_CrawlSpider | 5472f31b4974a05999fb21f979ea22765165ff66 | [
"MIT"
] | null | null | null | # Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
# useful for handling different item types with a single interface
from itemadapter import ItemAdapter
class ScrapyCrawlspiderPipeline:
    """Pass-through item pipeline (unmodified Scrapy project template)."""
    def process_item(self, item, spider):
        # No transformation: return the item unchanged so any later pipeline
        # stages and exporters receive it.
        return item
| 26.5 | 66 | 0.773585 |
245d46d1545f6166b18a6990b8334073db0c747d | 596 | py | Python | classwork/02_27_2021/wikipedia.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | null | null | null | classwork/02_27_2021/wikipedia.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | null | null | null | classwork/02_27_2021/wikipedia.py | Katsute/Baruch-CIS-3120-Assignments | 2cb470a7e3b7bf2d49da520fdff079f832624c06 | [
"CC0-1.0"
] | 1 | 2022-01-12T18:19:11.000Z | 2022-01-12T18:19:11.000Z | from typing import List
import requests
from bs4 import BeautifulSoup, Tag
url: str = "https://en.wikipedia.org/wiki/List_of_United_States_cities_by_population"
soup: BeautifulSoup = BeautifulSoup(requests.get(url).content, "html.parser")
table: Tag = soup.find("table", class_="wikitable sortable")
# Column-major extract of the table: grid[i] collects the stripped text of
# column i across all data rows.
grid: List[List[str]] = []
for row in table.find_all("tr"):
    # Consistency fix: `findAll` is the deprecated bs4 alias of `find_all`;
    # the rest of this script already uses the snake_case spelling.
    cells: List[Tag] = row.find_all("td")
    if not cells:
        continue  # header rows hold <th> cells, so they yield no <td>
    for i, cell in enumerate(cells):
        while len(grid) <= i:
            grid.append([])
        grid[i].append(cell.text.rstrip())
print(grid)
| 23.84 | 85 | 0.651007 |
c55587a3d1679457f8fcc065f56a78240c927883 | 1,320 | py | Python | tests/etl_test/config1.py | topdown618/aliyun-log-python-sdk | 395949a5c307722e8223d926b366c50dacd32126 | [
"MIT"
] | 130 | 2017-03-31T07:41:46.000Z | 2022-03-27T14:31:22.000Z | tests/etl_test/config1.py | topdown618/aliyun-log-python-sdk | 395949a5c307722e8223d926b366c50dacd32126 | [
"MIT"
] | 170 | 2017-02-17T06:07:31.000Z | 2022-03-17T02:32:42.000Z | tests/etl_test/config1.py | topdown618/aliyun-log-python-sdk | 395949a5c307722e8223d926b366c50dacd32126 | [
"MIT"
] | 111 | 2017-01-16T07:35:01.000Z | 2022-03-18T03:31:32.000Z | from aliyun.log.etl_core import *
# The names below follow the aliyun ETL convention (<VERB>_<EVENT|FIELDS>_<label>);
# presumably they are discovered and applied in order by aliyun.log.etl_core
# -- confirm against its documentation before changing any name.
KEEP_EVENT_default = True
# Keep events whose `result` matches ok/pass, whose `status` equals 200, or
# which look like errors according to the raw payload.
KEEP_EVENT_pass_or_failure = [{"result": r"(?i)ok|pass"}, {"status": lambda v: int(v) == 200},
                              lambda e: (('status' in e and int(e['status']) > 200) or ('__raw__' in e and 'error' in e['__raw__']))]
DROP_EVENT_useless = {"data": "useless code.+|useless2 code.+", "raw": "bad data.+"}
KEEP_FIELDS_all = '.+'
DROP_FIELDS_none = ""
RENAME_FIELDS_simple = {"f1": "f1_new", "f2": "f2_new"}
def sls_eu_my_logic(event):
    # Trivial transform: tag the event and pass it through.
    event["hello"] = "world"
    return event
# Route events to topics by matching the start of their `data` field; the
# final catch-all labels everything else "unknown".
DISPATCH_EVENT_data = [
    ({"data": "^ETL_Information .+"}, {"__topic__": "etl_info"}),
    ({"data": "^Status .+"}, {"__topic__": "machine_status"}),
    ({"data": "^System Reboot .+"}, {"__topic__": "reboot_event"}),
    ({"data": "^Provision Firmware Download start .+"}, {"__topic__": "download"}),
    (True, {"__topic__": "unknown"})]
@condition({"__topic__": "etl_info"})
def sls_eu_parse_data(event):
    # Placeholder: currently a no-op for etl_info events.
    return event
# Per-topic transforms; currently each topic maps onto itself.
TRANSFORM_EVENT_data = [
    ({"__topic__": "etl_info"}, {"__topic__": "etl_info"}),
    ({"__topic__": "machine_status"}, {"__topic__": "machine_status"}),
    ({"__topic__": "reboot_event"}, {"__topic__": "reboot_event"}),
    ({"__topic__": "download"}, {"__topic__": "download"}),
    (True, {"__topic__": "unknown"})]
| 32.195122 | 139 | 0.60303 |
98d91e51765f9ac825a557d3ee98ba9036dcd529 | 1,314 | py | Python | altar/altar/bayesian/Controller.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | 6 | 2019-07-25T08:02:09.000Z | 2022-02-09T04:19:31.000Z | altar/altar/bayesian/Controller.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | null | null | null | altar/altar/bayesian/Controller.py | lijun99/altar | 92c2915de3de0c51138d382c8192ead7d6eed1a1 | [
"BSD-3-Clause"
] | null | null | null | # -*- python -*-
# -*- coding: utf-8 -*-
#
# michael a.g. aïvázis <michael.aivazis@para-sim.com>
#
# (c) 2013-2021 parasim inc
# (c) 2010-2021 california institute of technology
# all rights reserved
#
# get the package
import altar
# the controller protocol
class Controller(altar.protocol, family="altar.controllers"):
    """
    The protocol that all AlTar controllers must implement
    """
    # required user configurable state
    dispatcher = altar.simulations.dispatcher()
    dispatcher.doc = "the event dispatcher that activates the registered handlers"
    archiver = altar.simulations.archiver()
    archiver.doc = "the archiver of simulation state"
    # required behavior
    @altar.provides
    def posterior(self, model):
        """
        Sample the posterior distribution of the given {model}
        """
        # abstract: no body here -- concrete controllers supply the implementation
    @altar.provides
    def initialize(self, application):
        """
        Initialize me and my parts given an {application} context
        """
        # abstract: no body here -- concrete controllers supply the implementation
    # framework hooks
    @classmethod
    def pyre_default(cls, **kwds):
        """
        Supply a default implementation
        """
        # by default, we do CATMIP as encapsulated by the {Annealer} class
        from .Annealer import Annealer as default
        # and return it
        return default
# end of file
| 22.655172 | 82 | 0.648402 |
17ac5d72935336f1b7f5e2b725af80657980313d | 56 | py | Python | Codeforces/C_A_and_B_and_Team_Training.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/C_A_and_B_and_Team_Training.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | Codeforces/C_A_and_B_and_Team_Training.py | anubhab-code/Competitive-Programming | de28cb7d44044b9e7d8bdb475da61e37c018ac35 | [
"MIT"
] | null | null | null | n,m = map(int, input().split())
print(min(n,m,(n+m)//3)) | 28 | 31 | 0.553571 |
ca7910fcc167a0bd60189a3e6b673553dc9fe35f | 1,122 | py | Python | neural_network.py | petomuro/Snake_SL | 8f2e4d00733ef56dad0893c009510bb0f0a6c154 | [
"MIT"
] | null | null | null | neural_network.py | petomuro/Snake_SL | 8f2e4d00733ef56dad0893c009510bb0f0a6c154 | [
"MIT"
] | null | null | null | neural_network.py | petomuro/Snake_SL | 8f2e4d00733ef56dad0893c009510bb0f0a6c154 | [
"MIT"
] | null | null | null | import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import Sequential, layers, Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.optimizers import Adam
from datetime import datetime
class NeuralNetwork:
    """Builder for a small fully-connected Keras classifier.

    The architecture is fixed at an 8-feature input and a 4-way softmax
    output; depth, width and learning rate are configurable.
    """

    def __init__(self, no_of_layers, no_of_neurons, lr):
        self.no_of_layers = no_of_layers
        self.no_of_neurons = no_of_neurons
        self.lr = lr

    def model(self):
        """Build, compile and return the network (also stored as self.network)."""
        net = Sequential()
        net.add(Input(shape=(8,)))
        for _layer in range(self.no_of_layers):
            net.add(Dense(self.no_of_neurons, activation='relu'))
        net.add(Dense(4, activation='softmax'))
        net.summary()
        net.compile(optimizer=Adam(learning_rate=self.lr), loss='mse', metrics=['accuracy'])
        self.network = net
        return net

    def save_weights(self):
        """Save the model to a timestamped HDF5 file under weights/.

        NOTE(review): requires model() to have been called first, since that
        is where self.network is created.
        """
        stamp = datetime.now().strftime("%Y%m%d%H%M%S")
        self.network.save('weights/model' + str(stamp) + '.h5')

    def load_weights_(self):
        """Load one specific pre-trained weight snapshot into the network."""
        # NOTE(review): the snapshot path is hard-coded.
        self.network.load_weights('weights/model20210525141121.h5')
| 30.324324 | 79 | 0.668449 |
40c3d5013f776109c1c0dc10b1b24d2b69704641 | 793 | py | Python | wwwhero/migrations/0004_charactercooldown.py | IharSha/build_a_hero | 4a0f0aa701c205d04edd6bc801707a73bcc210f2 | [
"BSD-3-Clause"
] | null | null | null | wwwhero/migrations/0004_charactercooldown.py | IharSha/build_a_hero | 4a0f0aa701c205d04edd6bc801707a73bcc210f2 | [
"BSD-3-Clause"
] | 2 | 2021-01-08T11:53:33.000Z | 2021-09-23T07:04:20.000Z | wwwhero/migrations/0004_charactercooldown.py | IharSha/build_a_hero | 4a0f0aa701c205d04edd6bc801707a73bcc210f2 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-05 18:55
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment); avoid hand-editing
    # once this migration has been applied anywhere.
    dependencies = [
        ('wwwhero', '0003_characterselection'),
    ]
    operations = [
        # Creates wwwhero.CharacterCooldown: a per-character cooldown of a
        # given type ('Level' or 'Skill') lasting until `until`.
        migrations.CreateModel(
            name='CharacterCooldown',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('type', models.CharField(choices=[('Level', 'Level'), ('Skill', 'Skill')], default='Level', max_length=5)),
                ('until', models.DateTimeField()),
                ('character', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='wwwhero.character')),
            ],
        ),
    ]
| 33.041667 | 124 | 0.604035 |
543ddf40edddbc3ea64f63ba1239492159469b09 | 3,812 | py | Python | mmtbx/regression/real_space_refine_chain/tst_01.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 2 | 2021-03-18T12:31:57.000Z | 2022-03-14T06:27:06.000Z | mmtbx/regression/real_space_refine_chain/tst_01.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | null | null | null | mmtbx/regression/real_space_refine_chain/tst_01.py | hbrunie/cctbx_project | 2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5 | [
"BSD-3-Clause-LBNL"
] | 1 | 2021-03-26T12:52:30.000Z | 2021-03-26T12:52:30.000Z | from __future__ import absolute_import, division, print_function
import time, os
import iotbx.pdb
import mmtbx.utils
from mmtbx import monomer_library
from scitbx.array_family import flex
import mmtbx.refinement.real_space.explode_and_refine
from mmtbx.geometry_restraints import reference
from iotbx import reflection_file_reader
import libtbx.load_env
def ccp4_map(crystal_symmetry, file_name, map_data):
  """Write map_data to file_name as a CCP4/MRC map with the given symmetry."""
  from iotbx import mrcfile
  mrcfile.write_ccp4_map(
    file_name=file_name,
    unit_cell=crystal_symmetry.unit_cell(),
    space_group=crystal_symmetry.space_group(),
    #gridding_first=(0,0,0),# This causes a bug (map gets shifted)
    #gridding_last=n_real, # This causes a bug (map gets shifted)
    map_data=map_data,
    labels=flex.std_string([""]))
def run(prefix="tst_00"):
  """Real-space refine a deliberately poor model against a precomputed map.

  Reads poor_model.pdb / poor_map.mtz from the regression data, builds the
  target map and geometry restraints, runs explode_and_refine, and writes
  <prefix>_refined.pdb plus an all-states trajectory file.
  """
  # Poor model that we want to refine so it matches the answer
  pdb_file = libtbx.env.find_in_repositories(
    relative_path="mmtbx/regression/real_space_refine_chain/poor_model.pdb",
    test=os.path.isfile)
  mtz_file = libtbx.env.find_in_repositories(
    relative_path="mmtbx/regression/real_space_refine_chain/poor_map.mtz",
    test=os.path.isfile)
  pdb_inp = iotbx.pdb.input(file_name=pdb_file)
  ph_poor = pdb_inp.construct_hierarchy()
  ph_poor.atoms().reset_i_seq()
  xrs_poor = pdb_inp.xray_structure_simple()
  # Initialize states accumulator
  states = mmtbx.utils.states(pdb_hierarchy=ph_poor, xray_structure=xrs_poor)
  states.add(sites_cart = xrs_poor.sites_cart())
  # Compute target map
  mas = reflection_file_reader.any_reflection_file(file_name =
    mtz_file).as_miller_arrays()
  assert len(mas)==1
  fc = mas[0]
  fft_map = fc.fft_map(resolution_factor = 0.25)
  fft_map.apply_sigma_scaling()
  target_map_data = fft_map.real_map_unpadded()
  # Side effect: dumps the target map to map.ccp4 in the working directory.
  ccp4_map(crystal_symmetry=fc.crystal_symmetry(), file_name="map.ccp4",
    map_data=target_map_data)
  # Build geometry restraints
  params = monomer_library.pdb_interpretation.master_params.extract()
  params.nonbonded_weight=200
  #params.peptide_link.ramachandran_restraints=True
  #params.peptide_link.rama_potential="oldfield"
  #print dir(params)
  #STOP()
  processed_pdb_file = monomer_library.pdb_interpretation.process(
    mon_lib_srv = monomer_library.server.server(),
    ener_lib = monomer_library.server.ener_lib(),
    file_name = pdb_file,
    params = params,
    #crystal_symmetry = fc.crystal_symmetry(),
    strict_conflict_handling = True,
    force_symmetry = True,
    log = None)
  geometry = processed_pdb_file.geometry_restraints_manager(
    show_energies = False,
    plain_pairs_radius = 5,
    assume_hydrogens_all_missing = True)
  restraints_manager = mmtbx.restraints.manager(
    geometry = geometry,
    normalization = True)
  #for a in ph_answer.atoms():
  #  print a.i_seq, a.name, a.xyz
  #STOP()
  #ref_xyz = flex.vec3_double([(14.323, 35.055, 14.635), (16.099, 12.317, 16.37)])
  #selection = flex.size_t([1,76])
  #
  #restraints_manager.geometry.adopt_reference_coordinate_restraints_in_place(
  #  reference.add_coordinate_restraints(
  #    sites_cart = ref_xyz,
  #    selection = selection,
  #    sigma = 0.1))
  # Do real-space refinement
  t0=time.time()
  ear = mmtbx.refinement.real_space.explode_and_refine.run(
    xray_structure = xrs_poor,
    pdb_hierarchy = ph_poor,
    map_data = target_map_data,
    restraints_manager = restraints_manager,
    states = states)
  print("Time: %6.4f"%(time.time()-t0))
  ear.pdb_hierarchy.write_pdb_file(file_name="%s_refined.pdb"%prefix)
  states.write(file_name="%s_refined_all_states.pdb"%prefix)
if (__name__ == "__main__"):
  run()
  print("OK")
| 37.372549 | 82 | 0.716422 |
fad723fb5616215fe627bf19aeb6197ea1be17db | 8,915 | py | Python | k8syaml/k8s_yaml_obj.py | paoloalba/deployer_k8s | 1dcee64122dcad8a72a3c434550e8f9d73828442 | [
"MIT"
] | 2 | 2020-12-01T20:37:22.000Z | 2020-12-02T20:02:22.000Z | k8syaml/k8s_yaml_obj.py | paoloalba/deployer_k8s | 1dcee64122dcad8a72a3c434550e8f9d73828442 | [
"MIT"
] | null | null | null | k8syaml/k8s_yaml_obj.py | paoloalba/deployer_k8s | 1dcee64122dcad8a72a3c434550e8f9d73828442 | [
"MIT"
] | null | null | null | import json
import yaml
import copy
from yaml import YAMLObject
from enum import Enum
from .encode_base64 import encodeb64
class ConversionType(Enum):
    """Rendering modes for env-var values (see K8EnvironmentVariable)."""
    quoted_string = 1    # wrap the value in literal double quotes
    integer_string = 2   # pass the value through str()
    normal = 3           # use the value unchanged
class K8Selector(YAMLObject):
    """`matchLabels` selector fragment of a workload spec."""
    def __init__(self, matchLabels_dict):
        # Deep copy so later mutation of the caller's dict cannot leak in.
        self.matchLabels = copy.deepcopy(matchLabels_dict)
class K8Labels(YAMLObject):
    """`labels` fragment, used inside template metadata (see K8Template)."""
    def __init__(self, labels):
        self.labels = copy.deepcopy(labels)
class K8EnvironmentVariable(YAMLObject):
    """Literal name/value environment-variable entry for a container spec.

    The supplied value is rendered according to `convert_type`
    (see ConversionType); an unknown conversion type raises.
    """
    def __init__(self, name, value, convert_type):
        self.name = name
        if convert_type == ConversionType.normal:
            converted = value
        elif convert_type == ConversionType.integer_string:
            converted = str(value)
        elif convert_type == ConversionType.quoted_string:
            converted = "\"{0}\"".format(value)
        else:
            raise Exception("Unrecognised conversion type: {0}".format(convert_type))
        self.value = converted
class K8EnvironmentVariableContainerRuntime(YAMLObject):
    """Env var resolved by Kubernetes at runtime from a pod field (`fieldRef`)."""
    def __init__(self, name, fieldPath):
        self.name = name
        self.valueFrom = {"fieldRef": {"fieldPath": fieldPath}}
class K8EnvironmentVariableFromSecrets(YAMLObject):
    """Env var whose value is read from a key of a Kubernetes secret."""
    def __init__(self, name, secret_name, secret_key):
        self.name = name
        self.valueFrom = {"secretKeyRef": {"name": secret_name, "key": secret_key}}
class K8ContainerVolumeMount(YAMLObject):
    """`volumeMounts` entry pairing a volume name with its mount path."""
    def __init__(self, name, mountPath):
        self.name = name
        # NOTE(review): the format() call only coerces mountPath to str; the
        # commented direct assignment below was presumably the earlier form.
        self.mountPath = "{0}".format(mountPath)
        # self.mountPath = mountPath
class K8Container(YAMLObject):
    """Container entry of a pod spec: image, env vars, mounts and resources."""

    def __init__(self,
                 name,
                 registry,
                 image_name,
                 image_tag,
                 env_var_list,
                 env_var_list_runtime,
                 env_var_list_fromsecrets,
                 vol_mounts_list,
                 res_handling):
        self.name = name
        self.image = "{0}/{1}:{2}".format(registry, image_name, image_tag)
        self.imagePullPolicy = "Always"
        self.env = self.get_env_vars(env_var_list,
                                     env_var_list_runtime,
                                     env_var_list_fromsecrets)
        # Optional sections are only emitted when data was supplied.
        if vol_mounts_list:
            self.volumeMounts = self.get_vol_mounts(vol_mounts_list)
        if res_handling:
            self.resources = res_handling.__dict__

    @staticmethod
    def get_env_vars(env_var_list, env_var_list_runtime, env_var_list_fromsecrets):
        """Flatten the three env-var tuple lists into one list of plain dicts."""
        plain = [K8EnvironmentVariable(e[0], e[1], e[2]).__dict__
                 for e in env_var_list]
        runtime = [K8EnvironmentVariableContainerRuntime(e[0], e[1]).__dict__
                   for e in env_var_list_runtime]
        from_secrets = [K8EnvironmentVariableFromSecrets(e[0], e[1], e[2]).__dict__
                        for e in env_var_list_fromsecrets]
        return plain + runtime + from_secrets

    @staticmethod
    def get_vol_mounts(vol_mounts_list):
        """Convert (name, mountPath) tuples into volumeMounts dicts."""
        return [K8ContainerVolumeMount(v[0], v[1]).__dict__ for v in vol_mounts_list]
class K8Volume(YAMLObject):
    """Azure File share volume definition for a pod spec."""
    def __init__(self,
                 name,
                 azureFile_secret_name,
                 azureFile_share_name):
        self.name = name
        self.azureFile = {}
        self.azureFile["secretName"] = azureFile_secret_name
        self.azureFile["shareName"] = azureFile_share_name
        # Always mounted read/write.
        self.azureFile["readOnly"] = False
class K8Template(YAMLObject):
    """Pod template: optional labels metadata plus a pre-built spec mapping."""
    def __init__(self,
                 metadata,
                 spec):
        # Falsy metadata (None / {}) omits the metadata key entirely.
        if metadata:
            self.metadata = K8Labels(metadata).__dict__
        self.spec = spec
class K8Port(YAMLObject):
    """Service port entry; protocol is fixed to TCP."""
    def __init__(self,
                 name,
                 port,
                 targetPort):
        self.name = name
        self.port = port
        self.targetPort = targetPort
        self.protocol = "TCP"
class K8ResourceHandling(YAMLObject):
    """Container resource `requests` (cpu/memory); no limits are set."""
    def __init__(self,
                 cpu,
                 mem):
        self.requests = {}
        self.requests["cpu"] = cpu
        self.requests["memory"] = mem
class K8SecretName(YAMLObject):
    """Name-only reference, e.g. an `imagePullSecrets` entry."""
    def __init__(self,
                 name):
        self.name = name
class K8BaseResource(YAMLObject):
    """Common base for the serialisable Kubernetes resources in this module."""

    yaml_tag = u'!BaseResource'

    def __init__(self,
                 apiVersion,
                 kind,
                 metadata):
        self.apiVersion = apiVersion
        self.kind = kind
        # Defensive copy: callers keep ownership of their metadata dict.
        self.metadata = copy.deepcopy(metadata)

    @staticmethod
    def dictify_list(input_list):
        """Return the plain __dict__ of every object in the list."""
        return [item.__dict__ for item in input_list]

    @staticmethod
    def write_to_file(file_path, input_resources_list):
        """Dump the resources as multi-document YAML, stripping helper tags."""
        # Python-side class tags (registered via YAMLObject) that must not
        # appear in the emitted manifest.
        tag_strings = ["!Service", "!Deployment", "!Job", "!Secret"]
        with open(file_path, "w") as f:
            yml_string = yaml.dump_all(input_resources_list, None)
            for tag in tag_strings:
                yml_string = yml_string.replace(tag, "")
            if yml_string.startswith("\n"):
                yml_string = yml_string[1:]
            # NOTE(review): presumably cleans quoting artefacts left around
            # the removed tags -- confirm against actual dump output.
            yml_string = yml_string.replace("\'\"", "\'")
            yml_string = yml_string.replace("\"\'", "\'")
            f.write(yml_string)
class K8Deployment(K8BaseResource):
    """YAML-serialisable Kubernetes Deployment resource."""

    yaml_tag = u'!Deployment'

    def __init__(self, metadata={}):
        # NOTE(review): the mutable default is only safe because the base
        # class deep-copies `metadata` instead of storing it.
        super(K8Deployment, self).__init__("apps/v1", "Deployment", metadata)

    def fill_specification(self, replicas, revisionHistoryLimit, selector, template):
        """Populate self.spec from replica count, history limit, selector, template."""
        self.spec = {
            "replicas": replicas,
            "revisionHistoryLimit": revisionHistoryLimit,
            "selector": selector.__dict__,
            "template": template.__dict__,
        }

    def get_template_spec(self, container_list, volume_list, imgPullSecretName=None):
        """Build the pod-template spec mapping; optional parts are omitted."""
        template_spec = {"containers": self.dictify_list(container_list)}
        if volume_list:
            template_spec["volumes"] = self.dictify_list(volume_list)
        if imgPullSecretName:
            template_spec["imagePullSecrets"] = self.dictify_list([imgPullSecretName])
        return template_spec
class K8Service(K8BaseResource):
    """YAML-serialisable Kubernetes Service resource."""

    yaml_tag = u'!Service'

    def __init__(self, metadata={}):
        # NOTE(review): mutable default is safe only because the base class
        # deep-copies `metadata`.
        super(K8Service, self).__init__("v1", "Service", metadata)

    def fill_specification(self, port_list, selector_dict, svc_type=None,
                           static_ip=None, ip_whitelist=[]):
        """Populate self.spec from ports, a selector and load-balancer options."""
        specification = {
            "ports": self.dictify_list(port_list),
            "selector": selector_dict,
        }
        if svc_type and svc_type.lower() == "loadbalancer":
            specification["type"] = svc_type
            specification["loadBalancerIP"] = static_ip
            specification["externalTrafficPolicy"] = "Local"
            if ip_whitelist:
                # Whitelist holds single addresses; widen each to a /32 range.
                specification["loadBalancerSourceRanges"] = [addr + "/32" for addr in ip_whitelist]
        self.spec = specification
class K8Job(K8BaseResource):
    """YAML-serialisable Kubernetes Job resource."""

    yaml_tag = u'!Job'

    def __init__(self, metadata={}):
        # NOTE(review): mutable default is safe only because the base class
        # deep-copies `metadata`.
        super(K8Job, self).__init__("batch/v1", "Job", metadata)

    def fill_specification(self, backoffLimit, template):
        """Set self.spec with the retry limit and the pod template."""
        self.spec = {
            "backoffLimit": backoffLimit,
            "template": template.__dict__,
        }

    def get_template_spec(self, container_list, volume_list,
                          node_selector=None, imgPullSecretName=None):
        """Build the pod-template spec mapping; optional parts are omitted."""
        template_spec = {
            "restartPolicy": "Never",  # fixed restart policy for job pods
            "containers": self.dictify_list(container_list),
        }
        if volume_list:
            template_spec["volumes"] = self.dictify_list(volume_list)
        if node_selector:
            template_spec["nodeSelector"] = {"agentpool": node_selector}
        if imgPullSecretName:
            template_spec["imagePullSecrets"] = self.dictify_list([imgPullSecretName])
        return template_spec
class K8Secret(K8BaseResource):
    """YAML-serialisable Kubernetes Secret resource."""

    yaml_tag = u'!Secret'

    def __init__(self, metadata={}, input_type="Opaque"):
        # NOTE(review): mutable default is safe only because the base class
        # deep-copies `metadata`.
        super(K8Secret, self).__init__("v1", "Secret", metadata)
        self.type = input_type

    def fill_data(self, input_data_dict):
        """Store every value base64-encoded (via encodeb64) under self.data."""
        self.data = {key: encodeb64(raw) for key, raw in input_data_dict.items()}
| 31.953405 | 94 | 0.592036 |
7855708544753d413de62f54a7f509a405281942 | 1,620 | py | Python | unit_tests/MLP/test_network.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | 1 | 2019-01-16T13:45:47.000Z | 2019-01-16T13:45:47.000Z | unit_tests/MLP/test_network.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | 2 | 2020-03-24T16:17:06.000Z | 2020-03-30T23:53:16.000Z | unit_tests/MLP/test_network.py | stovorov/NaiveNeurals | 88d91f3d4d39859eef372285f093643a447571a4 | [
"MIT"
] | null | null | null | """Module contains test for MLP classes"""
from NaiveNeurals.MLP.network import NeuralNetwork
# Column-per-sample training pairs; targets are the XOR of the two inputs.
# NOTE(review): inputs/targets are not referenced by the tests visible below.
inputs = [[0, 0, 1, 1], [0, 1, 0, 1]]
targets = [[0, 1, 1, 0]]
# Hand-crafted weight set (2 inputs -> 3 hidden nodes -> 1 output node with
# sigmoid activations) consumed by nn.load_model() in the test below.
testing_model = {
    "input": {
        "node_0": {
            "weight_0": 1
        },
        "node_1": {
            "weight_0": 1
        }
    },
    "hidden_1": {
        "node_0": {
            "bias": 0,
            "weight_0": 5,
            "weight_1": 5,
        },
        "node_1": {
            "bias": 0,
            "weight_0": -5,
            "weight_1": -5,
        },
        "node_2": {
            "bias": 0,
            "weight_0": -5,
            "weight_1": -5,
        },
    },
    "output": {
        "node_0": {
            "bias": -15,
            "weight_0": 10,
            "weight_1": 10,
            "weight_2": 10,
        },
    },
    "hidden_act_func": "sigmoid",
    "output_act_func": "sigmoid",
    }
def test_neural_network():
    """Test NeuralNetwork"""
    # Build a 2-in / 2-out network with one hidden layer of 3 nodes and
    # explicit biases, then verify the wiring that setup_network created.
    nn = NeuralNetwork()
    nn.setup_network(input_data_size=2, output_data_size=2, hidden_layer_number_of_nodes=3,
                     hidden_layer_bias=-1, output_layer_bias=-1)
    assert nn.hidden_layer.number_of_nodes == 3
    assert nn.hidden_layer.weights.ndim == 2
    assert nn.hidden_layer.bias == -1
    # sigmoid appears to be the default activation (not passed explicitly).
    assert nn.hidden_layer.activation_function.label == 'sigmoid'
    assert nn.output_layer.number_of_nodes == 2
    assert nn.output_layer.bias == -1
    assert nn.output_layer.activation_function.label == 'sigmoid'
def test_neural_network_load_model():
    """Test loading of a model"""
    # Smoke test only: loading the hand-crafted model dict must not raise.
    # NOTE(review): no assertions -- consider verifying the loaded weights.
    nn = NeuralNetwork()
    nn.load_model(testing_model)
| 24.923077 | 91 | 0.525309 |
b7bcde4e676aa11b86c12f97134641053cab602b | 26 | py | Python | Dashboard/backend/tests/__init__.py | CESNET/Nemea-GUI | 5ab626a23fa8a3cbd58968dfd7bc8ae2263d0595 | [
"BSD-3-Clause"
] | null | null | null | Dashboard/backend/tests/__init__.py | CESNET/Nemea-GUI | 5ab626a23fa8a3cbd58968dfd7bc8ae2263d0595 | [
"BSD-3-Clause"
] | null | null | null | Dashboard/backend/tests/__init__.py | CESNET/Nemea-GUI | 5ab626a23fa8a3cbd58968dfd7bc8ae2263d0595 | [
"BSD-3-Clause"
] | 1 | 2019-06-05T08:04:04.000Z | 2019-06-05T08:04:04.000Z | from .db_data_gen import * | 26 | 26 | 0.807692 |
a7c84377ed49c8dd0eb04a0eaedf72445184d87b | 3,640 | py | Python | include/scons/test/MSVS/vs-14.1-files.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | include/scons/test/MSVS/vs-14.1-files.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | 4 | 2019-04-11T16:27:45.000Z | 2019-04-11T23:56:30.000Z | include/scons/test/MSVS/vs-14.1-files.py | SWEN-712/screen-reader-brandonp728 | e30c25ad2d10ce632fac0548696a61a872328f59 | [
"bzip2-1.0.6"
] | null | null | null | #!/usr/bin/env python
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
"""
Test that we can generate Visual Studio 14.1 project (.vcxproj) and
solution (.sln) files that look correct.
"""
import os
import TestSConsMSVS
# `test` drives an end-to-end SCons run inside a scratch work directory.
test = TestSConsMSVS.TestSConsMSVS()
host_arch = test.get_vs_host_arch()
# Make the test infrastructure think we have this version of MSVS installed.
test._msvs_versions = ['14.1']
expected_slnfile = TestSConsMSVS.expected_slnfile_14_1
expected_vcprojfile = TestSConsMSVS.expected_vcprojfile_14_1
SConscript_contents = TestSConsMSVS.SConscript_contents_14_1
test.write('SConstruct', SConscript_contents%{'HOST_ARCH': host_arch})
# Generate the project file and verify it plus its .filters companion.
test.run(arguments="Test.vcxproj")
test.must_exist(test.workpath('Test.vcxproj'))
test.must_exist(test.workpath('Test.vcxproj.filters'))
vcxproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '14.1', None, 'SConstruct')
# don't compare the pickled data
assert vcxproj[:len(expect)] == expect, test.diff_substr(expect, vcxproj)
# Generating the project also produces the solution file.
test.must_exist(test.workpath('Test.sln'))
sln = test.read('Test.sln', 'r')
expect = test.msvs_substitute(expected_slnfile, '14.1', None, 'SConstruct')
# don't compare the pickled data
assert sln[:len(expect)] == expect, test.diff_substr(expect, sln)
# Cleaning (-c) must remove all three generated files ...
test.run(arguments='-c .')
test.must_not_exist(test.workpath('Test.vcxproj'))
test.must_not_exist(test.workpath('Test.vcxproj.filters'))
test.must_not_exist(test.workpath('Test.sln'))
# ... and rebuilding must bring them all back.
test.run(arguments='Test.vcxproj')
test.must_exist(test.workpath('Test.vcxproj'))
test.must_exist(test.workpath('Test.vcxproj.filters'))
test.must_exist(test.workpath('Test.sln'))
# Cleaning via the solution target removes everything as well.
test.run(arguments='-c Test.sln')
test.must_not_exist(test.workpath('Test.vcxproj'))
test.must_not_exist(test.workpath('Test.vcxproj.filters'))
test.must_not_exist(test.workpath('Test.sln'))
# Test that running SCons with $PYTHON_ROOT in the environment
# changes the .vcxproj output as expected.
os.environ['PYTHON_ROOT'] = 'xyzzy'
python = os.path.join('$(PYTHON_ROOT)', os.path.split(TestSConsMSVS.python)[1])
test.run(arguments='Test.vcxproj')
test.must_exist(test.workpath('Test.vcxproj'))
vcxproj = test.read('Test.vcxproj', 'r')
expect = test.msvs_substitute(expected_vcprojfile, '14.1', None, 'SConstruct',
                              python=python)
# don't compare the pickled data
assert vcxproj[:len(expect)] == expect, test.diff_substr(expect, vcxproj)
test.pass_test()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| 33.394495 | 79 | 0.762088 |
fed46453021b165d2e7aefa929aa1f68907c7ebd | 411 | py | Python | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/overseer/plugins/network_interfaces.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 46 | 2017-05-15T11:15:08.000Z | 2018-07-02T03:32:52.000Z | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/overseer/plugins/network_interfaces.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | null | null | null | Vault7/Lost-in-Translation/windows/Resources/Ops/PyScripts/overseer/plugins/network_interfaces.py | dendisuhubdy/grokmachine | 120a21a25c2730ed356739231ec8b99fc0575c8b | [
"BSD-3-Clause"
] | 24 | 2017-05-17T03:26:17.000Z | 2018-07-09T07:00:50.000Z |
import dsz
MENU_TEXT = 'List all network interfaces'


def main():
    """Queue background devicequery/performance runs that log network interface data."""
    dsz.ui.Echo('Running network interface commands...', dsz.GOOD)
    # Suppress per-command echoing while the background jobs are queued.
    dsz.control.echo.Off()
    for command in ('background log devicequery -deviceclass net',
                    'background log performance -data NetworkInterface'):
        dsz.cmd.Run(command, dsz.RUN_FLAG_RECORD)
    dsz.control.echo.On()


if (__name__ == '__main__'):
    main()
c196fa1fac069b963ebe62d3355f45222c12cab7 | 2,414 | py | Python | bank/admin.py | WillieShi/security-protocol | 7b55a06315bfbd61b7fe811b185624259ae5900a | [
"Apache-2.0"
] | null | null | null | bank/admin.py | WillieShi/security-protocol | 7b55a06315bfbd61b7fe811b185624259ae5900a | [
"Apache-2.0"
] | null | null | null | bank/admin.py | WillieShi/security-protocol | 7b55a06315bfbd61b7fe811b185624259ae5900a | [
"Apache-2.0"
] | null | null | null | """ Bank Server
This module implements the admin access to the database
"""
import db
import cmd
import json
import os
class Admin(cmd.Cmd, object):
    """Interactive admin shell for the bank database.

    Each ``do_*`` method is a shell command; its docstring doubles as the
    help text shown by ``cmd``'s built-in ``help`` command.
    """

    intro = 'Welcome to the Admin Interface, type help or ? to list commands.\n'
    prompt = 'admin$ '

    def __init__(self):
        super(Admin, self).__init__()
        self.db = db.DB()
        # Lazily create the schema so a fresh checkout works out of the box.
        if not self.db.exists():
            self.db.init_db()

    def do_add_atm(self, args):
        """Usage: add_atm config_outfile [bill_file]"""
        # Bug fix: the docstring previously said "add_atm atm_id", which
        # contradicted the actual arguments (a config output path and an
        # optional bill file), misleading `help add_atm`.
        args = args.split(" ")
        if len(args) < 1 or args[0] == '':
            print("Usage: add_atm config_outfile [bill_file]")
            return
        uuid = os.urandom(36)
        cfg = {"uuid": uuid.encode('hex'), "dispensed": 0}
        if len(args) == 2:
            with open(args[1], 'r') as f:
                cfg["bills"] = f.read().split("\n")
        # Write the ATM provisioning config for the client to pick up.
        with open(args[0], 'w') as f:
            f.write(json.dumps(cfg))
        if self.db.admin_create_atm(uuid):
            # Bug fix: previously hex-encoded the config *filename* instead of
            # the ATM's uuid, printing a meaningless identifier.
            print("ATM %s added" % uuid.encode('hex')[:16])
        else:
            print("ATM add failed!")

    def do_add_card(self, args):
        """Usage: add_card card_id balance"""
        args = args.split(" ")
        if len(args) != 2:
            print("Usage: add_card card_id balance")
            return
        try:
            int(args[1])
        except ValueError:
            print("Error: balance must be a valid integer")
            # Bug fix: missing `return` meant the account was still created
            # with the invalid balance after printing the error.
            return
        if self.db.admin_create_account(args[0], args[1]):
            print("Card %s added" % args[0])
        else:
            print("Card add failed!")

    def do_check_balance(self, args):
        """Usage: check_balance card_id"""
        args = args.split(" ")
        if len(args) != 1 or args[0] == '':
            print("Usage: check_balance card_id")
            return
        b = self.db.get_balance(args[0])
        if b:
            print("Card %s balance: %d" % (args[0], int(b)))
        else:
            print("Card %s does not exist!" % args[0])

    def do_update_balance(self, args):
        """Usage: update_balance card_id balance"""
        args = args.split(" ")
        if len(args) != 2:
            print("Usage: update_balance card_id balance")
            return
        if self.db.admin_set_balance(args[0], args[1]):
            print("Updated balance")
        else:
            print("Balance update failed!")


if __name__ == "__main__":
    Admin().cmdloop()
| 27.747126 | 80 | 0.527341 |
96957f71013bc8f3e2d07ea2a7f0a2eb59f53d14 | 1,507 | py | Python | automl/beta/video_object_tracking_create_dataset_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 2 | 2020-09-19T04:22:52.000Z | 2020-09-23T14:04:17.000Z | automl/beta/video_object_tracking_create_dataset_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 1 | 2020-07-24T19:18:29.000Z | 2020-07-24T19:45:23.000Z | automl/beta/video_object_tracking_create_dataset_test.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 2 | 2020-09-13T03:47:22.000Z | 2020-09-23T14:04:19.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import uuid
from google.cloud import automl_v1beta1 as automl
import pytest
import video_object_tracking_create_dataset
PROJECT_ID = os.environ["AUTOML_PROJECT_ID"]
# Written by the test after dataset creation; read by the teardown fixture
# to delete that dataset. None until a test has run.
DATASET_ID = None
@pytest.fixture(scope="function", autouse=True)
def teardown():
    """Run each test, then delete the dataset whose id it stored in DATASET_ID."""
    yield
    # Delete the created dataset and wait for the long-running operation
    # to finish before the next test starts.
    client = automl.AutoMlClient()
    dataset_full_id = client.dataset_path(
        PROJECT_ID, "us-central1", DATASET_ID
    )
    response = client.delete_dataset(dataset_full_id)
    response.result()
def test_video_classification_create_dataset(capsys):
    """Create a video object tracking dataset and capture its id for cleanup."""
    # create dataset with a unique, length-limited name
    dataset_name = "test_{}".format(uuid.uuid4()).replace("-", "")[:32]
    video_object_tracking_create_dataset.create_dataset(
        PROJECT_ID, dataset_name
    )
    out, _ = capsys.readouterr()
    assert "Dataset id: " in out
    # Get the dataset id for deletion by the teardown fixture.
    # NOTE(review): presumably the second printed line is "Dataset id: <id>",
    # so the third whitespace token is the id — confirm against the sample's output.
    global DATASET_ID
    DATASET_ID = out.splitlines()[1].split()[2]
| 28.980769 | 74 | 0.736563 |
3350515acbfda6d2034196986fb49dcfb1e25369 | 4,360 | py | Python | functions/plots.py | danibachar/idc_CANLab | 7bf6fc9b1908d5a2608f070770ccf6536be0686f | [
"MIT"
] | null | null | null | functions/plots.py | danibachar/idc_CANLab | 7bf6fc9b1908d5a2608f070770ccf6536be0686f | [
"MIT"
] | 1 | 2020-08-18T13:48:07.000Z | 2020-08-18T13:48:07.000Z | functions/plots.py | danibachar/idc_CANLab | 7bf6fc9b1908d5a2608f070770ccf6536be0686f | [
"MIT"
] | null | null | null | from bokeh.plotting import figure, show, output_file, save
from bokeh.layouts import gridplot
from bokeh.models import ColumnDataSource,Range1d
from .core.storage import upload_file
from .utils import chunks, build_remote_and_local_file_names
def plots_by_group_and_features(df, groupping_col_name, y_name, x_name, grid_features, width=300, height=200):
    """Render per-group feature grids of mean-`y_name` line plots and upload them.

    The dataframe is grouped by ``groupping_col_name``; every non-empty group
    contributes a set of plots (one per feature-value pair, built by
    ``_plots_array_by``). All plots are saved as a single HTML grid and the
    file is uploaded. Returns ``(plots, remote_url)``.
    """
    plots = []
    # groupby yields (key, sub-dataframe) pairs; the key itself is unused.
    for _, group_df in df.groupby(by=[groupping_col_name]):
        group_col = group_df[groupping_col_name]
        if len(group_col) == 0:
            continue
        # Fix: removed `parsed_feature_set = set()` — it was assigned every
        # iteration and never read (dead local; _plots_array_by keeps its own).
        group_id = groupping_col_name + "=" + str(group_col.iloc[0])
        plots += _plots_array_by(group_df, grid_features, y_name, x_name, width, height, group_id)
    # Total number of distinct feature values drives the grid row width.
    grid_count = sum(len(df[f].unique()) for f in grid_features)
    remote_file_name, local_file_name = build_remote_and_local_file_names("groups_by_features","html")
    output_file(local_file_name, mode='inline')
    local_url = save(gridplot(chunks(plots, len(grid_features)*(grid_count+len(grid_features)))))
    remote_url = upload_file(local_url, remote_file_name)
    return plots, remote_url
def plot_general_avg(df, y_name, x_name, width=600, height=400):
    """Plot the mean of ``y_name`` per ``x_name`` value, save and upload the HTML."""
    y_max = df[y_name].max()
    y_min = df[y_name].min()
    averages = df.groupby(by=[x_name])[y_name].mean().reset_index()
    plot = figure(
        plot_width=width, plot_height=height,
        title="Avarage {}".format(y_name)
    )
    plot.line(x=x_name, y=y_name, source=ColumnDataSource(averages))
    # Pin the y-axis to the raw data's min/max so the average line has context.
    plot.y_range = Range1d(y_min, y_max)
    remote_file_name, local_file_name = build_remote_and_local_file_names("general_avg","html")
    output_file(local_file_name, mode='inline')
    local_url = save(plot)
    remote_url = upload_file(local_url, remote_file_name)
    return plot, remote_url
def plot_general_avg_grid(df, y_name, x_name, grid_features, width=400, height=400):
    """Build average plots for every feature-value pair, save them as one HTML grid.

    Returns ``(plots, remote_url)`` where ``plots`` is the flat plot list and
    ``remote_url`` points at the uploaded grid HTML.
    """
    plots = _plots_array_by(df, grid_features, y_name, x_name, width, height)
    # Fix: removed the dead `grid_count` accumulation loop — it was computed
    # but never used (the grid width below depends only on len(grid_features)).
    remote_file_name, local_file_name = build_remote_and_local_file_names("general_avg_grid","html")
    output_file(local_file_name, mode='inline')
    local_url = save(gridplot(chunks(plots, len(grid_features))))
    remote_url = upload_file(local_url, remote_file_name)
    return plots, remote_url
def _plots_array_by(df, grid_features, y_name, x_name, width, height, group_id=""):
    """Build one small line plot of mean(y_name) vs x_name for every unordered
    pair of feature values across `grid_features`, skipping duplicates and
    pairs that never co-occur in `df`. Returns the flat list of figures.
    """
    # All plots share the same y-range so they are visually comparable.
    max_y_value = df[y_name].max()
    min_y_value = df[y_name].min()
    plots = []
    # Tracks "valA X valB" keys in both orders so each unordered pair is
    # plotted only once.
    parsed_feature_set = set()
    for i in range(len(grid_features)):
        for j in range(len(grid_features)):
            feature_name = grid_features[i]
            feature_values = df[feature_name].unique()
            other_feature_name = grid_features[j]
            other_feature_values = df[other_feature_name].unique()
            for f_val in feature_values:
                for of_val in other_feature_values:
                    fkey1 = "{} X {}".format(f_val, of_val)
                    fkey2 = "{} X {}".format(of_val, f_val)
                    if fkey1 in parsed_feature_set or fkey2 in parsed_feature_set:
                        continue
                    parsed_feature_set.add(fkey1)
                    parsed_feature_set.add(fkey2)
                    title = group_id + "_" + feature_name + "=" + str(f_val) + "/" + other_feature_name + "=" + str(of_val)
                    # Rows where both feature values hold simultaneously.
                    selector = (df[feature_name] == f_val) & (df[other_feature_name] == of_val)
                    # Skip value combinations that never co-occur.
                    if True not in list(selector.unique()):
                        continue
                    gg = df[selector]
                    y = gg.groupby(by=[x_name])[y_name].mean().reset_index()
                    raw_data_source = ColumnDataSource(y)
                    p = figure(
                        plot_width=width, plot_height=height,
                        title=title,
                        x_axis_label=x_name,
                        y_axis_label=y_name,
                    )
                    p.y_range=Range1d(min_y_value, max_y_value)
                    # Tiny fonts keep the many-small-plots grid readable.
                    p.title.text_font_size="7px"
                    p.xaxis.axis_label_text_font_size = "7px"
                    p.yaxis.axis_label_text_font_size = "7px"
                    p.line(x=x_name, y=y_name, source=raw_data_source)
                    plots.append(p)
    return plots
| 40.37037 | 123 | 0.643807 |
fd8c23672343fcd341258315447dd3b1275b4961 | 3,144 | py | Python | cirq/google/api/v2/program.py | muneerqu/Cirq | 729d993312467d8ea9127103f9e15ae2391e7d85 | [
"Apache-2.0"
] | 1 | 2020-07-14T19:43:54.000Z | 2020-07-14T19:43:54.000Z | cirq/google/api/v2/program.py | 1eedaegon/Cirq | de0c5e855069bba71e55b070fc9b06f58c07a861 | [
"Apache-2.0"
] | null | null | null | cirq/google/api/v2/program.py | 1eedaegon/Cirq | de0c5e855069bba71e55b070fc9b06f58c07a861 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Type, TYPE_CHECKING
from cirq import devices, ops
if TYPE_CHECKING:
import cirq
def qubit_to_proto_id(q: ops.Qid) -> str:
    """Return the proto id string used to serialize a `cirq.Qid`.

    Grid qubits serialize to '{row}_{col}', named qubits to their name, and
    line qubits to the string form of their `x` coordinate. Any other qubit
    type raises ValueError.
    """
    if isinstance(q, devices.GridQubit):
        return '{}_{}'.format(q.row, q.col)
    if isinstance(q, ops.NamedQubit):
        return q.name
    if isinstance(q, devices.LineQubit):
        return '{}'.format(q.x)
    raise ValueError('Qubits of type {} do not support proto id'.format(
        type(q)))
def grid_qubit_from_proto_id(proto_id: str) -> 'cirq.GridQubit':
    """Convert a '{row}_{col}' proto id into a `cirq.GridQubit`.

    Args:
        proto_id: The id to convert.

    Returns:
        The corresponding `cirq.GridQubit`.

    Raises:
        ValueError: If `proto_id` is not two '_'-separated integers.
    """
    parts = proto_id.split('_')
    if len(parts) != 2:
        raise ValueError(
            'GridQubit proto id must be of the form <int>_<int> but was {}'.
            format(proto_id))
    try:
        return devices.GridQubit(row=int(parts[0]), col=int(parts[1]))
    except ValueError:
        raise ValueError(
            'GridQubit proto id must be of the form <int>_<int> but was {}'.
            format(proto_id))
def line_qubit_from_proto_id(proto_id: str) -> 'cirq.LineQubit':
    """Convert an integer-string proto id into a `cirq.LineQubit`.

    Args:
        proto_id: The id to convert.

    Returns:
        The corresponding `cirq.LineQubit`.

    Raises:
        ValueError: If `proto_id` is not an integer string.
    """
    try:
        position = int(proto_id)
    except ValueError:
        raise ValueError(
            'Line qubit proto id must be an int but was {}'.format(proto_id))
    return devices.LineQubit(x=position)
def named_qubit_from_proto_id(proto_id: str) -> 'cirq.NamedQubit':
    """Wrap a proto id into a `cirq.NamedQubit` whose name is the id itself."""
    qubit = ops.NamedQubit(proto_id)
    return qubit
8540d54c36bb5808481314080ef487152eac9fa5 | 13,339 | py | Python | backend/tests/baserow/contrib/database/rows/test_rows_handler.py | ericderace/baserow | 7b35e81f75166d914d07ef4ad0c30c625b6bb396 | [
"MIT"
] | 1 | 2021-04-13T16:27:58.000Z | 2021-04-13T16:27:58.000Z | backend/tests/baserow/contrib/database/rows/test_rows_handler.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | 6 | 2021-04-08T22:03:06.000Z | 2022-01-13T03:38:17.000Z | backend/tests/baserow/contrib/database/rows/test_rows_handler.py | jacklicn/baserow | 978d9462ededbaa96674a6653028ba19876ea273 | [
"MIT"
] | null | null | null | import pytest
from unittest.mock import patch
from decimal import Decimal
from django.core.exceptions import ValidationError
from django.db import models
from baserow.core.exceptions import UserNotInGroupError
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.rows.exceptions import RowDoesNotExist
def test_get_field_ids_from_dict():
    """Int keys, 'field_<id>' keys and digit-string keys yield field ids; others don't."""
    values = {
        1: 'Included',
        'field_2': 'Included',
        '3': 'Included',
        'abc': 'Not included',
        'fieldd_3': 'Not included',
    }
    assert RowHandler().extract_field_ids_from_dict(values) == [1, 2, 3]
def test_extract_field_ids_from_string():
    """Comma-separated 'field_<id>', '<word>_<id>' and bare digits are all parsed."""
    handler = RowHandler()
    cases = [
        (None, []),
        ('not,something', []),
        ('field_1,field_2', [1, 2]),
        ('field_22,test_8,999', [22, 8, 999]),
        ('is,1,one', [1]),
    ]
    for value, expected in cases:
        assert handler.extract_field_ids_from_string(value) == expected
@pytest.mark.django_db
def test_get_include_exclude_fields(data_fixture):
    """Resolving include/exclude field-id strings against a table's own fields."""
    table = data_fixture.create_database_table()
    table_2 = data_fixture.create_database_table()
    field_1 = data_fixture.create_text_field(table=table, order=1)
    field_2 = data_fixture.create_text_field(table=table, order=2)
    field_3 = data_fixture.create_text_field(table=table_2, order=3)
    row_handler = RowHandler()
    # No include and no exclude (None or empty string) yields None => all fields.
    assert row_handler.get_include_exclude_fields(
        table,
        include=None,
        exclude=None
    ) is None
    assert row_handler.get_include_exclude_fields(
        table,
        include='',
        exclude=''
    ) is None
    fields = row_handler.get_include_exclude_fields(
        table,
        f'field_{field_1.id}'
    )
    assert len(fields) == 1
    assert fields[0].id == field_1.id
    # Unknown ids (field_9999) are silently dropped.
    fields = row_handler.get_include_exclude_fields(
        table,
        f'field_{field_1.id},field_9999,field_{field_2.id}'
    )
    assert len(fields) == 2
    assert fields[0].id == field_1.id
    assert fields[1].id == field_2.id
    # Exclude-only keeps the table's remaining fields.
    fields = row_handler.get_include_exclude_fields(
        table,
        None,
        f'field_{field_1.id},field_9999'
    )
    assert len(fields) == 1
    assert fields[0].id == field_2.id
    # Exclude is applied after include.
    fields = row_handler.get_include_exclude_fields(
        table,
        f'field_{field_1.id},field_{field_2}',
        f'field_{field_1.id}'
    )
    assert len(fields) == 1
    assert fields[0].id == field_2.id
    # Fields of another table are never included ...
    fields = row_handler.get_include_exclude_fields(
        table,
        f'field_{field_3.id}'
    )
    assert len(fields) == 0
    # ... and excluding a foreign field leaves this table's fields untouched.
    fields = row_handler.get_include_exclude_fields(
        table,
        None,
        f'field_{field_3.id}'
    )
    assert len(fields) == 2
@pytest.mark.django_db
def test_extract_manytomany_values(data_fixture):
    """ManyToMany values are split off from the plain (column) field values."""
    row_handler = RowHandler()
    class TemporaryModel1(models.Model):
        class Meta:
            app_label = 'test'
    class TemporaryModel2(models.Model):
        field_1 = models.CharField()
        field_2 = models.ManyToManyField(TemporaryModel1)
        class Meta:
            app_label = 'test'
    values = {
        'field_1': 'Value 1',
        'field_2': ['Value 2']
    }
    # The m2m entry is removed from `values` and returned separately so it can
    # be assigned after the row instance exists.
    values, manytomany_values = row_handler.extract_manytomany_values(
        values, TemporaryModel2
    )
    assert len(values.keys()) == 1
    assert 'field_1' in values
    assert len(manytomany_values.keys()) == 1
    assert 'field_2' in manytomany_values
@pytest.mark.django_db
@patch('baserow.contrib.database.rows.signals.row_created.send')
def test_create_row(send_mock, data_fixture):
    """Row creation: permissions, value coercion, `before` ordering and signals.

    Inserting before an existing row assigns fractional orders just below the
    target row's order (1.999...9, 1.999...8, ...), as the assertions below pin.
    """
    user = data_fixture.create_user()
    user_2 = data_fixture.create_user()
    table = data_fixture.create_database_table(name='Car', user=user)
    name_field = data_fixture.create_text_field(
        table=table, name='Name', text_default='Test'
    )
    speed_field = data_fixture.create_number_field(
        table=table, name='Max speed', number_negative=True
    )
    price_field = data_fixture.create_number_field(
        table=table, name='Price', number_type='DECIMAL', number_decimal_places=2,
        number_negative=False
    )
    handler = RowHandler()
    # A user outside the table's group may not create rows.
    with pytest.raises(UserNotInGroupError):
        handler.create_row(user=user_2, table=table)
    # Unknown field ids (9999) must be ignored.
    row_1 = handler.create_row(user=user, table=table, values={
        name_field.id: 'Tesla',
        speed_field.id: 240,
        f'field_{price_field.id}': 59999.99,
        9999: 'Must not be added'
    })
    assert getattr(row_1, f'field_{name_field.id}') == 'Tesla'
    assert getattr(row_1, f'field_{speed_field.id}') == 240
    assert getattr(row_1, f'field_{price_field.id}') == 59999.99
    assert not getattr(row_1, f'field_9999', None)
    assert row_1.order == 1
    # After a reload the decimal field comes back as a Decimal.
    row_1.refresh_from_db()
    assert getattr(row_1, f'field_{name_field.id}') == 'Tesla'
    assert getattr(row_1, f'field_{speed_field.id}') == 240
    assert getattr(row_1, f'field_{price_field.id}') == Decimal('59999.99')
    assert not getattr(row_1, f'field_9999', None)
    assert row_1.order == Decimal('1.00000000000000000000')
    # The row_created signal is sent once with the expected payload.
    send_mock.assert_called_once()
    assert send_mock.call_args[1]['row'].id == row_1.id
    assert send_mock.call_args[1]['user'].id == user.id
    assert send_mock.call_args[1]['table'].id == table.id
    assert send_mock.call_args[1]['before'] is None
    assert send_mock.call_args[1]['model']._generated_table_model
    # Missing values fall back to the field defaults.
    row_2 = handler.create_row(user=user, table=table)
    assert getattr(row_2, f'field_{name_field.id}') == 'Test'
    assert not getattr(row_2, f'field_{speed_field.id}')
    assert not getattr(row_2, f'field_{price_field.id}')
    row_1.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    # Inserting before row_2 slots the new row just below order 2.
    row_3 = handler.create_row(user=user, table=table, before=row_2)
    row_1.refresh_from_db()
    row_2.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    assert row_3.order == Decimal('1.99999999999999999999')
    assert send_mock.call_args[1]['before'].id == row_2.id
    # Further inserts before row_2 shift earlier fractional rows down.
    row_4 = handler.create_row(user=user, table=table, before=row_2)
    row_1.refresh_from_db()
    row_2.refresh_from_db()
    row_3.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    assert row_3.order == Decimal('1.99999999999999999998')
    assert row_4.order == Decimal('1.99999999999999999999')
    row_5 = handler.create_row(user=user, table=table, before=row_3)
    row_1.refresh_from_db()
    row_2.refresh_from_db()
    row_3.refresh_from_db()
    row_4.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    assert row_3.order == Decimal('1.99999999999999999998')
    assert row_4.order == Decimal('1.99999999999999999999')
    assert row_5.order == Decimal('1.99999999999999999997')
    row_6 = handler.create_row(user=user, table=table, before=row_2)
    row_1.refresh_from_db()
    row_2.refresh_from_db()
    row_3.refresh_from_db()
    row_4.refresh_from_db()
    row_5.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    assert row_3.order == Decimal('1.99999999999999999997')
    assert row_4.order == Decimal('1.99999999999999999998')
    assert row_5.order == Decimal('1.99999999999999999996')
    assert row_6.order == Decimal('1.99999999999999999999')
    row_7 = handler.create_row(user, table=table, before=row_1)
    row_1.refresh_from_db()
    row_2.refresh_from_db()
    row_3.refresh_from_db()
    row_4.refresh_from_db()
    row_5.refresh_from_db()
    row_6.refresh_from_db()
    assert row_1.order == Decimal('1.00000000000000000000')
    assert row_2.order == Decimal('2.00000000000000000000')
    assert row_3.order == Decimal('1.99999999999999999997')
    assert row_4.order == Decimal('1.99999999999999999998')
    assert row_5.order == Decimal('1.99999999999999999996')
    assert row_6.order == Decimal('1.99999999999999999999')
    assert row_7.order == Decimal('0.99999999999999999999')
    # Field validation rejects negative prices on a non-negative field.
    with pytest.raises(ValidationError):
        handler.create_row(user=user, table=table, values={
            price_field.id: -10.22
        })
    # The default queryset returns rows sorted by their fractional order.
    model = table.get_model()
    rows = model.objects.all()
    assert len(rows) == 7
    assert rows[0].id == row_7.id
    assert rows[1].id == row_1.id
    assert rows[2].id == row_5.id
    assert rows[3].id == row_3.id
    assert rows[4].id == row_4.id
    assert rows[5].id == row_6.id
    assert rows[6].id == row_2.id
    # After deleting the highest-ordered row, the next append reuses order 3.
    row_2.delete()
    row_8 = handler.create_row(user, table=table)
    assert row_8.order == Decimal('3.00000000000000000000')
@pytest.mark.django_db
def test_get_row(data_fixture):
    """Fetching a row: permission check, missing-row error and value types."""
    user = data_fixture.create_user()
    user_2 = data_fixture.create_user()
    table = data_fixture.create_database_table(name='Car', user=user)
    name_field = data_fixture.create_text_field(
        table=table, name='Name', text_default='Test'
    )
    speed_field = data_fixture.create_number_field(
        table=table, name='Max speed', number_negative=True
    )
    price_field = data_fixture.create_number_field(
        table=table, name='Price', number_type='DECIMAL', number_decimal_places=2,
        number_negative=False
    )
    handler = RowHandler()
    row = handler.create_row(user=user, table=table, values={
        f'field_{name_field.id}': 'Tesla',
        f'field_{speed_field.id}': 240,
        f'field_{price_field.id}': Decimal('59999.99')
    })
    # A user outside the table's group may not fetch rows.
    with pytest.raises(UserNotInGroupError):
        handler.get_row(user=user_2, table=table, row_id=row.id)
    with pytest.raises(RowDoesNotExist):
        handler.get_row(user=user, table=table, row_id=99999)
    row_tmp = handler.get_row(user=user, table=table, row_id=row.id)
    assert row_tmp.id == row.id
    assert getattr(row_tmp, f'field_{name_field.id}') == 'Tesla'
    assert getattr(row_tmp, f'field_{speed_field.id}') == 240
    assert getattr(row_tmp, f'field_{price_field.id}') == Decimal('59999.99')
@pytest.mark.django_db
@patch('baserow.contrib.database.rows.signals.row_updated.send')
def test_update_row(send_mock, data_fixture):
    """Updating a row: permissions, validation, value coercion and signal payload."""
    user = data_fixture.create_user()
    user_2 = data_fixture.create_user()
    table = data_fixture.create_database_table(name='Car', user=user)
    name_field = data_fixture.create_text_field(
        table=table, name='Name', text_default='Test'
    )
    speed_field = data_fixture.create_number_field(
        table=table, name='Max speed', number_negative=True
    )
    price_field = data_fixture.create_number_field(
        table=table, name='Price', number_type='DECIMAL', number_decimal_places=2,
        number_negative=False
    )
    handler = RowHandler()
    row = handler.create_row(user=user, table=table)
    with pytest.raises(UserNotInGroupError):
        handler.update_row(user=user_2, table=table, row_id=row.id, values={})
    with pytest.raises(RowDoesNotExist):
        handler.update_row(user=user, table=table, row_id=99999, values={})
    # Negative price rejected by field validation on a non-negative field.
    with pytest.raises(ValidationError):
        handler.update_row(user=user, table=table, row_id=row.id, values={
            price_field.id: -10.99
        })
    handler.update_row(user=user, table=table, row_id=row.id, values={
        name_field.id: 'Tesla',
        speed_field.id: 240,
        f'field_{price_field.id}': 59999.99
    })
    row.refresh_from_db()
    assert getattr(row, f'field_{name_field.id}') == 'Tesla'
    assert getattr(row, f'field_{speed_field.id}') == 240
    assert getattr(row, f'field_{price_field.id}') == Decimal('59999.99')
    # Only the successful update sends the row_updated signal.
    send_mock.assert_called_once()
    assert send_mock.call_args[1]['row'].id == row.id
    assert send_mock.call_args[1]['user'].id == user.id
    assert send_mock.call_args[1]['table'].id == table.id
    assert send_mock.call_args[1]['model']._generated_table_model
@pytest.mark.django_db
@patch('baserow.contrib.database.rows.signals.row_deleted.send')
def test_delete_row(send_mock, data_fixture):
    """Deleting a row: permissions, missing-row error and signal payload."""
    user = data_fixture.create_user()
    user_2 = data_fixture.create_user()
    table = data_fixture.create_database_table(name='Car', user=user)
    data_fixture.create_text_field(table=table, name='Name', text_default='Test')
    handler = RowHandler()
    model = table.get_model()
    row = handler.create_row(user=user, table=table)
    handler.create_row(user=user, table=table)
    with pytest.raises(UserNotInGroupError):
        handler.delete_row(user=user_2, table=table, row_id=row.id)
    with pytest.raises(RowDoesNotExist):
        handler.delete_row(user=user, table=table, row_id=99999)
    # Remember the id before deletion; the signal reports it separately.
    row_id = row.id
    handler.delete_row(user=user, table=table, row_id=row.id)
    assert model.objects.all().count() == 1
    # Only the successful delete sends the row_deleted signal.
    send_mock.assert_called_once()
    assert send_mock.call_args[1]['row_id'] == row_id
    assert send_mock.call_args[1]['row']
    assert send_mock.call_args[1]['user'].id == user.id
    assert send_mock.call_args[1]['table'].id == table.id
    assert send_mock.call_args[1]['model']._generated_table_model
| 35.381963 | 87 | 0.693155 |
a6d81dda318396912956f87808143d62e83b9639 | 4,861 | py | Python | keras2onnx/main.py | aj-ames/keras-onnx | 74e855a759ff934b1762a0b31589e28bdb8be98e | [
"MIT"
] | 1 | 2021-04-15T16:35:54.000Z | 2021-04-15T16:35:54.000Z | keras2onnx/main.py | aj-ames/keras-onnx | 74e855a759ff934b1762a0b31589e28bdb8be98e | [
"MIT"
] | null | null | null | keras2onnx/main.py | aj-ames/keras-onnx | 74e855a759ff934b1762a0b31589e28bdb8be98e | [
"MIT"
] | null | null | null | ###############################################################################
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
###############################################################################
import logging
from onnxconverter_common.onnx_ex import get_maximum_opset_supported
from .proto import keras, is_tf_keras
from .proto.tfcompat import tensorflow as tf
from .proto.tfcompat import is_tf2, dump_graph_into_tensorboard
from .proto import onnx
from .topology import convert_topology
from .ke2onnx import static_set_ke2onnx_converters
from .parser import parse_graph, parse_graph_modeless
from .topology import Topology
from .common.utils import set_logger_level, k2o_logger
from .funcbook import set_converter
from ._parser_1x import build_opdict_from_keras
from ._parse_tf import tsname_to_node, build_layer_output_from_model
def convert_keras(model, name=None, doc_string='', target_opset=None,
                  channel_first_inputs=None, debug_mode=False, custom_op_conversions=None):
    # type: (keras.Model, str, str, int, [], bool, {}) -> onnx.ModelProto
    """
    :param model: keras model
    :param name: the converted onnx model internal name
    :param doc_string: doc string
    :param target_opset: the targeted onnx model opset
    :param channel_first_inputs: A list of channel first input
    :param debug_mode: will enable the log and try to convert as much as possible on conversion
    :param custom_op_conversions: the handler for custom operator conversion
    :return an ONNX ModelProto
    """
    # Guard against the common misconfiguration of converting a tf.keras model
    # with the standalone-keras code path.
    if isinstance(model, tf.keras.Model) and not is_tf_keras:
        raise Exception("This is a tensorflow keras model, but keras standalone converter is used." +
                        " Please set environment variable TF_KERAS = 1.")
    set_logger_level(logging.DEBUG if debug_mode else logging.INFO)
    if is_tf2:
        # Log eager-execution state; it affects which graph-extraction path applies.
        from tensorflow.python.eager import context
        k2o_logger().info("tf executing eager_mode: {}".format(context.executing_eagerly()))
        if hasattr(model, 'run_eagerly'):
            k2o_logger().info("tf.keras model eager_mode: {}".format(model.run_eagerly))
    if debug_mode:
        print(model.summary())
    name = name or model.name
    target_opset = target_opset or get_maximum_opset_supported()
    input_names = []
    output_names = []
    output_dict = {}
    # TF2 + tf.keras: rebuild a graph from the layers; otherwise reuse the
    # session/output graph and derive the op dict from the keras layers.
    if is_tf2 and is_tf_keras:
        tf_graph = build_layer_output_from_model(model, output_dict, input_names, output_names)
    else:
        tf_graph = model.outputs[0].graph if is_tf2 else keras.backend.get_session().graph
        output_dict = build_opdict_from_keras(model)
        output_names = [n.name for n in model.outputs]
    static_set_ke2onnx_converters(set_converter)
    dump_graph_into_tensorboard(tf_graph)
    topology = Topology(model, tf_graph,
                        target_opset=target_opset,
                        custom_op_dict=custom_op_conversions)
    topology.debug_mode = debug_mode
    if (not model.inputs) or (not model.outputs):
        # Since Tensorflow 2.2, For the subclassed tf.keras model, there is no inputs/outputs info ...
        # ... stored in model object any more.
        parse_graph_modeless(topology, tf_graph, target_opset, input_names, output_names, output_dict)
    else:
        parse_graph(topology, tf_graph, target_opset, output_names, output_dict)
    topology.compile()
    return convert_topology(topology, name, doc_string, target_opset, channel_first_inputs)
def build_io_names_tf2onnx(model):
    """Return a dict with the tensor names of the model's inputs and outputs."""
    input_names = [tensor.name for tensor in model.inputs]
    output_names = [tensor.name for tensor in model.outputs]
    return {'input_names': input_names, 'output_names': output_names}
def _freeze_graph(session, keep_var_names=None, output_names=None):
    """Convert the session's variables to constants and return the frozen GraphDef.

    NOTE(review): relies on the TF1 `tf.get_default_graph`/`tf.graph_util`
    API, so this only works under TF1-style graphs/sessions.
    """
    graph = tf.get_default_graph()
    # Freeze every global variable except those explicitly kept.
    freeze_var_names = list(set(v.op.name for v in tf.global_variables()).difference(keep_var_names or []))
    input_graph_def = graph.as_graph_def()
    # Clear device placements so the frozen graph is portable.
    for node in input_graph_def.node:
        node.device = ""
    frozen_graph_def = tf.graph_util.convert_variables_to_constants(
        session, input_graph_def, output_names, freeze_var_names)
    return frozen_graph_def
def export_tf_frozen_graph(model, keep_var_names=None, output_names=None):
    """
    Freezes internal tensorflow graph for the specified keras model.

    :param model: the keras model whose graph should be frozen.
    :param keep_var_names: variable names to keep as variables (not frozen).
    :param output_names: graph node names to keep as outputs; defaults to the
        node names of the model's output tensors.
    :return The frozen graph object.
    :raises RuntimeError: when running under Tensorflow 2.x.
    """
    if is_tf2:
        raise RuntimeError("Only Tensorflow 1.x supported.")
    session = keras.backend.get_session()
    # The TF2 case was rejected above, so the session graph is always the one
    # to freeze (the original `model.outputs[0].graph if is_tf2 else
    # session.graph` ternary was unreachable dead code).
    graph = session.graph
    with graph.as_default():
        output_names = output_names or \
            [tsname_to_node(n_) for n_ in build_io_names_tf2onnx(model)['output_names']]
        return _freeze_graph(session, keep_var_names, output_names)
| 44.59633 | 107 | 0.703559 |
29ed58be5d8bfcc23246ce013dd46f1da56d726d | 291 | py | Python | configs/conformer/conformer-base-p16_8xb128_in1k.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 1 | 2022-03-07T13:55:57.000Z | 2022-03-07T13:55:57.000Z | configs/conformer/conformer-base-p16_8xb128_in1k.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
] | 5 | 2022-03-02T02:58:56.000Z | 2022-03-23T05:51:53.000Z | configs/conformer/conformer-base-p16_8xb128_in1k.py | YuxinZou/mmclassification | 2037260ea6c98a3b115e97727e1151a1c2c32f7a | [
"Apache-2.0"
# Conformer-Base (patch size 16), ImageNet-1k, 8 GPUs x 128 images per GPU.
_base_ = [
    '../_base_/models/conformer/base-p16.py',
    '../_base_/datasets/imagenet_bs64_swin_224.py',
    '../_base_/schedules/imagenet_bs1024_adamw_conformer.py',
    '../_base_/default_runtime.py',
]

# Override the base dataset's batch size for this configuration.
data = dict(samples_per_gpu=128)

# Evaluate top-k accuracy after every epoch.
evaluation = dict(interval=1, metric='accuracy')
3ee3bbe4eece41d497ffdaccc2cb244c393538ca | 26,743 | py | Python | insights/specs/__init__.py | pilhuhn/insights-core | 9aff2aa315867a59f2c9de89615058368b342326 | [
"Apache-2.0"
] | null | null | null | insights/specs/__init__.py | pilhuhn/insights-core | 9aff2aa315867a59f2c9de89615058368b342326 | [
"Apache-2.0"
] | null | null | null | insights/specs/__init__.py | pilhuhn/insights-core | 9aff2aa315867a59f2c9de89615058368b342326 | [
"Apache-2.0"
] | null | null | null | from insights.core.spec_factory import SpecSet, RegistryPoint
class Openshift(SpecSet):
    """Registry of raw datasources collected from an OpenShift cluster.

    Every attribute is a ``RegistryPoint`` declared with ``raw=True``, i.e.
    the collected content is provided unparsed; concrete spec
    implementations hook into these registry points.
    """
    cluster_operators = RegistryPoint(raw=True)
    crds = RegistryPoint(raw=True)
    # multi_output: one document per custom resource definition instance.
    crs = RegistryPoint(raw=True, multi_output=True)
    machine_configs = RegistryPoint(raw=True)
    machines = RegistryPoint(raw=True)
    machine_id = RegistryPoint(raw=True) # stand in for system id
    namespaces = RegistryPoint(raw=True)
    nodes = RegistryPoint(raw=True)
    pods = RegistryPoint(raw=True)
    pvcs = RegistryPoint(raw=True)
    storage_classes = RegistryPoint(raw=True)
class Specs(SpecSet):
    """Registry of all datasources known to insights-core.

    Each attribute is a ``RegistryPoint`` that concrete spec implementations
    (host collection, sosreport, archives, ...) plug into.  The keyword
    arguments mirror ``RegistryPoint``'s options: ``multi_output``,
    ``filterable`` and ``raw``.

    Note: a duplicate ``openvswitch_server_log`` declaration (it appeared
    twice with identical definitions) has been removed; only one remains.
    """
    amq_broker = RegistryPoint(multi_output=True)
    auditctl_status = RegistryPoint()
    auditd_conf = RegistryPoint()
    audit_log = RegistryPoint(filterable=True)
    autofs_conf = RegistryPoint()
    avc_hash_stats = RegistryPoint()
    avc_cache_threshold = RegistryPoint()
    aws_instance_id_doc = RegistryPoint()
    aws_instance_id_pkcs7 = RegistryPoint()
    aws_instance_type = RegistryPoint()
    azure_instance_type = RegistryPoint()
    bios_uuid = RegistryPoint()
    blkid = RegistryPoint()
    bond = RegistryPoint(multi_output=True)
    bond_dynamic_lb = RegistryPoint(multi_output=True)
    boot_loader_entries = RegistryPoint(multi_output=True)
    branch_info = RegistryPoint()
    brctl_show = RegistryPoint()
    candlepin_error_log = RegistryPoint(filterable=True)
    candlepin_log = RegistryPoint(filterable=True)
    cdc_wdm = RegistryPoint()
    checkin_conf = RegistryPoint()
    catalina_out = RegistryPoint(multi_output=True, filterable=True)
    catalina_server_log = RegistryPoint(multi_output=True, filterable=True)
    cciss = RegistryPoint(multi_output=True)
    ceilometer_central_log = RegistryPoint(filterable=True)
    ceilometer_collector_log = RegistryPoint(filterable=True)
    ceilometer_compute_log = RegistryPoint(filterable=True)
    ceilometer_conf = RegistryPoint()
    ceph_conf = RegistryPoint(filterable=True)
    ceph_config_show = RegistryPoint(multi_output=True)
    ceph_df_detail = RegistryPoint()
    ceph_health_detail = RegistryPoint()
    ceph_insights = RegistryPoint()
    ceph_log = RegistryPoint(multi_output=True, filterable=True)
    ceph_osd_df = RegistryPoint()
    ceph_osd_dump = RegistryPoint()
    ceph_osd_ec_profile_get = RegistryPoint(multi_output=True)
    ceph_osd_ec_profile_ls = RegistryPoint()
    ceph_osd_log = RegistryPoint(multi_output=True, filterable=True)
    ceph_osd_tree = RegistryPoint()
    ceph_osd_tree_text = RegistryPoint()
    ceph_report = RegistryPoint()
    ceph_s = RegistryPoint()
    ceph_v = RegistryPoint()
    certificates_enddate = RegistryPoint()
    cgroups = RegistryPoint()
    chkconfig = RegistryPoint()
    chrony_conf = RegistryPoint()
    chronyc_sources = RegistryPoint()
    cib_xml = RegistryPoint()
    cinder_api_log = RegistryPoint(filterable=True)
    cinder_conf = RegistryPoint()
    cinder_volume_log = RegistryPoint(filterable=True)
    cluster_conf = RegistryPoint(filterable=True)
    cmdline = RegistryPoint()
    cobbler_modules_conf = RegistryPoint()
    cobbler_settings = RegistryPoint()
    corosync = RegistryPoint()
    corosync_conf = RegistryPoint()
    cpe = RegistryPoint()
    cpu_cores = RegistryPoint(multi_output=True)
    cpu_siblings = RegistryPoint(multi_output=True)
    cpu_smt_active = RegistryPoint()
    cpu_smt_control = RegistryPoint()
    cpu_vulns = RegistryPoint(multi_output=True)
    cpu_vulns_meltdown = RegistryPoint()
    cpu_vulns_spectre_v1 = RegistryPoint()
    cpu_vulns_spectre_v2 = RegistryPoint()
    cpu_vulns_spec_store_bypass = RegistryPoint()
    cpuinfo_max_freq = RegistryPoint()
    cpuinfo = RegistryPoint()
    cpuset_cpus = RegistryPoint()
    crypto_policies_config = RegistryPoint()
    crypto_policies_state_current = RegistryPoint()
    crypto_policies_opensshserver = RegistryPoint()
    crypto_policies_bind = RegistryPoint()
    crt = RegistryPoint()
    current_clocksource = RegistryPoint()
    date_iso = RegistryPoint()
    date = RegistryPoint()
    date_utc = RegistryPoint()
    db2licm_l = RegistryPoint()
    dcbtool_gc_dcb = RegistryPoint(multi_output=True)
    df__alP = RegistryPoint()
    df__al = RegistryPoint()
    df__li = RegistryPoint()
    dig_dnssec = RegistryPoint()
    dig_edns = RegistryPoint()
    dig_noedns = RegistryPoint()
    dig = RegistryPoint()
    dirsrv = RegistryPoint()
    dirsrv_access = RegistryPoint(multi_output=True, filterable=True)
    dirsrv_errors = RegistryPoint(multi_output=True, filterable=True)
    display_java = RegistryPoint()
    display_name = RegistryPoint()
    dmesg = RegistryPoint(filterable=True)
    dmesg_log = RegistryPoint(filterable=True)
    dmidecode = RegistryPoint()
    dmsetup_info = RegistryPoint()
    dnf_modules = RegistryPoint()
    dnf_module_list = RegistryPoint()
    dnf_module_info = RegistryPoint()
    dnsmasq_config = RegistryPoint(multi_output=True)
    docker_container_inspect = RegistryPoint(multi_output=True)
    docker_host_machine_id = RegistryPoint()
    docker_image_inspect = RegistryPoint(multi_output=True)
    docker_info = RegistryPoint()
    docker_list_containers = RegistryPoint()
    docker_list_images = RegistryPoint()
    docker_network = RegistryPoint()
    docker_storage = RegistryPoint()
    docker_storage_setup = RegistryPoint()
    docker_sysconfig = RegistryPoint()
    dumpe2fs_h = RegistryPoint(multi_output=True)
    engine_config_all = RegistryPoint()
    engine_log = RegistryPoint(filterable=True)
    etc_journald_conf_d = RegistryPoint(multi_output=True)
    etc_journald_conf = RegistryPoint()
    etc_machine_id = RegistryPoint()
    etcd_conf = RegistryPoint(filterable=True)
    ethernet_interfaces = RegistryPoint()
    ethtool_a = RegistryPoint(multi_output=True)
    ethtool_c = RegistryPoint(multi_output=True)
    ethtool_g = RegistryPoint(multi_output=True)
    ethtool_i = RegistryPoint(multi_output=True)
    ethtool_k = RegistryPoint(multi_output=True)
    ethtool = RegistryPoint(multi_output=True)
    ethtool_S = RegistryPoint(multi_output=True)
    ethtool_T = RegistryPoint(multi_output=True)
    exim_conf = RegistryPoint()
    facter = RegistryPoint()
    fc_match = RegistryPoint()
    fcoeadm_i = RegistryPoint()
    fdisk_l = RegistryPoint()
    fdisk_l_sos = RegistryPoint(multi_output=True)
    foreman_production_log = RegistryPoint(filterable=True)
    foreman_proxy_conf = RegistryPoint()
    foreman_proxy_log = RegistryPoint(filterable=True)
    foreman_satellite_log = RegistryPoint(filterable=True)
    foreman_ssl_access_ssl_log = RegistryPoint(filterable=True)
    foreman_rake_db_migrate_status = RegistryPoint()
    foreman_tasks_config = RegistryPoint(filterable=True)
    freeipa_healthcheck_log = RegistryPoint()
    fstab = RegistryPoint()
    galera_cnf = RegistryPoint()
    getcert_list = RegistryPoint()
    getconf_page_size = RegistryPoint()
    getenforce = RegistryPoint()
    getsebool = RegistryPoint()
    glance_api_conf = RegistryPoint()
    glance_api_log = RegistryPoint(filterable=True)
    glance_cache_conf = RegistryPoint()
    glance_registry_conf = RegistryPoint()
    gluster_v_info = RegistryPoint()
    gluster_v_status = RegistryPoint()
    gluster_peer_status = RegistryPoint()
    gnocchi_conf = RegistryPoint(filterable=True)
    gnocchi_metricd_log = RegistryPoint(filterable=True)
    grub_conf = RegistryPoint()
    grub_config_perms = RegistryPoint()
    grub_efi_conf = RegistryPoint()
    grub1_config_perms = RegistryPoint()
    grub2_cfg = RegistryPoint()
    grub2_efi_cfg = RegistryPoint()
    grubby_default_index = RegistryPoint()
    grubby_default_kernel = RegistryPoint()
    hammer_ping = RegistryPoint()
    hammer_task_list = RegistryPoint()
    haproxy_cfg = RegistryPoint()
    heat_api_log = RegistryPoint(filterable=True)
    heat_conf = RegistryPoint()
    heat_crontab = RegistryPoint()
    heat_crontab_container = RegistryPoint()
    heat_engine_log = RegistryPoint(filterable=True)
    hostname = RegistryPoint()
    hostname_default = RegistryPoint()
    hostname_short = RegistryPoint()
    hosts = RegistryPoint()
    hponcfg_g = RegistryPoint()
    httpd_access_log = RegistryPoint(filterable=True)
    httpd_conf = RegistryPoint(multi_output=True)
    httpd_conf_sos = RegistryPoint(multi_output=True)
    httpd_conf_scl_httpd24 = RegistryPoint(multi_output=True)
    httpd_conf_scl_jbcs_httpd24 = RegistryPoint(multi_output=True)
    httpd_error_log = RegistryPoint(filterable=True)
    httpd24_httpd_error_log = RegistryPoint(filterable=True)
    jbcs_httpd24_httpd_error_log = RegistryPoint(filterable=True)
    httpd_limits = RegistryPoint(multi_output=True)
    httpd_M = RegistryPoint(multi_output=True)
    httpd_on_nfs = RegistryPoint()
    httpd_pid = RegistryPoint()
    httpd_ssl_access_log = RegistryPoint(filterable=True)
    httpd_ssl_error_log = RegistryPoint(filterable=True)
    httpd_V = RegistryPoint(multi_output=True)
    virt_uuid_facts = RegistryPoint()
    ifcfg = RegistryPoint(multi_output=True)
    ifcfg_static_route = RegistryPoint(multi_output=True)
    ifconfig = RegistryPoint()
    imagemagick_policy = RegistryPoint(multi_output=True, filterable=True)
    init_ora = RegistryPoint()
    initscript = RegistryPoint(multi_output=True)
    init_process_cgroup = RegistryPoint()
    installed_rpms = RegistryPoint()
    interrupts = RegistryPoint()
    ip6tables_permanent = RegistryPoint()
    ip6tables = RegistryPoint()
    ip_addr = RegistryPoint()
    ip_addresses = RegistryPoint()
    ipaupgrade_log = RegistryPoint(filterable=True)
    ipcs_m = RegistryPoint()
    ipcs_m_p = RegistryPoint()
    ipcs_s = RegistryPoint()
    ipcs_s_i = RegistryPoint(multi_output=True)
    ip_netns_exec_namespace_lsof = RegistryPoint(multi_output=True, filterable=True)
    ip_route_show_table_all = RegistryPoint()
    ip_s_link = RegistryPoint()
    iptables_permanent = RegistryPoint()
    iptables = RegistryPoint()
    ipv4_neigh = RegistryPoint()
    ipv6_neigh = RegistryPoint()
    ironic_conf = RegistryPoint(filterable=True)
    ironic_inspector_log = RegistryPoint(filterable=True)
    iscsiadm_m_session = RegistryPoint()
    jboss_domain_server_log = RegistryPoint(multi_output=True, filterable=True)
    jboss_standalone_server_log = RegistryPoint(multi_output=True, filterable=True)
    jboss_standalone_main_config = RegistryPoint(multi_output=True)
    jboss_version = RegistryPoint(multi_output=True)
    journal_since_boot = RegistryPoint(filterable=True)
    katello_service_status = RegistryPoint(filterable=True)
    kdump_conf = RegistryPoint()
    kerberos_kdc_log = RegistryPoint(filterable=True)
    kernel_config = RegistryPoint(multi_output=True, filterable=True)
    kexec_crash_loaded = RegistryPoint()
    kexec_crash_size = RegistryPoint()
    keystone_conf = RegistryPoint()
    keystone_crontab = RegistryPoint()
    keystone_crontab_container = RegistryPoint()
    keystone_log = RegistryPoint(filterable=True)
    krb5 = RegistryPoint(multi_output=True)
    ksmstate = RegistryPoint()
    kubepods_cpu_quota = RegistryPoint(multi_output=True)
    lastupload = RegistryPoint(multi_output=True)
    libkeyutils_objdumps = RegistryPoint()
    libkeyutils = RegistryPoint()
    libvirtd_log = RegistryPoint(filterable=True)
    limits_conf = RegistryPoint(multi_output=True)
    locale = RegistryPoint()
    localtime = RegistryPoint()
    logrotate_conf = RegistryPoint(multi_output=True)
    lpstat_p = RegistryPoint()
    ls_boot = RegistryPoint()
    ls_dev = RegistryPoint()
    ls_disk = RegistryPoint()
    ls_docker_volumes = RegistryPoint()
    ls_etc = RegistryPoint()
    ls_lib_firmware = RegistryPoint()
    ls_ocp_cni_openshift_sdn = RegistryPoint()
    ls_origin_local_volumes_pods = RegistryPoint()
    ls_osroot = RegistryPoint()
    ls_run_systemd_generator = RegistryPoint()
    ls_R_var_lib_nova_instances = RegistryPoint()
    ls_sys_firmware = RegistryPoint()
    ls_usr_lib64 = RegistryPoint(filterable=True)
    ls_usr_sbin = RegistryPoint(filterable=True)
    ls_var_lib_mongodb = RegistryPoint()
    ls_var_lib_nova_instances = RegistryPoint()
    ls_var_log = RegistryPoint()
    ls_var_opt_mssql = RegistryPoint()
    ls_var_opt_mssql_log = RegistryPoint()
    ls_var_run = RegistryPoint()
    ls_var_spool_clientmq = RegistryPoint()
    ls_var_spool_postfix_maildrop = RegistryPoint()
    ls_var_tmp = RegistryPoint(filterable=True)
    ls_var_www = RegistryPoint()
    lsblk = RegistryPoint()
    lsblk_pairs = RegistryPoint()
    lscpu = RegistryPoint()
    lsinitrd = RegistryPoint(filterable=True)
    lsinitrd_lvm_conf = RegistryPoint()
    lsmod = RegistryPoint()
    lsof = RegistryPoint(filterable=True)
    lspci = RegistryPoint()
    lssap = RegistryPoint()
    lsscsi = RegistryPoint()
    lvdisplay = RegistryPoint()
    lvm_conf = RegistryPoint(filterable=True)
    lvs_noheadings = RegistryPoint()
    lvs_noheadings_all = RegistryPoint()
    lvs = RegistryPoint()
    mac_addresses = RegistryPoint(multi_output=True)
    machine_id = RegistryPoint()
    manila_conf = RegistryPoint()
    mariadb_log = RegistryPoint(filterable=True)
    max_uid = RegistryPoint()
    md5chk_files = RegistryPoint(multi_output=True)
    mdstat = RegistryPoint()
    meminfo = RegistryPoint()
    messages = RegistryPoint(filterable=True)
    metadata_json = RegistryPoint(raw=True)
    mistral_executor_log = RegistryPoint(filterable=True)
    mlx4_port = RegistryPoint(multi_output=True)
    modinfo_i40e = RegistryPoint()
    modinfo_igb = RegistryPoint()
    modinfo_ixgbe = RegistryPoint()
    modinfo_veth = RegistryPoint()
    modinfo_vmxnet3 = RegistryPoint()
    modinfo = RegistryPoint(multi_output=True)
    modprobe = RegistryPoint(multi_output=True)
    module = RegistryPoint()
    mongod_conf = RegistryPoint(multi_output=True, filterable=True)
    mount = RegistryPoint()
    mounts = RegistryPoint()
    mssql_conf = RegistryPoint()
    multicast_querier = RegistryPoint()
    multipath_conf = RegistryPoint()
    multipath_conf_initramfs = RegistryPoint()
    multipath__v4__ll = RegistryPoint()
    mysqladmin_vars = RegistryPoint()
    mysql_log = RegistryPoint(multi_output=True, filterable=True)
    mysqld_limits = RegistryPoint()
    named_checkconf_p = RegistryPoint(filterable=True)
    namespace = RegistryPoint()
    netconsole = RegistryPoint()
    netstat_agn = RegistryPoint()
    netstat_i = RegistryPoint()
    netstat = RegistryPoint()
    netstat_s = RegistryPoint()
    networkmanager_dispatcher_d = RegistryPoint(multi_output=True)
    neutron_conf = RegistryPoint(filterable=True)
    neutron_dhcp_agent_ini = RegistryPoint(filterable=True)
    neutron_l3_agent_ini = RegistryPoint(filterable=True)
    neutron_l3_agent_log = RegistryPoint(filterable=True)
    neutron_metadata_agent_ini = RegistryPoint(filterable=True)
    neutron_metadata_agent_log = RegistryPoint(filterable=True)
    neutron_ml2_conf = RegistryPoint(filterable=True)
    neutron_ovs_agent_log = RegistryPoint(filterable=True)
    neutron_plugin_ini = RegistryPoint()
    neutron_server_log = RegistryPoint(filterable=True)
    nfnetlink_queue = RegistryPoint()
    nfs_exports_d = RegistryPoint(multi_output=True)
    nfs_exports = RegistryPoint()
    nginx_conf = RegistryPoint(multi_output=True)
    nmcli_conn_show = RegistryPoint()
    nmcli_dev_show = RegistryPoint()
    nmcli_dev_show_sos = RegistryPoint(multi_output=True)
    nova_api_log = RegistryPoint(filterable=True)
    nova_compute_log = RegistryPoint(filterable=True)
    nova_conf = RegistryPoint()
    nova_crontab = RegistryPoint()
    nova_crontab_container = RegistryPoint()
    nova_uid = RegistryPoint()
    nova_migration_uid = RegistryPoint()
    nscd_conf = RegistryPoint(filterable=True)
    nsswitch_conf = RegistryPoint(filterable=True)
    ntp_conf = RegistryPoint()
    ntpq_leap = RegistryPoint()
    ntpq_pn = RegistryPoint()
    ntptime = RegistryPoint()
    numa_cpus = RegistryPoint(multi_output=True)
    numeric_user_group_name = RegistryPoint()
    nvme_core_io_timeout = RegistryPoint()
    oc_get_bc = RegistryPoint()
    oc_get_build = RegistryPoint()
    oc_get_clusterrole_with_config = RegistryPoint()
    oc_get_clusterrolebinding_with_config = RegistryPoint()
    oc_get_dc = RegistryPoint()
    oc_get_egressnetworkpolicy = RegistryPoint()
    oc_get_endpoints = RegistryPoint()
    oc_get_event = RegistryPoint()
    oc_get_node = RegistryPoint()
    oc_get_pod = RegistryPoint()
    oc_get_project = RegistryPoint()
    oc_get_pvc = RegistryPoint()
    oc_get_pv = RegistryPoint()
    oc_get_rc = RegistryPoint()
    oc_get_rolebinding = RegistryPoint()
    oc_get_role = RegistryPoint()
    oc_get_route = RegistryPoint()
    oc_get_service = RegistryPoint()
    oc_get_configmap = RegistryPoint()
    odbc_ini = RegistryPoint(filterable=True)
    odbcinst_ini = RegistryPoint()
    openvswitch_other_config = RegistryPoint()
    openshift_certificates = RegistryPoint(multi_output=True)
    openshift_fluentd_environ = RegistryPoint(multi_output=True)
    openshift_hosts = RegistryPoint(filterable=True)
    openshift_router_environ = RegistryPoint(multi_output=True)
    openvswitch_daemon_log = RegistryPoint(filterable=True)
    openvswitch_server_log = RegistryPoint(filterable=True)
    osa_dispatcher_log = RegistryPoint(filterable=True)
    ose_master_config = RegistryPoint()
    ose_node_config = RegistryPoint()
    os_release = RegistryPoint()
    ovirt_engine_boot_log = RegistryPoint(filterable=True)
    ovirt_engine_confd = RegistryPoint(multi_output=True)
    ovirt_engine_console_log = RegistryPoint(filterable=True)
    ovirt_engine_server_log = RegistryPoint(filterable=True)
    ovirt_engine_ui_log = RegistryPoint(filterable=True)
    ovs_appctl_fdb_show_bridge = RegistryPoint(multi_output=True)
    ovs_ofctl_dump_flows = RegistryPoint(multi_output=True)
    ovs_vsctl_list_bridge = RegistryPoint()
    ovs_vsctl_show = RegistryPoint()
    ovs_vswitchd_limits = RegistryPoint()
    pacemaker_log = RegistryPoint(filterable=True)
    package_provides_java = RegistryPoint(multi_output=True)
    package_provides_httpd = RegistryPoint(multi_output=True)
    pam_conf = RegistryPoint()
    parted__l = RegistryPoint()
    partitions = RegistryPoint()
    passenger_status = RegistryPoint()
    password_auth = RegistryPoint()
    pci_rport_target_disk_paths = RegistryPoint()
    pcs_config = RegistryPoint()
    pcs_quorum_status = RegistryPoint()
    pcs_status = RegistryPoint()
    pluginconf_d = RegistryPoint(multi_output=True)
    postgresql_conf = RegistryPoint()
    postgresql_log = RegistryPoint(multi_output=True, filterable=True)
    prev_uploader_log = RegistryPoint()
    proc_snmp_ipv4 = RegistryPoint()
    proc_snmp_ipv6 = RegistryPoint()
    proc_stat = RegistryPoint()
    ps_alxwww = RegistryPoint(filterable=True)
    ps_aux = RegistryPoint(filterable=True)
    ps_auxcww = RegistryPoint()
    ps_auxww = RegistryPoint(filterable=True)
    ps_ef = RegistryPoint(filterable=True)
    ps_eo = RegistryPoint()
    pulp_worker_defaults = RegistryPoint()
    puppet_ssl_cert_ca_pem = RegistryPoint()
    puppetserver_config = RegistryPoint(filterable=True)
    pvs_noheadings = RegistryPoint()
    pvs_noheadings_all = RegistryPoint()
    pvs = RegistryPoint()
    qemu_conf = RegistryPoint()
    qemu_xml = RegistryPoint(multi_output=True)
    qpid_stat_g = RegistryPoint()
    qpid_stat_q = RegistryPoint()
    qpid_stat_u = RegistryPoint()
    qpidd_conf = RegistryPoint()
    rabbitmq_env = RegistryPoint()
    rabbitmq_logs = RegistryPoint(multi_output=True, filterable=True)
    rabbitmq_policies = RegistryPoint()
    rabbitmq_queues = RegistryPoint()
    rabbitmq_report = RegistryPoint()
    rabbitmq_report_of_containers = RegistryPoint(multi_output=True)
    rabbitmq_startup_err = RegistryPoint(filterable=True)
    rabbitmq_startup_log = RegistryPoint(filterable=True)
    rabbitmq_users = RegistryPoint()
    rc_local = RegistryPoint()
    rdma_conf = RegistryPoint()
    redhat_release = RegistryPoint()
    resolv_conf = RegistryPoint()
    rhev_data_center = RegistryPoint()
    rhv_log_collector_analyzer = RegistryPoint()
    rhn_charsets = RegistryPoint()
    rhn_conf = RegistryPoint()
    rhn_entitlement_cert_xml = RegistryPoint(multi_output=True)
    rhn_hibernate_conf = RegistryPoint()
    rhn_schema_stats = RegistryPoint()
    rhn_schema_version = RegistryPoint()
    rhn_search_daemon_log = RegistryPoint(filterable=True)
    rhn_server_satellite_log = RegistryPoint(filterable=True)
    rhn_server_xmlrpc_log = RegistryPoint(filterable=True)
    rhn_taskomatic_daemon_log = RegistryPoint(filterable=False)
    rhsm_conf = RegistryPoint()
    rhsm_log = RegistryPoint(filterable=True)
    rndc_status = RegistryPoint()
    root_crontab = RegistryPoint()
    route = RegistryPoint()
    rpm_V_packages = RegistryPoint()
    rsyslog_conf = RegistryPoint(filterable=True)
    running_java = RegistryPoint()
    samba = RegistryPoint(filterable=True)
    sap_hdb_version = RegistryPoint(multi_output=True)
    sap_host_profile = RegistryPoint(filterable=True)
    sapcontrol_getsystemupdatelist = RegistryPoint()
    saphostctl_getcimobject_sapinstance = RegistryPoint(filterable=True)
    saphostexec_status = RegistryPoint()
    saphostexec_version = RegistryPoint()
    sat5_insights_properties = RegistryPoint()
    satellite_version_rb = RegistryPoint()
    scheduler = RegistryPoint(multi_output=True)
    scsi = RegistryPoint()
    sctp_asc = RegistryPoint()
    sctp_eps = RegistryPoint()
    scsi_eh_deadline = RegistryPoint(multi_output=True)
    scsi_fwver = RegistryPoint(multi_output=True)
    secure = RegistryPoint(filterable=True)
    selinux_config = RegistryPoint()
    semid = RegistryPoint()
    sestatus = RegistryPoint()
    setup_named_chroot = RegistryPoint(filterable=True)
    smartctl = RegistryPoint(multi_output=True)
    smartpdc_settings = RegistryPoint(filterable=True)
    smbstatus_p = RegistryPoint()
    smbstatus_S = RegistryPoint()
    softnet_stat = RegistryPoint()
    software_collections_list = RegistryPoint()
    spfile_ora = RegistryPoint(multi_output=True)
    ssh_config = RegistryPoint(filterable=True)
    ssh_foreman_config = RegistryPoint(filterable=True)
    ssh_foreman_proxy_config = RegistryPoint(filterable=True)
    sshd_config_perms = RegistryPoint()
    sshd_config = RegistryPoint(filterable=True)
    ss = RegistryPoint()
    sssd_config = RegistryPoint()
    sssd_logs = RegistryPoint(multi_output=True, filterable=True)
    subscription_manager_id = RegistryPoint()
    subscription_manager_list_consumed = RegistryPoint()
    subscription_manager_list_installed = RegistryPoint()
    subscription_manager_installed_product_ids = RegistryPoint(filterable=True)
    subscription_manager_release_show = RegistryPoint()
    swift_conf = RegistryPoint()
    swift_log = RegistryPoint(filterable=True)
    swift_object_expirer_conf = RegistryPoint()
    swift_proxy_server_conf = RegistryPoint()
    sysconfig_chronyd = RegistryPoint()
    sysconfig_httpd = RegistryPoint()
    sysconfig_irqbalance = RegistryPoint()
    sysconfig_kdump = RegistryPoint()
    sysconfig_libvirt_guests = RegistryPoint()
    sysconfig_memcached = RegistryPoint()
    sysconfig_mongod = RegistryPoint(multi_output=True)
    sysconfig_network = RegistryPoint()
    sysconfig_ntpd = RegistryPoint()
    sysconfig_prelink = RegistryPoint()
    sysconfig_sshd = RegistryPoint()
    sysconfig_virt_who = RegistryPoint()
    sysctl_conf_initramfs = RegistryPoint(multi_output=True)
    sysctl_conf = RegistryPoint()
    sysctl = RegistryPoint()
    systemctl_cat_rpcbind_socket = RegistryPoint()
    systemctl_cinder_volume = RegistryPoint()
    systemctl_httpd = RegistryPoint()
    systemctl_nginx = RegistryPoint()
    systemctl_list_unit_files = RegistryPoint()
    systemctl_list_units = RegistryPoint()
    systemctl_mariadb = RegistryPoint()
    systemctl_pulp_workers = RegistryPoint()
    systemctl_pulp_resmg = RegistryPoint()
    systemctl_pulp_celerybeat = RegistryPoint()
    systemctl_qpidd = RegistryPoint()
    systemctl_qdrouterd = RegistryPoint()
    systemctl_smartpdc = RegistryPoint()
    systemd_docker = RegistryPoint()
    systemd_logind_conf = RegistryPoint()
    systemd_openshift_node = RegistryPoint()
    systemd_system_conf = RegistryPoint()
    systemd_system_origin_accounting = RegistryPoint()
    systemid = RegistryPoint()
    systool_b_scsi_v = RegistryPoint()
    teamdctl_config_dump = RegistryPoint(multi_output=True)
    teamdctl_state_dump = RegistryPoint(multi_output=True)
    thp_enabled = RegistryPoint()
    thp_use_zero_page = RegistryPoint()
    tmpfilesd = RegistryPoint(multi_output=True)
    tomcat_server_xml = RegistryPoint(multi_output=True)
    tomcat_vdc_fallback = RegistryPoint()
    tomcat_vdc_targeted = RegistryPoint(multi_output=True)
    tomcat_web_xml = RegistryPoint(multi_output=True)
    tuned_adm = RegistryPoint()
    tuned_conf = RegistryPoint()
    udev_persistent_net_rules = RegistryPoint()
    ulimit_hard = RegistryPoint()
    uname = RegistryPoint()
    up2date = RegistryPoint()
    up2date_log = RegistryPoint(filterable=True)
    uploader_log = RegistryPoint()
    uptime = RegistryPoint()
    usr_journald_conf_d = RegistryPoint(multi_output=True)
    var_qemu_xml = RegistryPoint(multi_output=True)
    vdsm_conf = RegistryPoint()
    vdsm_id = RegistryPoint()
    vdsm_import_log = RegistryPoint(multi_output=True, filterable=True)
    vdsm_log = RegistryPoint(filterable=True)
    vdsm_logger_conf = RegistryPoint()
    version_info = RegistryPoint()
    vdo_status = RegistryPoint()
    vgdisplay = RegistryPoint()
    vgs_noheadings = RegistryPoint()
    vgs_noheadings_all = RegistryPoint()
    vgs = RegistryPoint()
    virsh_list_all = RegistryPoint()
    virt_what = RegistryPoint()
    virt_who_conf = RegistryPoint(multi_output=True, filterable=True)
    virtlogd_conf = RegistryPoint(filterable=True)
    vmcore_dmesg = RegistryPoint(multi_output=True, filterable=True)
    vmware_tools_conf = RegistryPoint()
    vsftpd_conf = RegistryPoint(filterable=True)
    vsftpd = RegistryPoint()
    woopsie = RegistryPoint()
    x86_pti_enabled = RegistryPoint()
    x86_ibpb_enabled = RegistryPoint()
    x86_ibrs_enabled = RegistryPoint()
    x86_retp_enabled = RegistryPoint()
    xfs_info = RegistryPoint(multi_output=True)
    xinetd_conf = RegistryPoint(multi_output=True)
    yum_conf = RegistryPoint()
    yum_list_installed = RegistryPoint()
    yum_log = RegistryPoint()
    yum_repolist = RegistryPoint()
    yum_repos_d = RegistryPoint(multi_output=True)
    zipl_conf = RegistryPoint()
b44e4943769f19418cc181ba078f19dd74547f61 | 365 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2019-05-17T21:24:53.000Z | 2020-02-12T11:13:42.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 226 | 2019-07-24T07:57:21.000Z | 2019-10-15T01:07:24.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/models.py | iscai-msft/azure-sdk-for-python | 83715b95c41e519d5be7f1180195e2fba136fc0f | [
"MIT"
] | 2 | 2020-05-21T22:51:22.000Z | 2020-05-26T20:53:01.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
from .v2019_10_preview.models import *
| 45.625 | 76 | 0.452055 |
38cea18b85b823c9a8786c6143e6d146a10900ba | 2,803 | py | Python | airom/postprocess.py | jcgh582/physio-rom | 15ec69a5a2a9c157d4dcfedac985c3549db5208e | [
"MIT"
] | null | null | null | airom/postprocess.py | jcgh582/physio-rom | 15ec69a5a2a9c157d4dcfedac985c3549db5208e | [
"MIT"
] | null | null | null | airom/postprocess.py | jcgh582/physio-rom | 15ec69a5a2a9c157d4dcfedac985c3549db5208e | [
"MIT"
] | null | null | null | import numpy as np
# Given an image and a range of motion test this will generate a report.
# ------------------------------ UTILITIES
def getJointROM(data, joint_ind):
    """Return the extremal angles observed for a single joint.

    data : dict produced by getAnglesInDir; data['angles'] is a 2-D array of
        per-frame joint angles (frames x joints), possibly containing NaNs.
    joint_ind : column index of the joint. In order: 'Right elbow',
        'Left elbow','Right Shoulder','Left Shoulder','Right Knee',
        'Left Knee','Right Hip','Left Hip'
    Returns a dict with 'min' and 'max' keys (NaN-aware extrema).
    """
    joint_angles = data['angles'][:, joint_ind]
    return {'min': np.nanmin(joint_angles), 'max': np.nanmax(joint_angles)}
def getJointROM_frames(data, joint_ind, num_frames):
    """Select frames that best sample a joint's range of motion.

    Picks the rendered frames whose joint angle is closest to linearly
    spaced target values between the joint's extremal (min/max) angles.

    data : output from getAnglesInDir (needs 'angles', 'angles_sign' and
        'confidence' 2-D arrays, frames x joints)
    joint_ind : column index of the joint
    num_frames : number of frames to retrieve showing the ROM
    """
    joint_angles = data['angles'][:, joint_ind]
    # ROM extremes (NaN-aware); the helper's min/max computation is inlined.
    lo = np.nanmin(joint_angles)
    hi = np.nanmax(joint_angles)
    # Ideal, linearly spaced target angles across the ROM.
    ideal_angles = np.linspace(lo, hi, num=num_frames)
    # For each target, the frame whose angle is nearest to it.
    distances = np.abs(ideal_angles.reshape(len(ideal_angles), 1) - joint_angles)
    nearest_inds = np.nanargmin(distances, axis=1)
    return {
        'angles_ideal': ideal_angles,
        'angles': joint_angles[nearest_inds],
        'angles_frame_inds': nearest_inds,
        'angles_sign': data['angles_sign'][:, joint_ind][nearest_inds],
        'angles_conf': data['confidence'][:, joint_ind][nearest_inds],
    }
# ------------------------------ POSTPROCESS
def cleanFloats(x):
    """Round *x* to the nearest whole number and return it as a string."""
    rounded = round(x, 0)
    return str(int(rounded))
def testing(data):
    """Return the placeholder SVG report template (ignores *data*)."""
    with open("airom/report-templates/template-link.svg") as template_file:
        return template_file.read()
def elbowJointROM(data):
    """Render the elbow ROM report by filling in the SVG template.

    Samples three frames across the elbow joint's (joint index 0) range of
    motion and substitutes the min/max/ROM angles and a mean confidence into
    the template's placeholder tokens.
    """
    with open("airom/report-templates/elbow-template.svg") as template_file:
        report = template_file.read()
    frames = getJointROM_frames(data, 0, 3)
    low = frames['angles'][0]
    high = frames['angles'][2]
    confidence = (frames['angles_conf'][1] + frames['angles_conf'][2]) / 2.0
    substitutions = (
        ("testmin", cleanFloats(180 - low)),
        ("testmax", cleanFloats(high)),
        ("testrom", cleanFloats(high - low)),
        ("testconf", str(round(confidence, 2))),
    )
    for placeholder, value in substitutions:
        report = report.replace(placeholder, value)
    # embed stages of the motion
    return report
# Dispatch table mapping a report-type id to its renderer. Both the numeric
# ids and the string aliases are accepted by postproc().
options = {
    0: testing,
    1: elbowJointROM,
    "test": testing,
    "elbow": elbowJointROM,
}
def postproc(data, romt):
    """Render a report for *data* using the renderer registered under *romt*.

    romt : report type id (key of the module-level ``options`` dict);
        each renderer is a function(data) -> svg string.
    Raises KeyError for an unknown report type.
    """
    renderer = options[romt]
    return renderer(data)
| 40.623188 | 191 | 0.683553 |
76a2be026f14f9b8d542dad6a4376191c7f9ab1f | 466 | py | Python | photos/urls.py | petersoleeh/TheGram | 4ab3fc8a6caaa59cf71868efa035ec9418ce254a | [
"MIT"
] | 2 | 2018-10-17T18:08:01.000Z | 2020-05-17T02:53:43.000Z | photos/urls.py | petersoleeh/TheGram | 4ab3fc8a6caaa59cf71868efa035ec9418ce254a | [
"MIT"
] | null | null | null | photos/urls.py | petersoleeh/TheGram | 4ab3fc8a6caaa59cf71868efa035ec9418ce254a | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
from django.conf import settings
from django.conf.urls.static import static
urlpatterns=[
url(r'^$',views.index,name='index'),
url(r'^profile/',views.profile,name='profile'),
url(r'^new/post/', views.new_post, name='new-post'),
url(r'^profile/edit',views.update_profile,name='edit-profile'),
]
if settings.DEBUG:
urlpatterns+= static(settings.MEDIA_URL, document_root = settings.MEDIA_ROOT)
| 31.066667 | 81 | 0.725322 |
a400ac4d939f84927b822cc42e5b2a1cbee87971 | 12,995 | py | Python | recognition/ArcFace/train_parall.py | hhsummerwind/insightface | a78836fdf8a609c7d2ddc0460a98d163156f459f | [
"MIT"
] | 1 | 2021-02-05T18:44:06.000Z | 2021-02-05T18:44:06.000Z | recognition/ArcFace/train_parall.py | charmere/insightface | 60bb5829b1d76bfcec7930ce61c41dde26413279 | [
"MIT"
] | null | null | null | recognition/ArcFace/train_parall.py | charmere/insightface | 60bb5829b1d76bfcec7930ce61c41dde26413279 | [
"MIT"
] | null | null | null |
'''
@author: insightface
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import math
import random
import logging
import pickle
import sklearn
import numpy as np
from image_iter import FaceImageIter
import mxnet as mx
from mxnet import ndarray as nd
import argparse
import mxnet.optimizer as optimizer
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'common'))
import flops_counter
from config import config, default, generate_config
import verification
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'symbol'))
import fresnet
import fmobilefacenet
import fmobilenet
import fmnasnet
import fdensenet
import vargfacenet
logger = logging.getLogger()
logger.setLevel(logging.INFO)
args = None
def parse_args():
    """Parse command line arguments for training.

    The network/dataset/loss names are pre-parsed first so that
    ``generate_config`` can populate the global ``config``/``default``
    objects before the remaining options read their defaults from them.
    """
    parser = argparse.ArgumentParser(description='Train parall face network')
    # general
    parser.add_argument('--dataset', default=default.dataset, help='dataset config')
    parser.add_argument('--network', default=default.network, help='network config')
    parser.add_argument('--loss', default=default.loss, help='loss config')
    # Pre-parse the three selector args; this fills `default` for the rest.
    args, rest = parser.parse_known_args()
    generate_config(args.network, args.dataset, args.loss)
    parser.add_argument('--models-root', default=default.models_root, help='root directory to save model.')
    parser.add_argument('--pretrained', default=default.pretrained, help='pretrained model to load')
    parser.add_argument('--pretrained-epoch', type=int, default=default.pretrained_epoch, help='pretrained epoch to load')
    parser.add_argument('--ckpt', type=int, default=default.ckpt, help='checkpoint saving option. 0: discard saving. 1: save when necessary. 2: always save')
    parser.add_argument('--verbose', type=int, default=default.verbose, help='do verification testing and model saving every verbose batches')
    parser.add_argument('--lr', type=float, default=default.lr, help='start learning rate')
    parser.add_argument('--lr-steps', type=str, default=default.lr_steps, help='steps of lr changing')
    parser.add_argument('--wd', type=float, default=default.wd, help='weight decay')
    parser.add_argument('--mom', type=float, default=default.mom, help='momentum')
    parser.add_argument('--frequent', type=int, default=default.frequent, help='')
    parser.add_argument('--per-batch-size', type=int, default=default.per_batch_size, help='batch size in each context')
    parser.add_argument('--kvstore', type=str, default=default.kvstore, help='kvstore setting')
    parser.add_argument('--worker-id', type=int, default=0, help='worker id for dist training, starts from 0')
    parser.add_argument('--extra-model-name', type=str, default='', help='extra model name')
    args = parser.parse_args()
    return args
def get_symbol_embedding():
    """Build the embedding-network symbol grouped with a gradient-blocked label.

    The backbone named by ``config.net_name`` provides the embedding; the
    label symbol is passed through with its gradient blocked so downstream
    loss modules can consume it.
    """
    backbone = eval(config.net_name).get_symbol()
    label_sym = mx.symbol.Variable('softmax_label')
    #backbone = mx.symbol.BlockGrad(backbone)
    label_sym = mx.symbol.BlockGrad(label_sym)
    return mx.symbol.Group([backbone, label_sym])
def get_symbol_arcface(args):
    """Build this context's slice of the fc7 classification head.

    For ``margin_softmax`` the target-class logit is transformed from
    ``cos(theta)`` to ``cos(m1*theta + m2) - m3`` and all logits are scaled
    by ``config.loss_s`` (ArcFace/CosFace-style additive margins); the
    non-target logits stay plain cosines. ``args._ctxid`` names the
    per-context weight so each device holds its own class partition.
    """
    embedding = mx.symbol.Variable('data')
    all_label = mx.symbol.Variable('softmax_label')
    gt_label = all_label
    is_softmax = True
    #print('call get_sym_arcface with', args, config)
    _weight = mx.symbol.Variable("fc7_%d_weight" % args._ctxid, shape=(args.ctx_num_classes, config.emb_size),
                                 lr_mult=config.fc7_lr_mult, wd_mult=config.fc7_wd_mult)
    if config.loss_name == 'softmax':  # softmax
        fc7 = mx.sym.FullyConnected(data=embedding, weight=_weight, no_bias=True, num_hidden=args.ctx_num_classes, name='fc7_%d' % args._ctxid)
    elif config.loss_name == 'margin_softmax':
        # L2-normalize weights and embeddings so fc7 outputs are cosines.
        _weight = mx.symbol.L2Normalization(_weight, mode='instance')
        nembedding = mx.symbol.L2Normalization(embedding, mode='instance', name='fc1n_%d' % args._ctxid)
        fc7 = mx.sym.FullyConnected(data=nembedding, weight=_weight, no_bias=True, num_hidden=args.ctx_num_classes, name='fc7_%d' % args._ctxid)
        if config.loss_m1 != 1.0 or config.loss_m2 != 0.0 or config.loss_m3 != 0.0:
            gt_one_hot = mx.sym.one_hot(gt_label, depth=args.ctx_num_classes, on_value=1.0, off_value=0.0)
            if config.loss_m1 == 1.0 and config.loss_m2 == 0.0:
                # CosFace-only case: subtract m3 from the target cosine.
                _one_hot = gt_one_hot * config.loss_m3
                fc7 = fc7 - _one_hot
            else:
                # General case: apply the margin in angle space on the
                # target logit only, then fold the difference back in.
                fc7_onehot = fc7 * gt_one_hot
                cos_t = fc7_onehot
                t = mx.sym.arccos(cos_t)
                if config.loss_m1 != 1.0:
                    t = t * config.loss_m1
                if config.loss_m2 != 0.0:
                    t = t + config.loss_m2
                margin_cos = mx.sym.cos(t)
                if config.loss_m3 != 0.0:
                    margin_cos = margin_cos - config.loss_m3
                margin_fc7 = margin_cos
                margin_fc7_onehot = margin_fc7 * gt_one_hot
                diff = margin_fc7_onehot - fc7_onehot
                fc7 = fc7 + diff
        fc7 = fc7 * config.loss_s
    out_list = []
    out_list.append(fc7)
    if config.loss_name == 'softmax':  # softmax
        out_list.append(gt_label)
    out = mx.symbol.Group(out_list)
    return out
def train_net(args):
    """Train the recognition network with data-parallel contexts.

    Builds the embedding symbol plus per-context margin-softmax heads
    (the classification layer is model-parallel across GPUs/workers),
    wires up the training iterator, SGD optimizer and periodic
    verification/checkpoint callbacks, then runs ``model.fit``.
    """
    #_seed = 727
    #random.seed(_seed)
    #np.random.seed(_seed)
    #mx.random.seed(_seed)
    ctx = []
    # Fix: use .get() so a missing CUDA_VISIBLE_DEVICES variable falls back
    # to the CPU path below instead of raising KeyError.
    cvd = os.environ.get('CUDA_VISIBLE_DEVICES', '').strip()
    if len(cvd) > 0:
        for i in range(len(cvd.split(','))):
            ctx.append(mx.gpu(i))
    if len(ctx) == 0:
        ctx = [mx.cpu()]
        print('use cpu')
    else:
        print('gpu num:', len(ctx))
    # Checkpoint prefix encodes network/loss/dataset (and optional suffix).
    if len(args.extra_model_name) == 0:
        prefix = os.path.join(args.models_root, '%s-%s-%s' % (args.network, args.loss, args.dataset), 'model')
    else:
        prefix = os.path.join(args.models_root, '%s-%s-%s-%s' % (args.network, args.loss, args.dataset, args.extra_model_name), 'model')
    prefix_dir = os.path.dirname(prefix)
    print('prefix', prefix)
    if not os.path.exists(prefix_dir):
        os.makedirs(prefix_dir)
    args.ctx_num = len(ctx)
    if args.per_batch_size == 0:
        args.per_batch_size = 128
    args.batch_size = args.per_batch_size * args.ctx_num
    args.rescale_threshold = 0
    args.image_channel = config.image_shape[2]
    config.batch_size = args.batch_size
    config.per_batch_size = args.per_batch_size
    data_dir = config.dataset_path
    path_imgrec = None
    path_imglist = None
    image_size = config.image_shape[0:2]
    assert len(image_size) == 2
    assert image_size[0] == image_size[1]
    print('image_size', image_size)
    print('num_classes', config.num_classes)
    path_imgrec = os.path.join(data_dir, "train.rec")
    data_shape = (args.image_channel, image_size[0], image_size[1])
    num_workers = config.num_workers
    global_num_ctx = num_workers * args.ctx_num
    # Split the classification layer evenly across all contexts of all
    # workers, rounding up when it does not divide evenly.
    if config.num_classes % global_num_ctx == 0:
        args.ctx_num_classes = config.num_classes // global_num_ctx
    else:
        args.ctx_num_classes = config.num_classes // global_num_ctx + 1
    args.local_num_classes = args.ctx_num_classes * args.ctx_num
    args.local_class_start = args.local_num_classes * args.worker_id
    #if len(args.partial)==0:
    #  local_classes_range = (0, args.num_classes)
    #else:
    #  _vec = args.partial.split(',')
    #  local_classes_range = (int(_vec[0]), int(_vec[1]))
    #args.partial_num_classes = local_classes_range[1] - local_classes_range[0]
    #args.partial_start = local_classes_range[0]
    print('Called with argument:', args, config)
    mean = None
    begin_epoch = 0
    base_lr = args.lr
    base_wd = args.wd
    base_mom = args.mom
    arg_params = None
    aux_params = None
    if len(args.pretrained) == 0:
        esym = get_symbol_embedding()
        asym = get_symbol_arcface
    else:
        #assert False
        print('loading', args.pretrained, args.pretrained_epoch)
        _, arg_params, aux_params = mx.model.load_checkpoint(args.pretrained, args.pretrained_epoch)
        esym = get_symbol_embedding()
        asym = get_symbol_arcface
    if config.count_flops:
        all_layers = esym.get_internals()
        _sym = all_layers['fc1_output']
        FLOPs = flops_counter.count_flops(_sym, data=(1, 3, image_size[0], image_size[1]))
        _str = flops_counter.flops_str(FLOPs)
        print('Network FLOPs: %s' % _str)
    # Single-machine vs distributed parallel module implementation.
    if config.num_workers == 1:
        from parall_module_local_v1 import ParallModule
    else:
        from parall_module_dist import ParallModule
    model = ParallModule(
        context=ctx,
        symbol=esym,
        data_names=['data'],
        label_names=['softmax_label'],
        asymbol=asym,
        args=args,
    )
    val_dataiter = None
    train_dataiter = FaceImageIter(
        batch_size=args.batch_size,
        data_shape=data_shape,
        path_imgrec=path_imgrec,
        shuffle=True,
        rand_mirror=config.data_rand_mirror,
        mean=mean,
        cutoff=config.data_cutoff,
        color_jittering=config.data_color,
        images_filter=config.data_images_filter,
    )
    if config.net_name == 'fresnet' or config.net_name == 'fmobilefacenet':
        initializer = mx.init.Xavier(rnd_type='gaussian', factor_type="out", magnitude=2)  # resnet style
    else:
        initializer = mx.init.Xavier(rnd_type='uniform', factor_type="in", magnitude=2)
    _rescale = 1.0 / args.batch_size
    opt = optimizer.SGD(learning_rate=base_lr, momentum=base_mom, wd=base_wd, rescale_grad=_rescale)
    _cb = mx.callback.Speedometer(args.batch_size, args.frequent)
    # Load every verification .bin set that exists next to the train data.
    ver_list = []
    ver_name_list = []
    for name in config.val_targets:
        path = os.path.join(data_dir, name + ".bin")
        if os.path.exists(path):
            data_set = verification.load_bin(path, image_size)
            ver_list.append(data_set)
            ver_name_list.append(name)
            print('ver', name)

    def ver_test(nbatch):
        # Run every loaded verification set; returns flip-accuracy per set.
        results = []
        for i in range(len(ver_list)):
            acc1, std1, acc2, std2, xnorm, embeddings_list = verification.test(ver_list[i], model, args.batch_size, 10, None, None)
            print('[%s][%d]XNorm: %f' % (ver_name_list[i], nbatch, xnorm))
            #print('[%s][%d]Accuracy: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc1, std1))
            print('[%s][%d]Accuracy-Flip: %1.5f+-%1.5f' % (ver_name_list[i], nbatch, acc2, std2))
            results.append(acc2)
        return results

    highest_acc = [0.0, 0.0]  # lfw and target
    #for i in range(len(ver_list)):
    #  highest_acc.append(0.0)
    # Lists so the nested callback can mutate these counters.
    global_step = [0]
    save_step = [0]
    lr_steps = [int(x) for x in args.lr_steps.split(',')]
    print('lr_steps', lr_steps)

    def _batch_callback(param):
        #global global_step
        global_step[0] += 1
        mbatch = global_step[0]
        # Step-decay schedule: multiply lr by 0.1 at each listed batch.
        for step in lr_steps:
            if mbatch == step:
                opt.lr *= 0.1
                print('lr change to', opt.lr)
                break
        _cb(param)
        if mbatch % 1000 == 0:
            print('lr-batch-epoch:', opt.lr, param.nbatch, param.epoch)
        if mbatch >= 0 and mbatch % args.verbose == 0:
            acc_list = ver_test(mbatch)
            save_step[0] += 1
            msave = save_step[0]
            do_save = False
            is_highest = False
            if len(acc_list) > 0:
                #lfw_score = acc_list[0]
                #if lfw_score>highest_acc[0]:
                #  highest_acc[0] = lfw_score
                #  if lfw_score>=0.998:
                #    do_save = True
                score = sum(acc_list)
                if acc_list[-1] >= highest_acc[-1]:
                    if acc_list[-1] > highest_acc[-1]:
                        is_highest = True
                    else:
                        if score >= highest_acc[0]:
                            is_highest = True
                    highest_acc[0] = score
                    highest_acc[-1] = acc_list[-1]
                    #if lfw_score>=0.99:
                    #  do_save = True
            if is_highest:
                do_save = True
            # args.ckpt policy: 0 never save, 2 always save, 3 overwrite slot 1.
            if args.ckpt == 0:
                do_save = False
            elif args.ckpt == 2:
                do_save = True
            elif args.ckpt == 3:
                msave = 1
            if do_save:
                print('saving', msave)
                # Export only the embedding subgraph (up to fc1), not the heads.
                arg, aux = model.get_export_params()
                all_layers = model.symbol.get_internals()
                _sym = all_layers['fc1_output']
                mx.model.save_checkpoint(prefix, msave, _sym, arg, aux)
            print('[%d]Accuracy-Highest: %1.5f' % (mbatch, highest_acc[-1]))
        if config.max_steps > 0 and mbatch > config.max_steps:
            sys.exit(0)

    epoch_cb = None
    train_dataiter = mx.io.PrefetchingIter(train_dataiter)
    model.fit(train_dataiter,
              begin_epoch=begin_epoch,
              num_epoch=999999,
              eval_data=val_dataiter,
              #eval_metric = eval_metrics,
              kvstore=args.kvstore,
              optimizer=opt,
              #optimizer_params = optimizer_params,
              initializer=initializer,
              arg_params=arg_params,
              aux_params=aux_params,
              allow_missing=True,
              batch_end_callback=_batch_callback,
              epoch_end_callback=epoch_cb)
def main():
    """CLI entry point: parse arguments into the module-global ``args`` and train."""
    global args
    args = parse_args()
    train_net(args)


if __name__ == '__main__':
    main()
| 36.298883 | 155 | 0.656406 |
a3304d53baf0a00ebc8ef3b517856ec6cfb00538 | 1,451 | py | Python | auth-api/tests/unit/utils/test_logging.py | karthik-aot/sbc-auth | f24028040fda67d4f10ae9b608b8832c15d2a8ad | [
"Apache-2.0"
] | 11 | 2019-09-26T06:58:25.000Z | 2022-01-26T06:19:39.000Z | auth-api/tests/unit/utils/test_logging.py | karthik-aot/sbc-auth | f24028040fda67d4f10ae9b608b8832c15d2a8ad | [
"Apache-2.0"
] | 1,622 | 2019-05-07T21:08:38.000Z | 2022-03-28T17:07:15.000Z | auth-api/tests/unit/utils/test_logging.py | karthik-aot/sbc-auth | f24028040fda67d4f10ae9b608b8832c15d2a8ad | [
"Apache-2.0"
] | 98 | 2019-03-01T21:36:15.000Z | 2021-12-01T22:11:25.000Z | # Copyright © 2019 Province of British Columbia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests to assure the logging utilities.
Test-Suite to ensure that the logging setup is working as expected.
"""
import os
from auth_api.utils.util_logging import setup_logging
def test_logging_with_file(capsys):
    """Verify logging is configured when the conf file exists."""
    here = os.path.abspath(os.path.dirname(__file__))
    conf_path = os.path.join(here, 'logging.conf')
    # setup_logging must run before we read the captured output.
    setup_logging(conf_path)
    stdout = capsys.readouterr().out
    assert stdout.startswith('Configure logging, from conf')
def test_logging_with_missing_file(capsys):
    """Verify a message goes to STDERR when no configuration file is given."""
    # A None path simulates a missing logging configuration.
    setup_logging(None)
    stderr = capsys.readouterr().err
    assert stderr.startswith('Unable to configure logging')
| 33.744186 | 88 | 0.753274 |
44375316ce9cbbe8fe3381afb16d30f77dbbced3 | 15,242 | py | Python | locust/argument_parser.py | lhupfeldt/locust | dd14aac36e1aaa99a8acdb8ab6ad926ef8843ca1 | [
"MIT"
] | 1 | 2021-10-22T02:58:36.000Z | 2021-10-22T02:58:36.000Z | locust/argument_parser.py | FremanZhang/locust | aa7b34db3a87fa5cd7a2fa9e4030777f8ca2f3a3 | [
"MIT"
] | null | null | null | locust/argument_parser.py | FremanZhang/locust | aa7b34db3a87fa5cd7a2fa9e4030777f8ca2f3a3 | [
"MIT"
] | null | null | null | import argparse
import os
import sys
import textwrap
import configargparse
import locust
version = locust.__version__
DEFAULT_CONFIG_FILES = ["~/.locust.conf", "locust.conf"]
def _is_package(path):
"""
Is the given path a Python package?
"""
return os.path.isdir(path) and os.path.exists(os.path.join(path, "__init__.py"))
def find_locustfile(locustfile):
    """
    Locate a locustfile: check an explicit path directly, or search from the
    current directory up toward the filesystem root for a bare name.
    Returns the absolute path, or None when nothing matches.
    """
    candidates = [locustfile]
    # Also try the name with a .py suffix appended.
    if not locustfile.endswith(".py"):
        candidates.append(locustfile + ".py")
    if os.path.dirname(candidates[0]):
        # Explicit path given: expand '~' and test each candidate.
        for candidate in candidates:
            expanded = os.path.expanduser(candidate)
            if os.path.exists(expanded) and (
                candidate.endswith(".py") or _is_package(expanded)
            ):
                return os.path.abspath(expanded)
        return None
    # Bare name: walk parent directories starting at the cwd.
    directory = os.path.abspath(".")
    while True:
        for candidate in candidates:
            joined = os.path.join(directory, candidate)
            if os.path.exists(joined) and (
                candidate.endswith(".py") or _is_package(joined)
            ):
                return os.path.abspath(joined)
        parent = os.path.dirname(directory)
        if parent == directory:
            # Reached the filesystem root without a match.
            return None
        directory = parent
def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES):
    """Return a minimal parser knowing only -f/--locustfile and --config.

    Used both as the base of the full parser and for the early pre-parse
    that locates the locustfile before plugins register their options.
    """
    parser = configargparse.ArgumentParser(
        default_config_files=default_config_files,
        add_env_var_help=False,
        add_config_file_help=False,
        add_help=add_help,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage=argparse.SUPPRESS,
        description=textwrap.dedent(
            """
            Usage: locust [OPTIONS] [UserClass ...]
            """
        ),
        # epilog="",
    )
    parser.add_argument(
        "-f",
        "--locustfile",
        default="locustfile",
        help="Python module file to import, e.g. '../other.py'. Default: locustfile",
        env_var="LOCUST_LOCUSTFILE",
    )
    parser.add_argument("--config", is_config_file_arg=True, help="Config file path")
    return parser
def parse_locustfile_option(args=None):
    """
    Construct a command line parser that is only used to parse the -f argument so that we can
    import the test scripts in case any of them adds additional command line arguments to the
    parser.

    Returns the absolute path of the located locustfile, or exits the
    process with status 1 when none can be found or it is named locust.py.
    """
    parser = get_empty_argument_parser(add_help=False)
    # --help/--version are declared here only so they can be detected during
    # the pre-parse; the full parser is the one that actually prints them.
    parser.add_argument(
        "-h",
        "--help",
        action="store_true",
        default=False,
    )
    parser.add_argument(
        "--version",
        "-V",
        action="store_true",
        default=False,
    )
    options, _ = parser.parse_known_args(args=args)
    locustfile = find_locustfile(options.locustfile)
    if not locustfile:
        if options.help or options.version:
            # if --help or --version is specified we'll call parse_options which will print the help/version message
            parse_options(args=args)
        sys.stderr.write(
            "Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n"
        )
        sys.exit(1)
    if locustfile == "locust.py":
        sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n")
        sys.exit(1)
    return locustfile
def setup_parser_arguments(parser):
    """
    Setup command-line options

    Takes a configargparse.ArgumentParser as argument and calls its add_argument
    for each of the supported arguments
    """
    parser._optionals.title = "Common options"
    parser.add_argument(
        "-H",
        "--host",
        help="Host to load test in the following format: http://10.21.32.33",
        env_var="LOCUST_HOST",
    )
    parser.add_argument(
        "-u",
        "--users",
        type=int,
        dest="num_users",
        help="Number of concurrent Locust users. Primarily used together with --headless",
        env_var="LOCUST_USERS",
    )
    parser.add_argument(
        "-r",
        "--spawn-rate",
        type=float,
        help="The rate per second in which users are spawned. Primarily used together with --headless",
        env_var="LOCUST_SPAWN_RATE",
    )
    # Deprecated alias kept hidden from --help output.
    parser.add_argument(
        "--hatch-rate",
        env_var="LOCUST_HATCH_RATE",
        type=float,
        default=0,
        help=configargparse.SUPPRESS,
    )
    parser.add_argument(
        "-t",
        "--run-time",
        help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless",
        env_var="LOCUST_RUN_TIME",
    )
    parser.add_argument(
        "-l",
        "--list",
        action="store_true",
        dest="list_commands",
        help="Show list of possible User classes and exit",
    )

    # Options controlling the built-in web interface.
    web_ui_group = parser.add_argument_group("Web UI options")
    web_ui_group.add_argument(
        "--web-host",
        default="",
        help="Host to bind the web interface to. Defaults to '*' (all interfaces)",
        env_var="LOCUST_WEB_HOST",
    )
    web_ui_group.add_argument(
        "--web-port",
        "-P",
        type=int,
        default=8089,
        help="Port on which to run web host",
        env_var="LOCUST_WEB_PORT",
    )
    web_ui_group.add_argument(
        "--headless",
        action="store_true",
        help="Disable the web interface, and instead start the load test immediately. Requires -u and -t to be specified.",
        env_var="LOCUST_HEADLESS",
    )
    web_ui_group.add_argument(
        "--web-auth",
        type=str,
        dest="web_auth",
        default=None,
        help="Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password",
        env_var="LOCUST_WEB_AUTH",
    )
    web_ui_group.add_argument(
        "--tls-cert",
        default="",
        help="Optional path to TLS certificate to use to serve over HTTPS",
        env_var="LOCUST_TLS_CERT",
    )
    web_ui_group.add_argument(
        "--tls-key",
        default="",
        help="Optional path to TLS private key to use to serve over HTTPS",
        env_var="LOCUST_TLS_KEY",
    )

    master_group = parser.add_argument_group(
        "Master options",
        "Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.",
    )
    # if locust should be run in distributed mode as master
    master_group.add_argument(
        "--master",
        action="store_true",
        help="Set locust to run in distributed mode with this process as master",
        env_var="LOCUST_MODE_MASTER",
    )
    master_group.add_argument(
        "--master-bind-host",
        default="*",
        help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces).",
        env_var="LOCUST_MASTER_BIND_HOST",
    )
    master_group.add_argument(
        "--master-bind-port",
        type=int,
        default=5557,
        help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557.",
        env_var="LOCUST_MASTER_BIND_PORT",
    )
    master_group.add_argument(
        "--expect-workers",
        type=int,
        default=1,
        help="How many workers master should expect to connect before starting the test (only when --headless used).",
        env_var="LOCUST_EXPECT_WORKERS",
    )
    # Deprecated terminology alias, hidden from --help.
    master_group.add_argument(
        "--expect-slaves",
        action="store_true",
        help=configargparse.SUPPRESS,
    )

    worker_group = parser.add_argument_group(
        "Worker options",
        textwrap.dedent(
            """
            Options for running a Locust Worker node when running Locust distributed.
            Only the LOCUSTFILE (-f option) need to be specified when starting a Worker, since other options such as -u, -r, -t are specified on the Master node.
            """
        ),
    )
    # if locust should be run in distributed mode as worker
    worker_group.add_argument(
        "--worker",
        action="store_true",
        help="Set locust to run in distributed mode with this process as worker",
        env_var="LOCUST_MODE_WORKER",
    )
    # Deprecated terminology alias, hidden from --help.
    worker_group.add_argument(
        "--slave",
        action="store_true",
        help=configargparse.SUPPRESS,
    )
    # master host options
    worker_group.add_argument(
        "--master-host",
        default="127.0.0.1",
        help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1.",
        env_var="LOCUST_MASTER_NODE_HOST",
        metavar="MASTER_NODE_HOST",
    )
    worker_group.add_argument(
        "--master-port",
        type=int,
        default=5557,
        help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557.",
        env_var="LOCUST_MASTER_NODE_PORT",
        metavar="MASTER_NODE_PORT",
    )

    tag_group = parser.add_argument_group(
        "Tag options",
        "Locust tasks can be tagged using the @tag decorator. These options let specify which tasks to include or exclude during a test.",
    )
    tag_group.add_argument(
        "-T",
        "--tags",
        nargs="*",
        metavar="TAG",
        env_var="LOCUST_TAGS",
        help="List of tags to include in the test, so only tasks with any matching tags will be executed",
    )
    tag_group.add_argument(
        "-E",
        "--exclude-tags",
        nargs="*",
        metavar="TAG",
        env_var="LOCUST_EXCLUDE_TAGS",
        help="List of tags to exclude from the test, so only tasks with no matching tags will be executed",
    )

    stats_group = parser.add_argument_group("Request statistics options")
    stats_group.add_argument(
        "--csv",  # Name repeated in 'parse_options'
        dest="csv_prefix",
        help="Store current request stats to files in CSV format. Setting this option will generate three files: [CSV_PREFIX]_stats.csv, [CSV_PREFIX]_stats_history.csv and [CSV_PREFIX]_failures.csv",
        env_var="LOCUST_CSV",
    )
    stats_group.add_argument(
        "--csv-full-history",  # Name repeated in 'parse_options'
        action="store_true",
        default=False,
        dest="stats_history_enabled",
        help="Store each stats entry in CSV format to _stats_history.csv file. You must also specify the '--csv' argument to enable this.",
        env_var="LOCUST_CSV_FULL_HISTORY",
    )
    stats_group.add_argument(
        "--print-stats",
        action="store_true",
        help="Print stats in the console",
        env_var="LOCUST_PRINT_STATS",
    )
    stats_group.add_argument(
        "--only-summary",
        action="store_true",
        help="Only print the summary stats",
        env_var="LOCUST_ONLY_SUMMARY",
    )
    stats_group.add_argument(
        "--reset-stats",
        action="store_true",
        help="Reset statistics once spawning has been completed. Should be set on both master and workers when running in distributed mode",
        env_var="LOCUST_RESET_STATS",
    )

    log_group = parser.add_argument_group("Logging options")
    log_group.add_argument(
        "--skip-log-setup",
        action="store_true",
        dest="skip_log_setup",
        default=False,
        help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults.",
        env_var="LOCUST_SKIP_LOG_SETUP",
    )
    log_group.add_argument(
        "--loglevel",
        "-L",
        default="INFO",
        help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.",
        env_var="LOCUST_LOGLEVEL",
    )
    log_group.add_argument(
        "--logfile",
        help="Path to log file. If not set, log will go to stdout/stderr",
        env_var="LOCUST_LOGFILE",
    )

    # Legacy step-load options, all hidden from --help.
    step_load_group = parser.add_argument_group("Step load options")
    step_load_group.add_argument("--step-load", action="store_true", help=configargparse.SUPPRESS)
    step_load_group.add_argument("--step-users", type=int, help=configargparse.SUPPRESS)
    step_load_group.add_argument("--step-clients", action="store_true", help=configargparse.SUPPRESS)
    step_load_group.add_argument("--step-time", help=configargparse.SUPPRESS)

    other_group = parser.add_argument_group("Other options")
    other_group.add_argument(
        "--show-task-ratio", action="store_true", help="Print table of the User classes' task execution ratio"
    )
    other_group.add_argument(
        "--show-task-ratio-json", action="store_true", help="Print json data of the User classes' task execution ratio"
    )
    # optparse gives you --version but we have to do it ourselves to get -V too
    other_group.add_argument(
        "--version",
        "-V",
        action="version",
        help="Show program's version number and exit",
        version="%(prog)s {}".format(version),
    )
    other_group.add_argument(
        "--exit-code-on-error",
        type=int,
        default=1,
        help="Sets the process exit code to use when a test result contain any failure or error",
        env_var="LOCUST_EXIT_CODE_ON_ERROR",
    )
    other_group.add_argument(
        "-s",
        "--stop-timeout",
        action="store",
        type=int,
        dest="stop_timeout",
        default=None,
        help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed.",
        env_var="LOCUST_STOP_TIMEOUT",
    )

    user_classes_group = parser.add_argument_group("User classes")
    user_classes_group.add_argument(
        "user_classes",
        nargs="*",
        metavar="UserClass",
        help="Optionally specify which User classes that should be used (available User classes can be listed with -l or --list)",
    )
def get_parser(default_config_files=DEFAULT_CONFIG_FILES):
    """Build the fully populated command line parser."""
    # Start from the minimal parser (knows only -f/--config) and layer on
    # every built-in option.
    full_parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files)
    setup_parser_arguments(full_parser)
    # Give locustfiles and plugins a chance to register extra options.
    locust.events.init_command_line_parser.fire(parser=full_parser)
    return full_parser
def parse_options(args=None):
    """Parse command line options, aborting on invalid flag combinations."""
    parser = get_parser()
    opts = parser.parse_args(args=args)
    # --csv-full-history only makes sense when --csv supplies a file prefix.
    if opts.stats_history_enabled and opts.csv_prefix is None:
        parser.error("'--csv-full-history' requires '--csv'.")
    return opts
| 35.200924 | 244 | 0.637449 |
c99407499393329c581e7346e562f93b61523886 | 10,728 | py | Python | tests/components/somfy_mylink/test_config_flow.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 1 | 2021-07-08T20:09:55.000Z | 2021-07-08T20:09:55.000Z | tests/components/somfy_mylink/test_config_flow.py | pcaston/core | e74d946cef7a9d4e232ae9e0ba150d18018cfe33 | [
"Apache-2.0"
] | 47 | 2021-02-21T23:43:07.000Z | 2022-03-31T06:07:10.000Z | tests/components/somfy_mylink/test_config_flow.py | OpenPeerPower/core | f673dfac9f2d0c48fa30af37b0a99df9dd6640ee | [
"Apache-2.0"
] | null | null | null | """Test the Somfy MyLink config flow."""
import asyncio
from unittest.mock import patch
import pytest
from openpeerpower import config_entries, data_entry_flow, setup
from openpeerpower.components.dhcp import HOSTNAME, IP_ADDRESS, MAC_ADDRESS
from openpeerpower.components.somfy_mylink.const import (
CONF_REVERSED_TARGET_IDS,
CONF_SYSTEM_ID,
DOMAIN,
)
from openpeerpower.const import CONF_HOST, CONF_PORT
from tests.common import MockConfigEntry
async def test_form_user(opp):
    """Test we get the form."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Patch the device status call and entry setup so no real MyLink hub
    # is contacted during the flow.
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        return_value={"any": "data"},
    ), patch(
        "openpeerpower.components.somfy_mylink.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
        await opp.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == "MyLink 1.1.1.1"
    assert result2["data"] == {
        CONF_HOST: "1.1.1.1",
        CONF_PORT: 1234,
        CONF_SYSTEM_ID: "456",
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_already_configured(opp):
    """Test we abort if already configured."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    # Pre-register an entry for the same host so the flow should abort.
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: "1.1.1.1", CONF_PORT: 12, CONF_SYSTEM_ID: 46},
    )
    config_entry.add_to_opp(opp)
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        return_value={"any": "data"},
    ), patch(
        "openpeerpower.components.somfy_mylink.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
        await opp.async_block_till_done()
    assert result2["type"] == "abort"
    # Setup must not be invoked for a duplicate entry.
    assert len(mock_setup_entry.mock_calls) == 0
async def test_form_invalid_auth(opp):
    """Test we handle invalid auth."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Simulate the MyLink JSON-RPC error payload for bad credentials.
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        return_value={
            "jsonrpc": "2.0",
            "error": {"code": -32652, "message": "Invalid auth"},
            "id": 818,
        },
    ):
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "invalid_auth"}


async def test_form_cannot_connect(opp):
    """Test we handle cannot connect error."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # A timeout from the device maps to the 'cannot_connect' form error.
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        side_effect=asyncio.TimeoutError,
    ):
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "cannot_connect"}


async def test_form_unknown_error(opp):
    """Test we handle broad exception."""
    result = await opp.config_entries.flow.async_init(
        DOMAIN, context={"source": config_entries.SOURCE_USER}
    )
    # Any unexpected exception type maps to the generic 'unknown' error.
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        side_effect=ValueError,
    ):
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "unknown"}
async def test_options_not_loaded(opp):
    """Test options will not display until loaded."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: "1.1.1.1", CONF_PORT: 12, CONF_SYSTEM_ID: "46"},
    )
    config_entry.add_to_opp(opp)
    # The entry is registered but never set up, so the options flow aborts.
    with patch(
        "openpeerpower.components.somfy_mylink.SomfyMyLinkSynergy.status_info",
        return_value={"result": []},
    ):
        result = await opp.config_entries.options.async_init(config_entry.entry_id)
        await opp.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
# NOTE: the parametrized name 'reversed' shadows the builtin of the same
# name inside the test body.
@pytest.mark.parametrize("reversed", [True, False])
async def test_options_with_targets(opp, reversed):
    """Test we can configure reverse for a target."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    config_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: "1.1.1.1", CONF_PORT: 12, CONF_SYSTEM_ID: "46"},
    )
    config_entry.add_to_opp(opp)
    # Report a single cover target "a" from the (mocked) hub.
    with patch(
        "openpeerpower.components.somfy_mylink.SomfyMyLinkSynergy.status_info",
        return_value={
            "result": [
                {
                    "targetID": "a",
                    "name": "Master Window",
                    "type": 0,
                }
            ]
        },
    ):
        assert await opp.config_entries.async_setup(config_entry.entry_id)
        await opp.async_block_till_done()
        result = await opp.config_entries.options.async_init(config_entry.entry_id)
        await opp.async_block_till_done()
        assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
        assert result["step_id"] == "init"
        # Select target "a", set its reverse flag, then finish the flow by
        # selecting no further target.
        result2 = await opp.config_entries.options.async_configure(
            result["flow_id"],
            user_input={"target_id": "a"},
        )
        assert result2["type"] == data_entry_flow.RESULT_TYPE_FORM
        result3 = await opp.config_entries.options.async_configure(
            result2["flow_id"],
            user_input={"reverse": reversed},
        )
        assert result3["type"] == data_entry_flow.RESULT_TYPE_FORM
        result4 = await opp.config_entries.options.async_configure(
            result3["flow_id"],
            user_input={"target_id": None},
        )
        assert result4["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
        assert config_entry.options == {
            CONF_REVERSED_TARGET_IDS: {"a": reversed},
        }
        await opp.async_block_till_done()
async def test_form_user_already_configured_from_dhcp(opp):
    """Verify DHCP discovery aborts when the host is already configured."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    existing_entry = MockConfigEntry(
        domain=DOMAIN,
        data={CONF_HOST: "1.1.1.1", CONF_PORT: 12, CONF_SYSTEM_ID: 46},
    )
    existing_entry.add_to_opp(opp)
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        return_value={"any": "data"},
    ), patch(
        "openpeerpower.components.somfy_mylink.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        discovery = await opp.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DHCP},
            data={
                IP_ADDRESS: "1.1.1.1",
                MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
                HOSTNAME: "somfy_eeff",
            },
        )
        await opp.async_block_till_done()
    # Discovery for an already-known host aborts and never sets up a new entry.
    assert discovery["type"] == "abort"
    assert len(mock_setup_entry.mock_calls) == 0
async def test_already_configured_with_ignored(opp):
    """Verify an ignored entry does not break duplicate detection for DHCP."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    ignored_entry = MockConfigEntry(
        domain=DOMAIN, data={}, source=config_entries.SOURCE_IGNORE
    )
    ignored_entry.add_to_opp(opp)
    discovery = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_DHCP},
        data={
            IP_ADDRESS: "1.1.1.1",
            MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
            HOSTNAME: "somfy_eeff",
        },
    )
    # An ignored entry must not count as "already configured".
    assert discovery["type"] == "form"
async def test_dhcp_discovery(opp):
    """Test we can process the discovery from dhcp."""
    await setup.async_setup_component(opp, "persistent_notification", {})
    # Start a flow from DHCP discovery data for an unknown host.
    result = await opp.config_entries.flow.async_init(
        DOMAIN,
        context={"source": config_entries.SOURCE_DHCP},
        data={
            IP_ADDRESS: "1.1.1.1",
            MAC_ADDRESS: "AA:BB:CC:DD:EE:FF",
            HOSTNAME: "somfy_eeff",
        },
    )
    assert result["type"] == "form"
    assert result["errors"] == {}
    # Successful validation finishes the flow and creates the entry.
    with patch(
        "openpeerpower.components.somfy_mylink.config_flow.SomfyMyLinkSynergy.status_info",
        return_value={"any": "data"},
    ), patch(
        "openpeerpower.components.somfy_mylink.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result2 = await opp.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "1.1.1.1",
                CONF_PORT: 1234,
                CONF_SYSTEM_ID: "456",
            },
        )
        await opp.async_block_till_done()
    assert result2["type"] == "create_entry"
    assert result2["title"] == "MyLink 1.1.1.1"
    assert result2["data"] == {
        CONF_HOST: "1.1.1.1",
        CONF_PORT: 1234,
        CONF_SYSTEM_ID: "456",
    }
    # The created entry is set up exactly once.
    assert len(mock_setup_entry.mock_calls) == 1
| 32.216216 | 91 | 0.617263 |
1730d6f6198b07db49941fd308c745f41b3c402e | 418 | py | Python | test/__init__.py | leiyangleon/RAiDER | 40c083a23ded02470939318daba36f3c2a25e52b | [
"Apache-2.0"
] | 2 | 2020-04-16T16:06:46.000Z | 2022-02-15T13:37:43.000Z | test/__init__.py | leiyangleon/RAiDER | 40c083a23ded02470939318daba36f3c2a25e52b | [
"Apache-2.0"
] | null | null | null | test/__init__.py | leiyangleon/RAiDER | 40c083a23ded02470939318daba36f3c2a25e52b | [
"Apache-2.0"
] | null | null | null | import os
from contextlib import contextmanager
from pathlib import Path

# Directory containing this test package.
test_dir = Path(__file__).parents[0]


@contextmanager
def pushd(dir):
    """
    Change the current working directory within a context.

    Bug fix: the previous directory is now restored in a ``finally`` block,
    so the working directory no longer leaks when the body raises.
    """
    prevdir = os.getcwd()
    os.chdir(dir)
    try:
        yield
    finally:
        os.chdir(prevdir)


TEST_DIR = test_dir.absolute()
DATA_DIR = os.path.join(TEST_DIR, "data")
GEOM_DIR = os.path.join(TEST_DIR, 'test_geom')
| 19 | 58 | 0.705742 |
48010f4e4070325e93474fd3a764bee2721da3e1 | 4,801 | py | Python | tests/system/step_defs/test_model_steps.py | leferrad/meli_datachallenge2019 | 4efb624b8ddea9e7044d1bac159ff03e5b13a289 | [
"MIT"
] | 1 | 2022-02-08T03:28:23.000Z | 2022-02-08T03:28:23.000Z | tests/system/step_defs/test_model_steps.py | leferrad/meli_datachallenge2019 | 4efb624b8ddea9e7044d1bac159ff03e5b13a289 | [
"MIT"
] | null | null | null | tests/system/step_defs/test_model_steps.py | leferrad/meli_datachallenge2019 | 4efb624b8ddea9e7044d1bac159ff03e5b13a289 | [
"MIT"
] | null | null | null | """Steps for model.feature"""
import json
import os
import subprocess
import pytest
from pytest_bdd import given, when, then, scenarios, parsers
from melidatachall19.utils import load_profile
from .utils.checks import MODELING_ACCEPTANCE_THRESHOLDS
from .utils.paths import PROJECT_PATH
scenarios('../features/model.feature')
# Default execution profile; a given-step below may override it.
PROFILE = "profile_sampled_data"


@pytest.fixture
def path_to_profile():
    # Resolve the profile YAML path lazily so a preceding given-step can
    # change the module-level PROFILE before the fixture is evaluated.
    yield os.path.join(PROJECT_PATH, "profiles", f"{PROFILE}.yml")
@given(parsers.parse("the execution profile '{profile}'"))
def given_profile(profile):
    # Record the requested profile name for the path_to_profile fixture.
    global PROFILE
    PROFILE = profile
    # NOTE(review): this returns the fixture *function* object, not a path;
    # the return value looks unintentional and is presumably ignored by
    # pytest-bdd -- confirm before relying on it.
    return path_to_profile
@when('the modeling script is executed')
def modeling_script_executed(path_to_profile):
    """Run the modeling script through tools/script.sh and check its exit code.

    Consistency/bug fix: output is now captured (as evaluation_script_executed
    already does); without ``stderr=subprocess.PIPE`` the original's
    ``out.stderr`` was always ``None``, making the failure message useless.
    """
    tools_script = os.path.join(PROJECT_PATH, "tools", "script.sh")
    args = os.path.join(PROJECT_PATH, f'scripts/modeling.py -p {path_to_profile}')
    cmd = f"{tools_script} {args}"
    out = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert out.returncode == 0, f"Execution of command '{cmd}' returned {out.returncode}. " \
                                f"Stderr: {out.stderr}"
@then('the model and results are saved')
def model_results_saved(path_to_profile):
    """Check that every model artifact and result file from the profile exists."""
    profile = load_profile(path_to_profile)
    # Saved model binaries.
    models_dir = os.path.join(PROJECT_PATH, "models")
    model_files = os.listdir(models_dir)
    for _, artifact_path in profile["paths"]["model"].items():
        model_filename = artifact_path.split("/")[-1]
        assert model_filename in model_files, f"The model {model_filename} was not found"
    # TRAIN and VALID result files.
    results_dir = os.path.join(models_dir, "results")
    result_files = os.listdir(results_dir)
    for _, artifact_path in profile["paths"]["results"]["train"].items():
        results_filename = artifact_path.split("/")[-1]
        assert results_filename in result_files, f"TRAIN results {results_filename} not found"
    for _, artifact_path in profile["paths"]["results"]["valid"].items():
        results_filename = artifact_path.split("/")[-1]
        assert results_filename in result_files, f"VALID results {results_filename} not found"
@then('the train and valid results pass the acceptance criteria')
def train_valid_results_pass_acceptance_criteria(path_to_profile):
    """Assert TRAIN and VALID metrics meet their acceptance thresholds."""
    profile = load_profile(path_to_profile)
    results_dir = os.path.join(PROJECT_PATH, "models", "results")
    # TRAIN metrics per model.
    for model, path in profile["paths"]["results"]["train"].items():
        with open(os.path.join(results_dir, "train", path), "r") as f:
            metrics = json.load(f)
        for k, v in MODELING_ACCEPTANCE_THRESHOLDS["train"].items():
            assert metrics[k] >= v, \
                f"Acceptance criteria '{k} >= {v}' was not passed " \
                f"for TRAIN results of model {model}"
    # VALID metrics per model.
    for model, path in profile["paths"]["results"]["valid"].items():
        with open(os.path.join(results_dir, "valid", path), "r") as f:
            metrics = json.load(f)
        for k, v in MODELING_ACCEPTANCE_THRESHOLDS["valid"].items():
            assert metrics[k] >= v, \
                f"Acceptance criteria '{k} >= {v}' was not passed " \
                f"for VALID results of model {model}"
@when('the evaluation script is executed')
def evaluation_script_executed(path_to_profile):
    """Run the evaluation script through tools/script.sh and check its exit code."""
    tools_script = os.path.join(PROJECT_PATH, "tools", "script.sh")
    script_args = os.path.join(PROJECT_PATH, f'scripts/evaluation.py -p {path_to_profile}')
    cmd = f"{tools_script} {script_args}"
    # Capture output so the assertion message can include stderr.
    completed = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    assert completed.returncode == 0, f"Execution of command '{cmd}' returned {completed.returncode}. " \
                                      f"Stderr: {completed.stderr}"
@then('the test results are saved')
def test_results_saved(path_to_profile):
    """Check that every TEST result file declared in the profile exists."""
    profile = load_profile(path_to_profile)
    results_dir = os.path.join(PROJECT_PATH, "models", "results")
    saved_files = os.listdir(results_dir)
    for _, result_path in profile["paths"]["results"]["test"].items():
        results_filename = result_path.split("/")[-1]
        assert results_filename in saved_files, f"TEST results {results_filename} not found"
@then('the test results pass the acceptance criteria')
def test_results_pass_acceptance_criteria(path_to_profile):
    """Assert TEST metrics meet their acceptance thresholds."""
    profile = load_profile(path_to_profile)
    results_dir = os.path.join(PROJECT_PATH, "models", "results")
    for model, path in profile["paths"]["results"]["test"].items():
        with open(os.path.join(results_dir, "test", path), "r") as f:
            metrics = json.load(f)
        for k, v in MODELING_ACCEPTANCE_THRESHOLDS["test"].items():
            assert metrics[k] >= v, \
                f"Acceptance criteria '{k} >= {v}' was not passed " \
                f"for TEST results of model {model}"
| 34.539568 | 93 | 0.659238 |
b21709043fb1f41939213baad1eed24ffdd5cf5f | 14,861 | py | Python | intrinioAPI/main.py | mikelhsia/initrinioAPI | 114b0641ce98cf1b883f138f44db6f66058b3c43 | [
"MIT"
] | null | null | null | intrinioAPI/main.py | mikelhsia/initrinioAPI | 114b0641ce98cf1b883f138f44db6f66058b3c43 | [
"MIT"
] | 2 | 2020-06-10T03:23:47.000Z | 2020-06-10T04:04:10.000Z | intrinioAPI/main.py | mikelhsia/intrinioAPI | 114b0641ce98cf1b883f138f44db6f66058b3c43 | [
"MIT"
] | null | null | null | from datetime import datetime, date, timedelta
import time
import pandas as pd
import numpy as np
from intrinio_sdk.rest import ApiException
from urllib3.exceptions import MaxRetryError
from sqlalchemy import func, or_, and_
from sqlalchemy.orm import sessionmaker
from models import Company, Security, SecurityPrice, StockAdjustment, Exchange
import setup_db_environment
import setup_intrinio_environment
###########################
# BULK_API_CALL_LIMIT will need to adjust to what make sense for the subscription
# Seconds to sleep after a large ("bulk") API request -- see get_security_prices.
BULK_API_CALL_LIMIT = 60
###########################
# Setup DB
# Setup environment and create a session
db = setup_db_environment.get_database()
Session = sessionmaker(bind=db)
session = Session()
# Intrinio SDK client plus flag for production vs. sandbox credentials.
intrinio_sdk = setup_intrinio_environment.get_connection()
production = setup_intrinio_environment.using_production()
# If production, add SP500 securities not already in database
# If sandbox, add all securities available not in database
query = session.query(Security).statement
existing_securities = pd.read_sql(query, db, index_col='id')
security_api = intrinio_sdk.SecurityApi()
company_api = intrinio_sdk.CompanyApi()
###########################
# Functions
def get_all_exchanges(next_page=''):
    """Fetch one page of stock exchanges from Intrinio.

    https://docs.intrinio.com/documentation/python/get_all_stock_exchanges_v2

    Args:
        next_page: Opaque paging token from a previous response ('' for the
            first page).

    Returns:
        The API response object, or None if the request failed.
    """
    page_size = 100
    try:
        api_response = stock_exchange_api.get_all_stock_exchanges(
            page_size=page_size,
            # Bug fix: next_page was accepted but never forwarded, so the
            # pagination loop below kept refetching the first page.
            next_page=next_page
        )
    except (ApiException, MaxRetryError) as e:
        print("Exception: StockExchangeApi->get_all_stock_exchanges: {e}\r\n".format(e=e))
        return None
    return api_response
def get_all_securities(delisted='', next_page=''):
    """Fetch one page of active, USD, US-composite securities from Intrinio.

    https://docs.intrinio.com/documentation/python/get_all_securities_v2

    Args:
        delisted: Ignored -- the query is pinned to non-delisted securities
            below; the parameter is kept for backward compatibility.
        next_page: Opaque paging token from a previous response ('' for the
            first page).

    Returns:
        The API response object, or None if the request failed.
    """
    active = True
    delisted = False  # deliberately overrides the parameter (see docstring)
    currency = 'USD'
    composite_mic = 'USCOMP'
    try:
        api_response = security_api.get_all_securities(
            active=active,
            delisted=delisted,
            currency=currency,
            composite_mic=composite_mic,
            # Bug fix: next_page was never forwarded to the API, so the
            # pagination loop kept refetching the first page.
            next_page=next_page
        )
    except (ApiException, MaxRetryError) as e:
        print("Exception: SecurityApi->get_all_securities: {e}\r\n".format(e=e))
        return None
    return api_response
def get_security(identifier):
    """Look up a single security by its Intrinio identifier.

    https://docs.intrinio.com/documentation/python/get_security_by_id_v2
    Returns the API response, or None when the lookup fails.
    """
    try:
        return security_api.get_security_by_id(identifier)
    except (ApiException, MaxRetryError) as e:
        print("Error trying to get data for {identifier}".format(identifier=identifier))
        print("Exception: securityApi->get_security_by_id: {e}\r\n".format(e=e))
        return None
def get_security_prices(identifier, start_date='', next_page=''):
    """Fetch one page of daily stock prices for a security.

    https://docs.intrinio.com/documentation/python/get_security_stock_prices_v2

    A full-history request (no start_date) asks for 10000 rows and then
    sleeps to respect the bulk-call rate limit; incremental requests use a
    page size of 100.  Returns the API response, or None on failure.
    """
    bulk_request = not start_date
    page_size = 10000 if bulk_request else 100
    try:
        api_response = security_api.get_security_stock_prices(
            identifier,
            start_date=start_date,
            frequency='daily',
            page_size=page_size,
            next_page=next_page
        )
    except (ApiException, MaxRetryError) as e:
        print("Exception: SecurityApi->get_security_historical_data: {e}\r\n".format(e=e))
        return None
    if bulk_request:
        # Back off after a large request (see BULK_API_CALL_LIMIT above).
        time.sleep(BULK_API_CALL_LIMIT)
    return api_response
def get_company(identifier):
    """Look up company details by identifier (ticker).

    https://docs.intrinio.com/documentation/python/get_company_v2
    Returns the API response, or None when the lookup fails.
    """
    try:
        return company_api.get_company(identifier)
    except (ApiException, MaxRetryError) as e:
        print("Exception: CompanyApi->get_company: {e}\r\n".format(e=e))
        return None
###########################
# Main
if production:
    # Production: only add S&P500 constituents that are not stored yet.
    # Get S&P500 constituents
    sp500_constituents = pd.read_csv(
        "sp500_constituents.csv",
        dtype={'cik': object}
    )
    securities_to_add = sp500_constituents[-sp500_constituents['ticker'].isin(existing_securities['ticker'])]
    # Lookup and compare ticker to company name
    missing_securities = []
    strings_to_remove = ['limited', 'ltd', 'incorporated', 'inc', '.']
    for index, sp500_constituent in securities_to_add.iterrows():
        # NOTE(review): this replace() rescans the whole frame on every
        # iteration; it could be hoisted above the loop.
        sp500_constituents.replace(np.nan, '', inplace=True)
        name = sp500_constituent['name'].lower()
        ticker = sp500_constituent['ticker'].upper()
        cik = sp500_constituent['cik']
        if cik:
            try:
                api_response = company_api.search_companies(cik)
            except (ApiException, MaxRetryError) as e:
                print("Exception: CompanyApi->search_companies {e}\r\n".format(e=e))
                continue
            if api_response:
                for company in api_response.companies:
                    # NOTE(review): `company.ticker.upper` is missing the call
                    # parentheses, so this compares a bound method to a string
                    # and can never be True.
                    if company.ticker and company.ticker.upper == ticker:
                        name = company.name
                        break
        else:
            # Strip common suffixes so the name search matches better.
            for string in strings_to_remove:
                name = name.replace(string, '')
        query = name + ' ' + ticker
        try:
            api_response = security_api.search_securities(query)
        except (ApiException, MaxRetryError) as e:
            print("Exception when calling companyApi->search_companies: {e}\r\n".format(e=e))
            continue
        if api_response:
            match_found = False
            # Accept the first equity (EQS) result whose ticker matches.
            for security in api_response.securities:
                if security.ticker and\
                        security.code == 'EQS' and\
                        security.ticker.upper() == ticker.upper():
                    match_found = True
                    api_response = get_security(security.id)
                    if api_response:
                        stock = Security(
                            id_intrinio=api_response.id,
                            code=api_response.code,
                            currency=api_response.currency,
                            ticker=api_response.ticker,
                            name=api_response.name,
                            figi=api_response.figi,
                            composite_figi=api_response.composite_figi,
                            share_class_figi=api_response.share_class_figi
                        )
                        print("Adding security {name} with ticker: {ticker}".format(name=stock.name, ticker=stock.ticker))
                        session.add(stock)
                        session.commit()
                    break
            if not match_found:
                print("\nNo match found for query: {query}\n".format(query=query))
                missing_securities.append(query)
        else:
            print("No API response for: ", query)
            missing_securities.append(query)
    # Retry pass for the tickers not found above, with a larger page size.
    print("There were {length} missing securities. Trying search with larger page size...".format(length=len(missing_securities)))
    for query in missing_securities:
        try:
            api_response = security_api.search_securities(
                query,
                page_size=10000
            )
            time.sleep(BULK_API_CALL_LIMIT)
        except (ApiException, MaxRetryError) as e:
            print("Exception when calling CompanyApi->search_companies: {e}\r\n".format(e=e))
            continue
        if api_response:
            match_found = False
            # NOTE(review): `ticker` below is left over from the previous
            # loop; it is not derived from `query`, so this comparison uses a
            # stale value.
            for security in api_response.securities:
                if security.ticker and \
                        security.code == 'EQS' and \
                        security.ticker.upper() == ticker.upper():
                    api_response = get_security(security.id)
                    # NOTE(review): match_found is never set to True in this
                    # retry loop, so "A match was not found" always prints.
                    if api_response:
                        stock = Security(
                            id_intrinio=api_response.id,
                            code=api_response.code,
                            currency=api_response.currency,
                            ticker=api_response.ticker,
                            name=api_response.name,
                            figi=api_response.figi,
                            composite_figi=api_response.composite_figi,
                            share_class_figi=api_response.share_class_figi
                        )
                        print(query)
                        print("Adding security {name} with ticker: {ticker}".format(name=stock.name, ticker=stock.ticker))
                        session.add(stock)
                        session.commit()
                    break
            if not match_found:
                print("A match was not found for query: {query}".format(query=query))
        else:
            print("NO API RESPONSE FOR: {query}".format(query=query))
else:
    # Sandbox: pull every available security and insert the new ones.
    api_response = get_all_securities()
    new_securities = pd.DataFrame(api_response.securities_dict)
    while api_response.next_page:
        # NOTE(review): the page token is passed positionally and therefore
        # binds to `delisted`; it should be next_page=api_response.next_page.
        api_response = get_all_securities(api_response.next_page)
        page = pd.DataFrame(api_response.securities_dict)
        # NOTE(review): pd.concat returns a new frame; the result is
        # discarded here, so every page after the first is dropped.
        pd.concat([new_securities, page])
    columns = ['id', 'code', 'currency', 'ticker', 'name', 'figi', 'composite_figi', 'share_class_figi']
    new_securities = new_securities[columns]
    new_securities.rename(columns={'id': 'id_intrinio'}, inplace=True)
    securities_to_add = new_securities[-new_securities['figi'].isin(existing_securities['figi'])]
    if len(securities_to_add) > 0:
        print("Adding {length} securities".format(length=len(securities_to_add)))
        session.bulk_insert_mappings(
            Security,
            securities_to_add.to_dict(orient='records')
        )
        session.commit()
    else:
        print("No securities added.")
# Get Exchanges: page through all exchanges and insert any new MICs.
stock_exchange_api = intrinio_sdk.StockExchangeApi()
api_response = get_all_exchanges()
exchanges = pd.DataFrame(api_response.stock_exchanges_dict)
while api_response.next_page:
    api_response = get_all_exchanges(api_response.next_page)
    page = pd.DataFrame(api_response.stock_exchanges_dict)
    # Bug fix: pd.concat returns a new DataFrame; the original discarded the
    # result, silently dropping every page after the first.
    exchanges = pd.concat([exchanges, page])
exchanges.rename(
    columns={'id': 'id_intrinio'},
    inplace=True
)
query = session.query(Exchange).statement
existing_exchanges = pd.read_sql(query, db, index_col='id')
# Keep only exchanges whose MIC is not stored yet.
exchanges = exchanges[-exchanges['mic'].isin(existing_exchanges['mic'])]
if len(exchanges) > 0:
    print("Inserting {length} exchanges.".format(length=len(exchanges)))
    session.bulk_insert_mappings(Exchange, exchanges.to_dict(orient="records"))
    session.commit()
else:
    print("No exchanges added.")
# Update securities with exchange
# Bug fix: the original used `Security.exchange_id is None`, a Python identity
# test on a Column object that always evaluates to False, so the filter
# matched no rows; SQL NULL checks need Column.is_(None).
securities = session.query(Security).outerjoin(Exchange).filter(Security.exchange_id.is_(None))
if securities.count() > 0:
    query = session.query(Exchange).statement
    exchanges = pd.read_sql(query, db, index_col='id')
    for security in securities:
        api_response = get_security(security.id_intrinio)
        if api_response:
            # Map the security's listing MIC back to our exchange row id.
            security.exchange_id = int(exchanges[exchanges['mic'] == api_response.listing_exchange_mic].index[0])
    print("Updating {count} securities with an exchange.".format(count=securities.count()))
    session.commit()
# Get Companies: create a Company row for each security that lacks one and
# has not previously been marked as missing company data.
query = session.query(Security)\
    .outerjoin(Company)\
    .filter(
        and_(
            Company.security_id == None,
            Security.has_missing_company.isnot(True)
        )
    ).statement
securities_without_company = pd.read_sql(query, db, index_col='id')
securities_without_company_data = []
for index, security in securities_without_company.iterrows():
    api_response = get_company(security.ticker)
    if not api_response:
        # Remember tickers with no company data so they can be flagged below.
        securities_without_company_data.append(security.ticker)
    else:
        company = Company(
            name=api_response.name,
            cik=api_response.cik,
            # Truncate to the 2000-character column limit.
            description=api_response.short_description[:2000] if len(api_response.short_description) > 2000 else api_response.short_description,
            company_url=api_response.company_url,
            sic=api_response.sic,
            employees=api_response.employees,
            sector=api_response.sector,
            industry_category=api_response.industry_category,
            industry_group=api_response.industry_group,
            security_id=index
        )
        # NOTE(review): "Addming" is a typo in this log message ("Adding").
        print("Addming company {name}".format(name=api_response.name))
        session.add(company)
        session.commit()
length = (len(securities_without_company) - len(securities_without_company_data))
print("Added {length} companies.".format(length=length))
if len(securities_without_company_data) > 0:
    # Flag the securities with no company data so they are skipped next run.
    securities_without_company = securities_without_company.loc[securities_without_company['ticker'].isin(securities_without_company_data)]
    securities_without_company['has_missing_company'] = True
    securities_without_company['id'] = securities_without_company.index
    session.bulk_update_mappings(Security, securities_without_company.to_dict(orient="records"))
    session.commit()
    print("There were {length} new rows that did not have an associated company record".format(length=len(securities_without_company_data)))
# Get Updated Prices: for each valid security, fetch prices newer than the
# latest stored date (or the full history when none are stored yet).
query = session.query(Security, func.max(SecurityPrice.date).label("latest_date")).outerjoin(SecurityPrice).group_by(Security.id).filter(Security.has_invalid_data.isnot(True)).statement
securities = pd.read_sql(query, db)
# NOTE(review): nothing ever appends to invalid_data_ids, so the filter on it
# below is currently a no-op -- confirm whether population was lost.
invalid_data_ids = []
for index, security in securities.iterrows():
    start_date = security.latest_date + timedelta(days=1) if security.latest_date else None
    api_response = get_security_prices(security.figi, start_date)
    if api_response:
        stock_prices = pd.DataFrame(api_response.stock_prices_dict)
        stock_prices['security_id'] = security.id
        while api_response.next_page:
            api_response = get_security_prices(security.figi, start_date, api_response.next_page)
            page = pd.DataFrame(api_response.stock_prices_dict)
            page['security_id'] = security.id
            # Bug fix: pd.concat returns a new DataFrame; the original
            # discarded the result, dropping every page after the first.
            stock_prices = pd.concat([stock_prices, page])
        stock_prices_to_add = stock_prices[-stock_prices.security_id.isin(invalid_data_ids)]
        # Bug fix: pd.np was deprecated/removed in modern pandas; use the
        # module-level numpy import instead.
        stock_prices_to_add.replace({np.nan: None}, inplace=True)
        if len(stock_prices_to_add) > 0:
            start_date = stock_prices_to_add['date'].min()
            end_date = stock_prices_to_add['date'].max()
            print("Ticker {ticker}: Adding {length} rows to the security prices database with dates between {start_date} - {end_date}".format(
                ticker=security.ticker,
                length=len(stock_prices_to_add),
                start_date=start_date,
                end_date=end_date
            ))
            session.bulk_insert_mappings(SecurityPrice, stock_prices_to_add.to_dict(orient="records"))
            session.commit()
4bf26de3bff3dad98d99f72248d6cfb8ef234fe9 | 554 | py | Python | dnsdb_common/dal/models/view_records.py | baiyongjie/open_dnsdb | b5b7a69e439080cd6d85b692825ed56cd8f5c80a | [
"Apache-2.0"
] | 378 | 2019-01-22T02:16:28.000Z | 2022-03-31T01:34:27.000Z | dnsdb_common/dal/models/view_records.py | baiyongjie/open_dnsdb | b5b7a69e439080cd6d85b692825ed56cd8f5c80a | [
"Apache-2.0"
] | 51 | 2019-01-23T03:15:16.000Z | 2021-05-08T02:22:23.000Z | dnsdb_common/dal/models/view_records.py | baiyongjie/open_dnsdb | b5b7a69e439080cd6d85b692825ed56cd8f5c80a | [
"Apache-2.0"
] | 139 | 2019-01-22T02:43:39.000Z | 2022-02-21T09:16:01.000Z | # -*- coding: utf-8 -*-
from . import AuditTimeMixin
from .. import db
class ViewRecords(db.Model, AuditTimeMixin):
__tablename__ = 'tb_view_record'
id = db.Column(db.Integer, primary_key=True)
domain_name = db.Column(db.String(256), nullable=False)
record = db.Column(db.String(256), nullable=False)
record_type = db.Column(db.String(32), nullable=False)
ttl = db.Column(db.Integer, nullable=False, default=60)
property = db.Column(db.String(256), default='none')
zone_name = db.Column(db.String(50), nullable=False)
| 32.588235 | 59 | 0.698556 |
06db365af14fd1855eb42849eb80e4abc0d8a2df | 77 | py | Python | taiga/utils.py | erikw/python-taiga | 006fce1c793a345cc4464ca7a1bfdd1beedb7744 | [
"MIT"
] | null | null | null | taiga/utils.py | erikw/python-taiga | 006fce1c793a345cc4464ca7a1bfdd1beedb7744 | [
"MIT"
] | 1 | 2018-05-27T11:37:47.000Z | 2018-05-27T11:41:49.000Z | taiga/utils.py | erikw/python-taiga | 006fce1c793a345cc4464ca7a1bfdd1beedb7744 | [
"MIT"
] | null | null | null |
def urljoin(*parts):
    """Join path fragments with single slashes.

    Leading and trailing slashes on each fragment are removed before
    joining, so ``urljoin('a/', '/b')`` yields ``'a/b'``.
    """
    trimmed = [part.strip('/') for part in parts]
    return '/'.join(trimmed)
| 19.25 | 54 | 0.623377 |
3aad506101de377763dbd0ee763386cd4cf8d3b2 | 7,680 | py | Python | batchregister.py | aaabbb200909/applconn | 18568a16d94e5b47950cbb34f9beb740e73ca2a7 | [
"Apache-2.0"
] | null | null | null | batchregister.py | aaabbb200909/applconn | 18568a16d94e5b47950cbb34f9beb740e73ca2a7 | [
"Apache-2.0"
] | null | null | null | batchregister.py | aaabbb200909/applconn | 18568a16d94e5b47950cbb34f9beb740e73ca2a7 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import glob
import json
import requests
import networkx as nx
from networkx.readwrite import json_graph
import settings
# Paths/endpoints configured per deployment in settings.py.
json_filepath = settings.json_filepath
elasticsearchurl = settings.elasticsearchurl
rsyncgitpath = settings.rsyncgitpath


def import_rsyncgit(G):
    """Register each host directory found under rsyncgitpath as a graph node.

    Bug fix: the original called ``G.add_edge_from()``, which does not exist
    on networkx graphs and raised AttributeError for any non-empty listing.
    Each ``ls`` entry is a single hostname string, so it is added as a node.
    """
    for line in os.popen('ls ' + rsyncgitpath).readlines():
        hostname = line.strip()
        G.add_node(hostname)
def import_pupput_yaml(G):
    '''puppet yaml'''
    # NOTE(review): this function looks like legacy/dead code: it calls an
    # undefined name `r` (presumably a Redis client from an earlier revision)
    # and uses the Python 2 builtin file(); running it raises NameError.
    puppetservername='centos-virt11.jp.example.org'
    puppetyamlpath=rsyncgitpath+puppetservername+'/var/lib/puppet/yaml/facts/'
    nodes=os.popen('ls '+puppetyamlpath).readlines()
    for n in nodes:
        # Add Hostname
        n=n.strip()
        nodename=n[:-5] # Strip final '.yaml'
        r.sadd('nodes', nodename)
        f=file(puppetyamlpath+n)
        l=f.readlines()
        f.close()
        for st in l:
            #print st
            st=st.strip()
            if (st.find('osfamily') > -1):
                # Link the host to its OS family (e.g. "osfamily: RedHat").
                tmp=st.split(': ')
                #print (n,tmp[1])
                r.sadd('nodes', tmp[1])
                r.sadd('edges', (nodename,tmp[1]))
            elif (st.find('ipaddress_') > -1 and (not st.find('ipaddress_lo') > -1)):
                # ipaddress_tun0: "10.200.200.6"
                tmp=st.split(': ')
                ipaddr=tmp[1].replace('"', '')
                interfacename=tmp[0].split('_')[1]
                nodename_iterfacename=nodename+'_'+interfacename
                r.sadd('nodes', nodename_iterfacename)
                r.sadd('edges', (nodename_iterfacename,ipaddr))
                r.sadd('edges', (nodename,nodename_iterfacename))
def import_libvirt(G):
    '''libvirt'''
    # Placeholder: libvirt import is not implemented yet.
    pass
def import_ansible_facts(G):
    """Import per-host ansible fact files as graph nodes.

    Facts are produced with:
        ansible all -i /tmp/hosts -m setup -t /tmp/ansible_facts
    """
    facts_dir = '/tmp/ansible_facts/'
    for fact_file in glob.glob(facts_dir + '*'):
        hostname = os.path.basename(fact_file)
        with open(fact_file) as fp:
            facts = json.load(fp)
        # remove some info which ES don't like
        facts["ansible_facts"]["ansible_python"]["version_info"] = []
        # NOTE(review): the positional attribute dict is the networkx 1.x API.
        G.add_node(hostname, facts, searchtag='All')
def import_haproxy(G):
    '''import haproxy.cfg
    listen main1081
    bind *:1081
    server server1 172.17.0.4:1081
    '''
    # One haproxy.cfg per synced host; the hostname is the 4th path
    # component from the end of .../<host>/etc/haproxy/haproxy.cfg.
    haproxyfilepaths=glob.glob(rsyncgitpath + "/*/etc/haproxy/haproxy.cfg")
    for haproxycfgpath in haproxyfilepaths:
        nodename=haproxycfgpath.split("/")[-4]
        with open(haproxycfgpath) as f:
            l=f.readlines()
        apps=[]
        # Line-oriented state machine: a "listen" line starts an app,
        # "bind"/"server" lines fill it in, and the "server" line commits it.
        # NOTE(review): find() is a substring match, so any line containing
        # these words (e.g. comments) is also picked up, and a "bind"/"server"
        # seen before the first "listen" would hit app while it is undefined
        # or None -- confirm against the config files this actually parses.
        for st in l:
            st = st.rstrip()
            tmp=st.split()
            if (st.find("listen") > -1):
                app={}
                app["name"]=tmp[1]
            elif (st.find("bind") > -1):
                app["bind"]=tmp[1]
            elif (st.find(" server ") > -1):
                backend_ip_port=tmp[2]
                ttmp=backend_ip_port.split(':')
                #print (ttmp)
                app["backend_ip"]=ttmp[0]
                app["backend_port"]=ttmp[1]
                #
                apps.append(app)
                app=None
        # add to graph
        G.add_node(nodename, searchtag='All')
        G.add_node(nodename+"-haproxy", searchtag='Dev')
        G.add_edge(nodename, nodename+"-haproxy")
        for app in apps:
            G.add_node(nodename+"-haproxy-"+app["name"], searchtag='Dev')
            G.add_edge(nodename+"-haproxy", nodename+"-haproxy-"+app["name"])
            G.add_edge(nodename+"-haproxy-"+app["name"], app["backend_ip"])
def import_testlogic(G):
    """Populate the graph with a small, fixed demo topology."""
    demo_nodes = [
        ('1', 'All'),
        ('2', 'All'),
        ('172.17.0.3', 'All'),
        ('172.17.0.4', 'All'),
        ('172.17.0.0/24', 'Ops'),
        ('172.17.0.3_cpu', 'Ops'),
    ]
    for node_name, tag in demo_nodes:
        G.add_node(node_name, searchtag=tag)
    demo_edges = [
        ('1', '2'),
        ('172.17.0.0/24', '172.17.0.3'),
        ('172.17.0.0/24', '172.17.0.4'),
        ('172.17.0.3', '172.17.0.3_cpu'),
    ]
    for src, dst in demo_edges:
        G.add_edge(src, dst)
    ## add attribute
    G.node['1']['color'] = 'red'  # '#ffde5e'
    G.node['2']['color'] = 'blue'  # '#ff634f'
    G.node['1']['href'] = 'http://www.google.co.jp'
    # Mirror each node id into a 'name' attribute for the frontend.
    for n in G:
        G.node[n]['name'] = n
def import_tungsten_fabric_prouterlinkentry(G):
    """Import physical-router link entries exported from Tungsten Fabric.

    Edges are oriented spine -> leaf: a spine's links point at its remotes,
    a leaf's links point from its remotes back to itself.
    """
    with open('/tmp/prouterlinkentry.json') as fp:
        prouters = json.load(fp)
    for prouter in prouters:
        G.add_node(prouter['name'], searchtag='Net')
        role = prouter['role']
        for link in prouter['link_table']:
            remote = link['remote_system_name']
            if role == 'spine':
                G.add_edge(prouter['name'], remote)
            elif role == 'leaf':
                G.add_edge(remote, prouter['name'])
def import_tungsten_fabric_network_policy(G):
    # Load two exported network-policy JSON dumps and draw the virtual
    # networks they connect, via their service-instance chain when present.
    network_policies=[]
    with open ('/tmp/network-policy1.json') as f:
        js = json.loads (f.read())
    network_policies.append(js.copy())
    with open ('/tmp/network-policy2.json') as f:
        js = json.loads (f.read())
    network_policies.append(js.copy())
    #print (network_policies)
    for network_policy in network_policies:
        # Only the first policy rule of each policy is considered.
        tmp = network_policy["network_policy_entries"]["policy_rule"][0]
        src_vn = tmp["src_addresses"][0]["virtual_network"]
        dst_vn = tmp["dst_addresses"][0]["virtual_network"]
        G.add_node(src_vn, searchtag='Sdn')
        G.add_node(dst_vn, searchtag='Sdn')
        service_instances = tmp["action_list"]["apply_service"]
        if (len (service_instances) == 0):
            # No service chain: the two VNs talk to each other directly.
            G.add_edge (src_vn, dst_vn)
            G.add_edge (dst_vn, src_vn)
        else:
            # Attach each VN to its end of the service chain, then link the
            # chain members in order.
            G.add_node (service_instances[0], searchtag='All')
            G.add_edge (src_vn, service_instances[0])
            G.add_node (service_instances[-1], searchtag='All')
            G.add_edge (dst_vn, service_instances[-1])
            for i in range(len(service_instances)):
                if (i == len(service_instances) - 1):
                    break
                else:
                    G.add_edge (service_instances[i], service_instances[i+1])
    ## test
    # Hard-coded demo edges linking hosts and switches to the VNs.
    G.add_node("host01", searchtag='Ops')
    G.add_edge("host01", "default-domain:default-project:vn1-to-vn2")
    G.add_edge("vqfx191", "default-domain:default-project:vn11")
    G.add_edge("vqfx192", "default-domain:default-project:vn11")
    G.add_edge("vqfx193", "default-domain:default-project:vn11")
    G.add_edge("vqfx191", "default-domain:default-project:vn12")
    G.add_edge("vqfx192", "default-domain:default-project:vn12")
    G.add_edge("vqfx193", "default-domain:default-project:vn12")
    G.add_edge("vqfx191", "default-domain:default-project:vn1")
    G.add_edge("vqfx192", "default-domain:default-project:vn1")
    G.add_edge("vqfx193", "default-domain:default-project:vn1")
    G.add_edge("vqfx191", "default-domain:default-project:vn2")
    G.add_edge("vqfx192", "default-domain:default-project:vn2")
    G.add_edge("vqfx193", "default-domain:default-project:vn2")
    G.add_edge("vqfx194", "host01")
    G.add_edge("vqfx195", "host01")
# Names of the importer functions to run, configured in settings.py.
list_import_def = settings.list_import_def


def main():
    """Run every configured importer, then publish the resulting graph."""
    graph = nx.DiGraph()
    for importer_name in list_import_def:
        # Importers are referenced by name in settings and resolved here.
        globals()[importer_name](graph)
    graph_json = json_graph.node_link_data(graph)
    # ES output
    if settings.enable_elasticsearch:
        try:
            requests.delete("http://{0}/applconn/".format(elasticsearchurl))
            for node_doc in graph_json["nodes"]:
                returned = requests.post(
                    'http://{0}/applconn/{1}'.format(elasticsearchurl, node_doc["id"]),
                    data=json.dumps(node_doc))
                # Remember the ES document id alongside the node.
                node_doc["kibanaid"] = json.loads(returned.content)["_id"]
        except requests.exceptions.ConnectionError:
            print("WARN: can't connect ES")
    # json output
    with open(json_filepath, 'w') as out_file:
        out_file.write(json.dumps(graph_json, sort_keys=True, indent=4))


if __name__ == "__main__":
    main()
| 34.439462 | 124 | 0.631641 |
f1bc422b3bc787a150ad6dab1d7ab3534bd7d2dc | 1,469 | py | Python | myhood/migrations/0002_auto_20220112_0645.py | davospots/theHood | 72e9f0ead64849d576a3728731c0c753f870f29d | [
"MIT"
] | null | null | null | myhood/migrations/0002_auto_20220112_0645.py | davospots/theHood | 72e9f0ead64849d576a3728731c0c753f870f29d | [
"MIT"
] | null | null | null | myhood/migrations/0002_auto_20220112_0645.py | davospots/theHood | 72e9f0ead64849d576a3728731c0c753f870f29d | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2022-01-12 03:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration: adds the Business model and
    re-declares the auto-created primary-key fields on existing models.
    """

    # Must be applied after the app's initial migration.
    dependencies = [
        ('myhood', '0001_initial'),
    ]

    operations = [
        # New Business model with basic name/contact/description fields.
        migrations.CreateModel(
            name='Business',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('email', models.CharField(max_length=100)),
                ('description', models.CharField(max_length=100)),
            ],
        ),
        # Re-declare the auto-created 'id' primary keys on existing models.
        # NOTE(review): presumably an artifact of a Django settings/version
        # change (DEFAULT_AUTO_FIELD) — confirm before editing.
        migrations.AlterField(
            model_name='comment',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='notification',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='post',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
        migrations.AlterField(
            model_name='postreport',
            name='id',
            field=models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| 34.162791 | 114 | 0.580667 |
d955d49f925d735ad40c30a8dc6267671793cede | 1,110 | py | Python | TracRemote/tests/test_top_level.py | weaverba137/trac-remote | 7ca15ccce9ff91413b3d84a43dc924fda0c9fdd5 | [
"BSD-3-Clause"
] | null | null | null | TracRemote/tests/test_top_level.py | weaverba137/trac-remote | 7ca15ccce9ff91413b3d84a43dc924fda0c9fdd5 | [
"BSD-3-Clause"
] | 1 | 2019-06-10T18:18:19.000Z | 2019-06-10T18:18:19.000Z | TracRemote/tests/test_top_level.py | weaverba137/trac-remote | 7ca15ccce9ff91413b3d84a43dc924fda0c9fdd5 | [
"BSD-3-Clause"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
"""
===============================
TracRemote.tests.test_top_level
===============================
Test things defined in the top level __init__.py file.
"""
import unittest
import re
import sys
from .. import __version__ as tr_version
class TestTop(unittest.TestCase):
    """Test the top-level TracRemote package metadata."""

    @classmethod
    def setUpClass(cls):
        # PEP 386/440-style version pattern: optional epoch, one or more
        # numeric release segments, optional pre/post/dev suffix.
        cls.versionre = re.compile(
            r'''
            ([0-9]+!)?  # epoch
            ([0-9]+)    # major
            (\.[0-9]+)* # minor
            ((a|b|rc|\.post|\.dev)[0-9]+)?''',
            re.X)

    @classmethod
    def tearDownClass(cls):
        # Nothing to clean up.
        pass

    def test_version(self):
        """Ensure the version conforms to PEP386/PEP440.
        """
        # assertRegexpMatches is the Python 2 spelling of assertRegex.
        if sys.version_info.major == 3:
            checker = self.assertRegex
        else:
            checker = self.assertRegexpMatches
        checker(tr_version, self.versionre)
| 27.75 | 69 | 0.491892 |
21f1b0687c8c1b6fc12136d5765fa5f02519c3ef | 132,479 | py | Python | sox/transform.py | abugler/pysox | 65784bd634e456debb6802b6741287b83f85939b | [
"BSD-3-Clause"
] | null | null | null | sox/transform.py | abugler/pysox | 65784bd634e456debb6802b6741287b83f85939b | [
"BSD-3-Clause"
] | null | null | null | sox/transform.py | abugler/pysox | 65784bd634e456debb6802b6741287b83f85939b | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Python wrapper around the SoX library.
This module requires that SoX is installed.
'''
from __future__ import print_function
from .log import logger
import random
import os
import numpy as np
from .core import ENCODING_VALS
from .core import is_number
from .core import play
from .core import sox
from .core import SoxError
from .core import VALID_FORMATS
from . import file_info
# Accepted values for SoX's -V verbosity flag (see set_globals).
VERBOSITY_VALS = [0, 1, 2, 3, 4]
# Maps numpy sample dtypes to the corresponding SoX raw file-type token
# (s16/s8 = signed 16/8-bit integer, f32/f64 = 32/64-bit float).
ENCODINGS_MAPPING = {
    np.int16: 's16',
    np.int8: 's8',
    np.float32: 'f32',
    np.float64: 'f64',
}
class Transformer(object):
'''Audio file transformer.
Class which allows multiple effects to be chained to create an output
file, saved to output_filepath.
Methods
-------
set_globals
Overwrite the default global arguments.
build
Execute the current chain of commands to create an output file.
build_file
Alias of build.
build_array
Execute the current chain of commands to create an output array.
'''
def __init__(self):
    '''
    Attributes
    ----------
    input_format : dict
        Input file format arguments that will be passed to SoX.
    output_format : dict
        Output file format arguments that will be passed to SoX.
    effects : list of str
        Effects arguments that will be passed to SoX.
    effects_log : list of str
        Ordered sequence of effects applied.
    globals : list of str
        Global arguments that will be passed to SoX.
    '''
    self.input_format = dict()
    self.output_format = dict()
    self.effects = list()
    self.effects_log = list()
    self.globals = list()
    # Start from SoX's default global arguments.
    self.set_globals()
def set_globals(self, dither=False, guard=False, multithread=False,
                replay_gain=False, verbosity=2):
    '''Sets SoX's global arguments.

    Overwrites any previously set global arguments. If this function is
    not explicitly called, globals are set to this function's defaults.

    Parameters
    ----------
    dither : bool, default=False
        If True, dithering is applied for low files with low bit rates.
    guard : bool, default=False
        If True, invokes the gain effect to guard against clipping.
    multithread : bool, default=False
        If True, each channel is processed in parallel.
    replay_gain : bool, default=False
        If True, applies replay-gain adjustment to input-files.
    verbosity : int, default=2
        SoX's verbosity level. One of:

            * 0 : No messages are shown at all
            * 1 : Only error messages are shown.
            * 2 : Warning messages are also shown (e.g. on clipping).
            * 3 : Descriptions of SoX's processing phases are also shown.
            * 4, >4 : Debugging messages are also shown.

    Returns
    -------
    self : Transformer

    Raises
    ------
    ValueError
        If any argument has an invalid type or value.
    '''
    if not isinstance(dither, bool):
        raise ValueError('dither must be a boolean.')
    if not isinstance(guard, bool):
        raise ValueError('guard must be a boolean.')
    if not isinstance(multithread, bool):
        raise ValueError('multithread must be a boolean.')
    if not isinstance(replay_gain, bool):
        raise ValueError('replay_gain must be a boolean.')
    if verbosity not in VERBOSITY_VALS:
        # Fixed message: previously read "Must be one {}".
        raise ValueError(
            'Invalid value for VERBOSITY. Must be one of {}'.format(
                VERBOSITY_VALS)
        )

    global_args = []
    # SoX dithers by default; -D disables it.
    if not dither:
        global_args.append('-D')
    if guard:
        global_args.append('-G')
    if multithread:
        global_args.append('--multi-threaded')
    if replay_gain:
        global_args.extend(['--replay-gain', 'track'])
    global_args.append('-V{}'.format(verbosity))

    self.globals = global_args
    return self
def _validate_input_format(self, input_format):
    '''Private helper: raise ValueError if any entry of the given
    input-format dictionary is invalid. Missing keys are treated as
    unset (None), except ignore_length which defaults to False.
    '''
    file_type = input_format.get('file_type')
    rate = input_format.get('rate')
    bits = input_format.get('bits')
    channels = input_format.get('channels')
    encoding = input_format.get('encoding')
    ignore_length = input_format.get('ignore_length', False)

    # None is always acceptable; anything else must be well-formed.
    if file_type is not None and file_type not in VALID_FORMATS:
        raise ValueError(
            'Invalid file_type. Must be one of {}'.format(VALID_FORMATS)
        )
    if rate is not None and not is_number(rate):
        raise ValueError('rate must be a float or None')
    if rate is not None and rate <= 0:
        raise ValueError('rate must be a positive number')
    if bits is not None and not isinstance(bits, int):
        raise ValueError('bits must be an int or None')
    if bits is not None and bits <= 0:
        raise ValueError('bits must be a positive number')
    if channels is not None and not isinstance(channels, int):
        raise ValueError('channels must be an int or None')
    if channels is not None and channels <= 0:
        raise ValueError('channels must be a positive number')
    if encoding is not None and encoding not in ENCODING_VALS:
        raise ValueError(
            'Invalid encoding {}. Must be one of {}'.format(
                encoding, ENCODING_VALS)
        )
    if not isinstance(ignore_length, bool):
        raise ValueError('ignore_length must be a boolean')
def _input_format_args(self, input_format):
    '''Private helper: validate an input-format dictionary and translate
    it into the corresponding list of SoX command-line arguments.
    '''
    self._validate_input_format(input_format)

    args = []
    file_type = input_format.get('file_type')
    if file_type is not None:
        args.extend(['-t', '{}'.format(file_type)])
    rate = input_format.get('rate')
    if rate is not None:
        args.extend(['-r', '{:f}'.format(rate)])
    bits = input_format.get('bits')
    if bits is not None:
        args.extend(['-b', '{}'.format(bits)])
    channels = input_format.get('channels')
    if channels is not None:
        args.extend(['-c', '{}'.format(channels)])
    encoding = input_format.get('encoding')
    if encoding is not None:
        args.extend(['-e', '{}'.format(encoding)])
    if input_format.get('ignore_length', False):
        args.append('--ignore-length')

    return args
def set_input_format(self, file_type=None, rate=None, bits=None,
                     channels=None, encoding=None, ignore_length=False):
    '''Sets input file format arguments, overwriting any previously set
    input file arguments. Primarily useful for audio files without a
    file extension; if never called, the input format is inferred from
    the file extension or the file's header.

    Parameters
    ----------
    file_type : str or None, default=None
        File type of the input audio file (same as its extension would
        be, e.g. 'mp3' or 'wav'). Inferred if None.
    rate : float or None, default=None
        Sample rate of the input audio file. Inferred if None.
    bits : int or None, default=None
        Number of bits per sample. Inferred if None.
    channels : int or None, default=None
        Number of channels in the audio file. Inferred if None.
    encoding : str or None, default=None
        Audio encoding type, needed for file-types supporting several
        encodings. One of 'signed-integer', 'unsigned-integer',
        'floating-point', 'a-law', 'u-law', 'oki-adpcm', 'ima-adpcm',
        'ms-adpcm', 'gsm-full-rate'. Inferred if None.
    ignore_length : bool, default=False
        If True, overrides an (incorrect) audio length given in the
        file's header and reads until the end of the input file.
    '''
    input_format = dict(
        file_type=file_type,
        rate=rate,
        bits=bits,
        channels=channels,
        encoding=encoding,
        ignore_length=ignore_length,
    )
    # Fail fast on bad values before storing.
    self._validate_input_format(input_format)
    self.input_format = input_format
def _validate_output_format(self, output_format):
    '''Private helper: raise ValueError if any entry of the given
    output-format dictionary is invalid. Missing keys are treated as
    unset (None), except append_comments which defaults to True.
    '''
    file_type = output_format.get('file_type')
    rate = output_format.get('rate')
    bits = output_format.get('bits')
    channels = output_format.get('channels')
    encoding = output_format.get('encoding')
    comments = output_format.get('comments')
    append_comments = output_format.get('append_comments', True)

    if file_type is not None and file_type not in VALID_FORMATS:
        raise ValueError(
            'Invalid file_type. Must be one of {}'.format(VALID_FORMATS)
        )
    if rate is not None and not is_number(rate):
        raise ValueError('rate must be a float or None')
    if rate is not None and rate <= 0:
        raise ValueError('rate must be a positive number')
    if bits is not None and not isinstance(bits, int):
        raise ValueError('bits must be an int or None')
    if bits is not None and bits <= 0:
        raise ValueError('bits must be a positive number')
    if channels is not None and not isinstance(channels, int):
        raise ValueError('channels must be an int or None')
    if channels is not None and channels <= 0:
        raise ValueError('channels must be a positive number')
    if encoding is not None and encoding not in ENCODING_VALS:
        raise ValueError(
            'Invalid encoding. Must be one of {}'.format(ENCODING_VALS)
        )
    if comments is not None and not isinstance(comments, str):
        raise ValueError('comments must be a string or None')
    if not isinstance(append_comments, bool):
        raise ValueError('append_comments must be a boolean')
def _output_format_args(self, output_format):
    '''Private helper: validate an output-format dictionary and translate
    it into the corresponding list of SoX command-line arguments.
    '''
    self._validate_output_format(output_format)

    args = []
    file_type = output_format.get('file_type')
    if file_type is not None:
        args.extend(['-t', '{}'.format(file_type)])
    rate = output_format.get('rate')
    if rate is not None:
        args.extend(['-r', '{:f}'.format(rate)])
    bits = output_format.get('bits')
    if bits is not None:
        args.extend(['-b', '{}'.format(bits)])
    channels = output_format.get('channels')
    if channels is not None:
        args.extend(['-c', '{}'.format(channels)])
    encoding = output_format.get('encoding')
    if encoding is not None:
        args.extend(['-e', '{}'.format(encoding)])
    comments = output_format.get('comments')
    if comments is not None:
        # --add-comment appends to SoX's default comments;
        # --comment replaces them.
        if output_format.get('append_comments', True):
            args.extend(['--add-comment', comments])
        else:
            args.extend(['--comment', comments])

    return args
def set_output_format(self, file_type=None, rate=None, bits=None,
                      channels=None, encoding=None, comments=None,
                      append_comments=True):
    '''Sets output file format arguments. These arguments overwrite any
    format-related arguments supplied by other effects (e.g. rate).
    If never called, the output format is inferred from the file
    extension or the file's header.

    Parameters
    ----------
    file_type : str or None, default=None
        File type of the output audio file (same as its extension would
        be, e.g. 'mp3' or 'wav'). Inferred if None.
    rate : float or None, default=None
        Sample rate of the output audio file. Inferred if None.
    bits : int or None, default=None
        Number of bits per sample. Inferred if None.
    channels : int or None, default=None
        Number of channels in the audio file. Inferred if None.
    encoding : str or None, default=None
        Audio encoding type, needed for file-types supporting several
        encodings. One of 'signed-integer', 'unsigned-integer',
        'floating-point', 'a-law', 'u-law', 'oki-adpcm', 'ima-adpcm',
        'ms-adpcm', 'gsm-full-rate'. Inferred if None.
    comments : str or None, default=None
        If not None, the string is added as a comment in the header of
        the output audio file. If None, no comments are added.
    append_comments : bool, default=True
        If True, comment strings are appended to SoX's default comments.
        If False, the supplied comment replaces the existing comment.
    '''
    output_format = dict(
        file_type=file_type,
        rate=rate,
        bits=bits,
        channels=channels,
        encoding=encoding,
        comments=comments,
        append_comments=append_comments,
    )
    # Fail fast on bad values before storing.
    self._validate_output_format(output_format)
    self.output_format = output_format
def clear_effects(self):
'''Remove all effects processes.
'''
self.effects = list()
self.effects_log = list()
return self
def _parse_inputs(self, input_filepath, input_array, sample_rate_in):
    '''Private helper: resolve the mutually exclusive file/array inputs
    used by build and build_array.

    Parameters
    ----------
    input_filepath : str or None
        Path to input audio file, or None.
    input_array : np.ndarray or None
        Waveform of shape (n_samples, n_channels), or None.
    sample_rate_in : int or None
        Sample rate of input_array, or None.

    Returns
    -------
    input_format : dict
        Input format dictionary.
    input_filepath : str
        Formatted input filepath ('-' for array/stdin input).

    Raises
    ------
    ValueError
        If both or neither input is given, or sample_rate_in is missing
        for array input.
    TypeError
        If input_array is not a numpy array.
    '''
    if input_filepath is not None and input_array is not None:
        raise ValueError(
            "Only one of input_filepath and input_array may be specified"
        )

    if input_filepath is not None:
        # File input: fill in the channel count if not already set.
        file_info.validate_input_file(input_filepath)
        input_format = self.input_format
        if input_format.get('channels') is None:
            input_format['channels'] = file_info.channels(input_filepath)
        return input_format, input_filepath

    if input_array is None:
        raise ValueError(
            "One of input_filepath or input_array must be specified"
        )

    # Array input: SoX reads raw samples from stdin ('-').
    if not isinstance(input_array, np.ndarray):
        raise TypeError("input_array must be a numpy array or None")
    if sample_rate_in is None:
        raise ValueError(
            "sample_rate_in must be specified for array inputs"
        )
    n_channels = input_array.shape[-1] if input_array.ndim > 1 else 1
    input_format = {
        'file_type': ENCODINGS_MAPPING[input_array.dtype.type],
        'rate': sample_rate_in,
        'bits': None,
        'channels': n_channels,
        'encoding': None,
        'ignore_length': False
    }
    return input_format, '-'
def build(self, input_filepath=None, output_filepath=None,
          input_array=None, sample_rate_in=None,
          extra_args=None, return_output=False):
    '''Given an input file or array, create output_filepath on disk by
    executing the current chain of commands.

    Parameters
    ----------
    input_filepath : str or None
        Path to the input audio file, or None for array input.
    output_filepath : str
        Path to the desired output file (overwritten if it exists;
        '-n' creates no file).
    input_array : np.ndarray or None
        Waveform with shape (n_samples, n_channels). Requires
        sample_rate_in. Mutually exclusive with input_filepath.
    sample_rate_in : int
        Sample rate of input_array; ignored for file input.
    extra_args : list or None, default=None
        Additional raw arguments appended to the SoX call. Don't use
        this unless you know exactly what you're doing!
    return_output : bool, default=False
        If True, return (status, stdout, stderr) instead of True.

    Returns
    -------
    status : bool
        True on success (when return_output is False).
    out, err : str (optional)
        stdout/stderr captured from sox, only when return_output=True.

    Raises
    ------
    ValueError
        On missing/equal paths or non-list extra_args.
    SoxError
        If the SoX subprocess exits with a non-zero status.
    '''
    input_format, input_filepath = self._parse_inputs(
        input_filepath, input_array, sample_rate_in
    )

    if output_filepath is None:
        raise ValueError("output_filepath is not specified!")
    # SoX cannot write in-place over its own input.
    if input_filepath == output_filepath:
        raise ValueError(
            "input_filepath must be different from output_filepath."
        )
    file_info.validate_output_file(output_filepath)

    # Assemble: globals, input format+path, output format+path, effects.
    args = (
        self.globals
        + self._input_format_args(input_format)
        + [input_filepath]
        + self._output_format_args(self.output_format)
        + [output_filepath]
        + self.effects
    )
    if extra_args is not None:
        if not isinstance(extra_args, list):
            raise ValueError("extra_args must be a list.")
        args = args + extra_args

    status, out, err = sox(args, input_array, True)
    if status != 0:
        raise SoxError(
            "Stdout: {}\nStderr: {}".format(out, err)
        )

    logger.info(
        "Created %s with effects: %s",
        output_filepath,
        " ".join(self.effects_log)
    )
    if return_output:
        return status, out, err
    return True
def build_file(self, input_filepath=None, output_filepath=None,
               input_array=None, sample_rate_in=None,
               extra_args=None, return_output=False):
    '''An alias for build.

    Given an input file or array, create output_filepath on disk by
    executing the current chain of commands. All parameters, return
    values and exceptions are identical to build's.

    Parameters
    ----------
    input_filepath : str or None
        Path to the input audio file, or None for array input.
    output_filepath : str
        Path to the desired output file ('-n' creates no file).
    input_array : np.ndarray or None
        Waveform with shape (n_samples, n_channels); requires
        sample_rate_in.
    sample_rate_in : int
        Sample rate of input_array; ignored for file input.
    extra_args : list or None, default=None
        Additional raw arguments appended to the SoX call.
    return_output : bool, default=False
        If True, return (status, stdout, stderr) instead of True.

    Returns
    -------
    status : bool, or (status, out, err) when return_output=True.
    '''
    # Pure delegation; kept for API symmetry with build_array.
    return self.build(
        input_filepath=input_filepath,
        output_filepath=output_filepath,
        input_array=input_array,
        sample_rate_in=sample_rate_in,
        extra_args=extra_args,
        return_output=return_output,
    )
def build_array(self, input_filepath=None, input_array=None,
                sample_rate_in=None, extra_args=None):
    '''Given an input file or array, return the output as a numpy array
    by executing the current chain of commands. By default the array
    has the same sample rate as the input unless otherwise specified
    via set_output_format. The rate, channels and convert effects are
    ignored for array output!

    Parameters
    ----------
    input_filepath : str or None
        Path to the input audio file, or None for array input.
    input_array : np.ndarray or None
        Waveform with shape (n_samples, n_channels); requires
        sample_rate_in. Mutually exclusive with input_filepath.
    sample_rate_in : int
        Sample rate of input_array; ignored for file input.
    extra_args : list or None, default=None
        Additional raw arguments appended to the SoX call. Don't use
        this unless you know exactly what you're doing!

    Returns
    -------
    output_array : np.ndarray
        Output audio; 1-D for mono, (n_samples, n_channels) otherwise.

    Raises
    ------
    ValueError
        On bad inputs or an unsupported output bit depth.
    SoxError
        If the SoX subprocess exits with a non-zero status.
    '''
    input_format, input_filepath = self._parse_inputs(
        input_filepath, input_array, sample_rate_in
    )

    # Warn about effects that only make sense when writing to a file.
    if set(self.effects_log) & {'rate', 'channels', 'convert'}:
        logger.warning(
            "When outputting to an array, rate, channels and convert " +
            "effects may be ignored. Use set_output_format() to " +
            "specify output formats."
        )

    # Choose the raw sample dtype: mirror the input's encoding when it
    # is known, otherwise default to 16-bit signed integers.
    file_type = input_format.get('file_type')
    if file_type is None:
        encoding_out = np.int16
    else:
        encoding_out = next(
            dtype for dtype, token in ENCODINGS_MAPPING.items()
            if token == file_type
        )
    n_bits = np.dtype(encoding_out).itemsize * 8

    output_format = {
        'file_type': 'raw',
        'rate': sample_rate_in,
        'bits': n_bits,
        'channels': input_format['channels'],
        'encoding': None,
        'comments': None,
        'append_comments': True,
    }

    # Explicit user settings override the inferred output format.
    if self.output_format.get('rate') is not None:
        output_format['rate'] = self.output_format['rate']
    if self.output_format.get('channels') is not None:
        output_format['channels'] = self.output_format['channels']
    if self.output_format.get('bits') is not None:
        n_bits = self.output_format['bits']
        output_format['bits'] = n_bits
        # 8/16-bit are read back as ints, 32/64-bit as floats.
        dtype_for_bits = {
            8: np.int8, 16: np.int16, 32: np.float32, 64: np.float64
        }
        if n_bits not in dtype_for_bits:
            raise ValueError("invalid n_bits {}".format(n_bits))
        encoding_out = dtype_for_bits[n_bits]

    # Assemble: globals, input format+path, output format, stdout, effects.
    args = []
    args.extend(self.globals)
    args.extend(self._input_format_args(input_format))
    args.append(input_filepath)
    args.extend(self._output_format_args(output_format))
    args.append('-')
    args.extend(self.effects)
    if extra_args is not None:
        if not isinstance(extra_args, list):
            raise ValueError("extra_args must be a list.")
        args.extend(extra_args)

    status, out, err = sox(args, input_array, False)
    if status != 0:
        raise SoxError(
            "Stdout: {}\nStderr: {}".format(out, err)
        )

    out = np.frombuffer(out, dtype=encoding_out)
    n_channels = output_format['channels']
    if n_channels > 1:
        # SoX emits interleaved samples; un-interleave to
        # (n_samples, n_channels).
        out = out.reshape(
            (n_channels, int(len(out) / n_channels)), order='F'
        ).T
    logger.info(
        "Created array with effects: %s",
        " ".join(self.effects_log)
    )
    return out
def preview(self, input_filepath):
    '''Play a preview of the output with the current set of effects.

    Parameters
    ----------
    input_filepath : str
        Path to input audio file.
    '''
    args = ["play", "--no-show-progress"]
    args.extend(self.globals)
    # Bug fix: self.input_format is a dict of format options, so the old
    # ``args.extend(self.input_format)`` injected bare dict keys (e.g.
    # 'file_type') into the command line. Translate it to CLI flags
    # instead, as build/build_array do.
    args.extend(self._input_format_args(self.input_format))
    args.append(input_filepath)
    args.extend(self.effects)
    play(args)
def allpass(self, frequency, width_q=2.0):
    '''Apply a two-pole all-pass filter. An all-pass filter changes the
    audio's frequency-to-phase relationship without changing its
    frequency-to-amplitude relationship. See
    http://musicdsp.org/files/Audio-EQ-Cookbook.txt for details.

    Parameters
    ----------
    frequency : float
        The filter's center frequency in Hz.
    width_q : float, default=2.0
        The filter's width as a Q-factor.

    See Also
    --------
    equalizer, highpass, lowpass, sinc
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")

    self.effects.extend(
        ['allpass', '{:f}'.format(frequency), '{:f}q'.format(width_q)]
    )
    self.effects_log.append('allpass')
    return self
def bandpass(self, frequency, width_q=2.0, constant_skirt=False):
    '''Apply a two-pole Butterworth band-pass filter with the given
    central frequency and (3dB-point) band-width. The filter rolls off
    at 6dB per octave (20dB per decade). See
    http://musicdsp.org/files/Audio-EQ-Cookbook.txt for details.

    Parameters
    ----------
    frequency : float
        The filter's center frequency in Hz.
    width_q : float, default=2.0
        The filter's width as a Q-factor.
    constant_skirt : bool, default=False
        If True, selects constant skirt gain (peak gain = width_q).
        If False, selects constant 0dB peak gain.

    See Also
    --------
    bandreject, sinc
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")
    if not isinstance(constant_skirt, bool):
        raise ValueError("constant_skirt must be a boolean.")

    args = ['bandpass']
    if constant_skirt:
        args.append('-c')
    args += ['{:f}'.format(frequency), '{:f}q'.format(width_q)]

    self.effects.extend(args)
    self.effects_log.append('bandpass')
    return self
def bandreject(self, frequency, width_q=2.0):
    '''Apply a two-pole Butterworth band-reject filter with the given
    central frequency and (3dB-point) band-width. The filter rolls off
    at 6dB per octave (20dB per decade). See
    http://musicdsp.org/files/Audio-EQ-Cookbook.txt for details.

    Parameters
    ----------
    frequency : float
        The filter's center frequency in Hz.
    width_q : float, default=2.0
        The filter's width as a Q-factor.

    See Also
    --------
    bandpass, sinc
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")

    self.effects.extend(
        ['bandreject', '{:f}'.format(frequency), '{:f}q'.format(width_q)]
    )
    self.effects_log.append('bandreject')
    return self
def bass(self, gain_db, frequency=100.0, slope=0.5):
    '''Boost or cut the bass (lower) frequencies of the audio using a
    two-pole shelving filter with a response similar to that of a standard
    hi-fi's tone-controls. This is also known as shelving equalisation.

    The filters are described in detail in
    http://musicdsp.org/files/Audio-EQ-Cookbook.txt

    Parameters
    ----------
    gain_db : float
        The gain at 0 Hz.
        For a large cut use -20, for a large boost use 20.
    frequency : float, default=100.0
        The filter's cutoff frequency in Hz.
    slope : float, default=0.5
        The steepness of the filter's shelf transition.
        For a gentle slope use 0.3, and use 1.0 for a steep slope.
        Must be in the half-open interval (0, 1].

    See Also
    --------
    treble, equalizer
    '''
    if not is_number(gain_db):
        raise ValueError("gain_db must be a number")
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(slope) or slope <= 0 or slope > 1.0:
        # BUG FIX: this error previously complained about "width_q",
        # a parameter this method does not have.
        raise ValueError("slope must be a positive number between 0 and 1.")

    effect_args = [
        'bass', '{:f}'.format(gain_db), '{:f}'.format(frequency),
        '{:f}s'.format(slope)
    ]
    self.effects.extend(effect_args)
    self.effects_log.append('bass')
    return self
def bend(self, n_bends, start_times, end_times, cents, frame_rate=25,
         oversample_rate=16):
    '''Changes pitch by specified amounts at specified times.

    The pitch-bending algorithm utilises the Discrete Fourier Transform
    (DFT) at a particular frame rate and over-sampling rate.

    Parameters
    ----------
    n_bends : int
        The number of intervals to pitch shift
    start_times : list of floats
        A list of absolute start times (in seconds), in order
    end_times : list of floats
        A list of absolute end times (in seconds) in order.
        [start_time, end_time] intervals may not overlap!
    cents : list of floats
        A list of pitch shifts in cents. A positive value shifts the pitch
        up, a negative value shifts the pitch down.
    frame_rate : int, default=25
        The number of DFT frames to process per second, between 10 and 80
    oversample_rate : int, default=16
        The number of frames to over sample per second, between 4 and 32

    Raises
    ------
    ValueError
        If any argument has the wrong type or length, is out of range,
        or if the intervals are unordered or overlapping.

    See Also
    --------
    pitch
    '''
    if not isinstance(n_bends, int) or n_bends < 1:
        raise ValueError("n_bends must be a positive integer.")

    # start_times must be strictly positive (a start time of 0 is
    # rejected by the p <= 0 test) and already sorted ascending.
    if not isinstance(start_times, list) or len(start_times) != n_bends:
        raise ValueError("start_times must be a list of length n_bends.")
    if any([(not is_number(p) or p <= 0) for p in start_times]):
        raise ValueError("start_times must be positive floats.")
    if sorted(start_times) != start_times:
        raise ValueError("start_times must be in increasing order.")

    if not isinstance(end_times, list) or len(end_times) != n_bends:
        raise ValueError("end_times must be a list of length n_bends.")
    if any([(not is_number(p) or p <= 0) for p in end_times]):
        raise ValueError("end_times must be positive floats.")
    if sorted(end_times) != end_times:
        raise ValueError("end_times must be in increasing order.")

    # Each interval must have strictly positive duration.
    if any([e <= s for s, e in zip(start_times, end_times)]):
        raise ValueError(
            "end_times must be element-wise greater than start_times."
        )
    # Overlap check: the zip pairs are (start_times[i+1], end_times[i]),
    # so e > s means interval i extends past the start of interval i+1.
    if any([e > s for s, e in zip(start_times[1:], end_times[:-1])]):
        raise ValueError(
            "[start_time, end_time] intervals must be non-overlapping."
        )

    if not isinstance(cents, list) or len(cents) != n_bends:
        raise ValueError("cents must be a list of length n_bends.")
    if any([not is_number(p) for p in cents]):
        raise ValueError("elements of cents must be floats.")

    if (not isinstance(frame_rate, int) or
            frame_rate < 10 or frame_rate > 80):
        raise ValueError("frame_rate must be an integer between 10 and 80")

    if (not isinstance(oversample_rate, int) or
            oversample_rate < 4 or oversample_rate > 32):
        raise ValueError(
            "oversample_rate must be an integer between 4 and 32."
        )

    effect_args = [
        'bend',
        '-f', '{}'.format(frame_rate),
        '-o', '{}'.format(oversample_rate)
    ]

    # Each bend is encoded as a "delay,cents,duration" triple where the
    # delay is measured relative to the end of the previous bend;
    # `last` tracks that previous end time (presumably matching the SoX
    # bend effect's argument format — confirm against sox(1)).
    last = 0
    for i in range(n_bends):
        t_start = round(start_times[i] - last, 2)
        t_end = round(end_times[i] - start_times[i], 2)
        effect_args.append(
            '{:f},{:f},{:f}'.format(t_start, cents[i], t_end)
        )
        last = end_times[i]

    self.effects.extend(effect_args)
    self.effects_log.append('bend')
    return self
def biquad(self, b, a):
    '''Apply a biquad IIR filter with the given coefficients.

    Parameters
    ----------
    b : list of floats
        Numerator coefficients. Must be length 3
    a : list of floats
        Denominator coefficients. Must be length 3

    Raises
    ------
    ValueError
        If b or a is not a length-3 list of numbers.

    See Also
    --------
    fir, treble, bass, equalizer
    '''
    # Validation order is deliberate: types first, then lengths,
    # then element types, always b before a.
    if not isinstance(b, list):
        raise ValueError('b must be a list.')
    if not isinstance(a, list):
        raise ValueError('a must be a list.')
    if len(b) != 3:
        raise ValueError('b must be a length 3 list.')
    if len(a) != 3:
        raise ValueError('a must be a length 3 list.')
    if not all([is_number(b_val) for b_val in b]):
        raise ValueError('all elements of b must be numbers.')
    if not all([is_number(a_val) for a_val in a]):
        raise ValueError('all elements of a must be numbers.')

    # SoX takes the six coefficients in the order b0 b1 b2 a0 a1 a2.
    args = ['biquad']
    args.extend('{:f}'.format(coef) for coef in b)
    args.extend('{:f}'.format(coef) for coef in a)

    self.effects.extend(args)
    self.effects_log.append('biquad')
    return self
def channels(self, n_channels):
    '''Change the number of channels in the audio signal. If decreasing the
    number of channels it mixes channels together, if increasing the number
    of channels it duplicates.

    Note: This overrides arguments used in the convert effect!

    Parameters
    ----------
    n_channels : int
        Desired number of channels.

    Raises
    ------
    ValueError
        If n_channels is not a positive integer.

    See Also
    --------
    convert
    '''
    if not isinstance(n_channels, int) or n_channels <= 0:
        raise ValueError('n_channels must be a positive integer.')

    self.effects.extend(['channels', '{}'.format(n_channels)])
    self.effects_log.append('channels')
    return self
def chorus(self, gain_in=0.5, gain_out=0.9, n_voices=3, delays=None,
           decays=None, speeds=None, depths=None, shapes=None):
    '''Add a chorus effect to the audio. This can make a single vocal sound
    like a chorus, but can also be applied to instrumentation.

    Chorus resembles an echo effect with a short delay, but whereas with
    echo the delay is constant, with chorus, it is varied using sinusoidal
    or triangular modulation. The modulation depth defines the range the
    modulated delay is played before or after the delay. Hence the delayed
    sound will sound slower or faster, that is the delayed sound tuned
    around the original one, like in a chorus where some vocals are
    slightly off key.

    Parameters
    ----------
    gain_in : float, default=0.5
        The input volume, a number between 0 and 1.
    gain_out : float, default=0.9
        The output volume, a number between 0 and 1.
    n_voices : int, default=3
        The number of voices in the chorus effect.
    delays : list of floats > 20 or None, default=None
        If a list, the list of delays (in miliseconds) of length n_voices.
        If None, the individual delay parameters are chosen automatically
        to be between 40 and 60 miliseconds.
    decays : list of floats or None, default=None
        If a list, the list of decays (as a fraction of gain_in) of length
        n_voices.
        If None, the individual decay parameters are chosen automatically
        to be between 0.3 and 0.4.
    speeds : list of floats or None, default=None
        If a list, the list of modulation speeds (in Hz) of length n_voices
        If None, the individual speed parameters are chosen automatically
        to be between 0.25 and 0.4 Hz.
    depths : list of floats or None, default=None
        If a list, the list of depths (in miliseconds) of length n_voices.
        If None, the individual delay parameters are chosen automatically
        to be between 1 and 3 miliseconds.
    shapes : list of 's' or 't' or None, default=None
        If a list, the list of modulation shapes - 's' for sinusoidal or
        't' for triangular - of length n_voices.
        If None, the individual shapes are chosen automatically.
    '''
    # NOTE(fix): the docstring previously described gain_in/gain_out as
    # averaging times (copied from compand) and documented the wrong
    # default values (0.3/0.8 instead of the actual 0.5/0.9).
    if not is_number(gain_in) or gain_in <= 0 or gain_in > 1:
        raise ValueError("gain_in must be a number between 0 and 1.")
    if not is_number(gain_out) or gain_out <= 0 or gain_out > 1:
        raise ValueError("gain_out must be a number between 0 and 1.")
    if not isinstance(n_voices, int) or n_voices <= 0:
        raise ValueError("n_voices must be a positive integer.")

    # validate delays; randomize per-voice values when not given
    if not (delays is None or isinstance(delays, list)):
        raise ValueError("delays must be a list or None")
    if delays is not None:
        if len(delays) != n_voices:
            raise ValueError("the length of delays must equal n_voices")
        if any((not is_number(p) or p < 20) for p in delays):
            raise ValueError("the elements of delays must be numbers > 20")
    else:
        delays = [random.uniform(40, 60) for _ in range(n_voices)]

    # validate decays
    if not (decays is None or isinstance(decays, list)):
        raise ValueError("decays must be a list or None")
    if decays is not None:
        if len(decays) != n_voices:
            raise ValueError("the length of decays must equal n_voices")
        if any((not is_number(p) or p <= 0 or p > 1) for p in decays):
            raise ValueError(
                "the elements of decays must be between 0 and 1"
            )
    else:
        decays = [random.uniform(0.3, 0.4) for _ in range(n_voices)]

    # validate speeds
    if not (speeds is None or isinstance(speeds, list)):
        raise ValueError("speeds must be a list or None")
    if speeds is not None:
        if len(speeds) != n_voices:
            raise ValueError("the length of speeds must equal n_voices")
        if any((not is_number(p) or p <= 0) for p in speeds):
            raise ValueError("the elements of speeds must be numbers > 0")
    else:
        speeds = [random.uniform(0.25, 0.4) for _ in range(n_voices)]

    # validate depths
    if not (depths is None or isinstance(depths, list)):
        raise ValueError("depths must be a list or None")
    if depths is not None:
        if len(depths) != n_voices:
            raise ValueError("the length of depths must equal n_voices")
        if any((not is_number(p) or p <= 0) for p in depths):
            raise ValueError("the elements of depths must be numbers > 0")
    else:
        depths = [random.uniform(1.0, 3.0) for _ in range(n_voices)]

    # validate shapes
    if not (shapes is None or isinstance(shapes, list)):
        raise ValueError("shapes must be a list or None")
    if shapes is not None:
        if len(shapes) != n_voices:
            raise ValueError("the length of shapes must equal n_voices")
        if any((p not in ['t', 's']) for p in shapes):
            raise ValueError("the elements of shapes must be 's' or 't'")
    else:
        shapes = [random.choice(['t', 's']) for _ in range(n_voices)]

    effect_args = ['chorus', '{}'.format(gain_in), '{}'.format(gain_out)]

    # One quintuple of arguments per voice; the shape is passed as a flag
    # ('-s' or '-t').
    for i in range(n_voices):
        effect_args.extend([
            '{:f}'.format(delays[i]),
            '{:f}'.format(decays[i]),
            '{:f}'.format(speeds[i]),
            '{:f}'.format(depths[i]),
            '-{}'.format(shapes[i])
        ])

    self.effects.extend(effect_args)
    self.effects_log.append('chorus')
    return self
def compand(self, attack_time=0.3, decay_time=0.8, soft_knee_db=6.0,
            tf_points=[(-70, -70), (-60, -20), (0, 0)],
            ):
    '''Compand (compress or expand) the dynamic range of the audio.

    Parameters
    ----------
    attack_time : float, default=0.3
        The time in seconds over which the instantaneous level of the input
        signal is averaged to determine increases in volume.
    decay_time : float, default=0.8
        The time in seconds over which the instantaneous level of the input
        signal is averaged to determine decreases in volume.
    soft_knee_db : float or None, default=6.0
        The amount (in dB) for which the points at where adjacent line
        segments on the transfer function meet will be rounded.
        If None, no soft_knee is applied.
    tf_points : list of tuples
        Transfer function points as a list of tuples corresponding to
        points in (dB, dB) defining the compander's transfer function.

    See Also
    --------
    mcompand, contrast
    '''
    if not is_number(attack_time) or attack_time <= 0:
        raise ValueError("attack_time must be a positive number.")
    if not is_number(decay_time) or decay_time <= 0:
        raise ValueError("decay_time must be a positive number.")
    if attack_time > decay_time:
        logger.warning(
            "attack_time is larger than decay_time.\n"
            "For most situations, attack_time should be shorter than "
            "decay time because the human ear is more sensitive to sudden "
            "loud music than sudden soft music."
        )
    if not (is_number(soft_knee_db) or soft_knee_db is None):
        raise ValueError("soft_knee_db must be a number or None.")

    if not isinstance(tf_points, list):
        raise TypeError("tf_points must be a list.")
    if len(tf_points) == 0:
        raise ValueError("tf_points must have at least one point.")
    if any(not isinstance(pair, tuple) for pair in tf_points):
        raise ValueError("elements of tf_points must be pairs")
    if any(len(pair) != 2 for pair in tf_points):
        raise ValueError("Tuples in tf_points must be length 2")
    if any(not (is_number(p[0]) and is_number(p[1])) for p in tf_points):
        raise ValueError("Tuples in tf_points must be pairs of numbers.")
    if any((p[0] > 0 or p[1] > 0) for p in tf_points):
        raise ValueError("Tuple values in tf_points must be <= 0 (dB).")
    if len(tf_points) > len(set([p[0] for p in tf_points])):
        raise ValueError("Found duplicate x-value in tf_points.")

    # Sort points by their input (x) level.
    # FIX: the sort key lambda previously shadowed the tf_points argument.
    tf_points = sorted(
        tf_points,
        key=lambda point: point[0]
    )
    transfer_list = []
    for point in tf_points:
        transfer_list.extend([
            "{:f}".format(point[0]), "{:f}".format(point[1])
        ])

    effect_args = [
        'compand',
        "{:f},{:f}".format(attack_time, decay_time)
    ]

    # The soft knee, when present, is prepended to the transfer function
    # with a colon separator.
    if soft_knee_db is not None:
        effect_args.append(
            "{:f}:{}".format(soft_knee_db, ",".join(transfer_list))
        )
    else:
        effect_args.append(",".join(transfer_list))

    self.effects.extend(effect_args)
    self.effects_log.append('compand')
    return self
def contrast(self, amount=75):
    '''Comparable with compression, this effect modifies an audio signal
    to make it sound louder.

    Parameters
    ----------
    amount : float
        Amount of enhancement between 0 and 100.

    Raises
    ------
    ValueError
        If amount is not a number in [0, 100].

    See Also
    --------
    compand, mcompand
    '''
    if not is_number(amount) or amount < 0 or amount > 100:
        raise ValueError('amount must be a number between 0 and 100.')

    self.effects.extend(['contrast', '{:f}'.format(amount)])
    self.effects_log.append('contrast')
    return self
def convert(self, samplerate=None, n_channels=None, bitdepth=None):
    '''Converts output audio to the specified format.

    Parameters
    ----------
    samplerate : float, default=None
        Desired samplerate. If None, defaults to the same as input.
    n_channels : int, default=None
        Desired number of channels. If None, defaults to the same as input.
    bitdepth : int, default=None
        Desired bitdepth. If None, defaults to the same as input.

    Raises
    ------
    ValueError
        If bitdepth is unsupported, n_channels is not a positive integer,
        or samplerate is not a positive number.

    See Also
    --------
    rate
    '''
    supported_depths = [8, 16, 24, 32, 64]
    if bitdepth is not None:
        if bitdepth not in supported_depths:
            raise ValueError(
                "bitdepth must be one of {}.".format(str(supported_depths))
            )
        self.output_format['bits'] = bitdepth

    if n_channels is not None:
        if not isinstance(n_channels, int) or n_channels <= 0:
            raise ValueError(
                "n_channels must be a positive integer."
            )
        self.output_format['channels'] = n_channels

    if samplerate is not None:
        if not is_number(samplerate) or samplerate <= 0:
            raise ValueError("samplerate must be a positive number.")
        # Resampling is delegated to the rate effect.
        self.rate(samplerate)

    return self
def dcshift(self, shift=0.0):
    '''Apply a DC shift to the audio.

    Parameters
    ----------
    shift : float
        Amount to shift audio between -2 and 2. (Audio is between -1 and 1)

    Raises
    ------
    ValueError
        If shift is not a number in [-2, 2].

    See Also
    --------
    highpass
    '''
    if not is_number(shift) or shift < -2 or shift > 2:
        raise ValueError('shift must be a number between -2 and 2.')

    self.effects.extend(['dcshift', '{:f}'.format(shift)])
    self.effects_log.append('dcshift')
    return self
def deemph(self):
    '''Apply Compact Disc (IEC 60908) de-emphasis (a treble attenuation
    shelving filter).

    Pre-emphasis was applied in the mastering of some CDs issued in the
    early 1980s. These included many classical music albums, as well as
    now sought-after issues of albums by The Beatles, Pink Floyd and
    others. Pre-emphasis should be removed at playback time by a
    de-emphasis filter in the playback device. However, not all modern CD
    players have this filter, and very few PC CD drives have it; playing
    pre-emphasised audio without the correct de-emphasis filter results in
    audio that sounds harsh and is far from what its creators intended.

    The de-emphasis filter is implemented as a biquad and requires the
    input audio sample rate to be either 44.1kHz or 48kHz. Maximum
    deviation from the ideal response is only 0.06dB (up to 20kHz).

    See Also
    --------
    bass, treble
    '''
    # The effect takes no arguments.
    self.effects.extend(['deemph'])
    self.effects_log.append('deemph')
    return self
def delay(self, positions):
    '''Delay one or more audio channels such that they start at the given
    positions.

    Parameters
    ----------
    positions : list of floats
        List of times (in seconds) to delay each audio channel.
        If fewer positions are given than the number of channels, the
        remaining channels will be unaffected.

    Raises
    ------
    ValueError
        If positions is not a list of nonnegative numbers.
    '''
    # BUG FIX: the error messages previously read "a a list" and
    # "positive nubmers".
    if not isinstance(positions, list):
        raise ValueError("positions must be a list of numbers")
    if not all((is_number(p) and p >= 0) for p in positions):
        raise ValueError("positions must be positive numbers")

    effect_args = ['delay']
    effect_args.extend(['{:f}'.format(p) for p in positions])

    self.effects.extend(effect_args)
    self.effects_log.append('delay')
    return self
def downsample(self, factor=2):
    '''Downsample the signal by an integer factor. Only the first out of
    each factor samples is retained, the others are discarded.

    No decimation filter is applied. If the input is not a properly
    bandlimited baseband signal, aliasing will occur. This may be
    desirable, e.g., for frequency translation.

    For a general resampling effect with anti-aliasing, see rate.

    Parameters
    ----------
    factor : int, default=2
        Downsampling factor.

    Raises
    ------
    ValueError
        If factor is not a positive integer.

    See Also
    --------
    rate, upsample
    '''
    if not isinstance(factor, int) or factor < 1:
        raise ValueError('factor must be a positive integer.')

    self.effects.extend(['downsample', '{}'.format(factor)])
    self.effects_log.append('downsample')
    return self
def earwax(self):
    '''Makes audio easier to listen to on headphones.

    Adds 'cues' to 44.1kHz stereo audio so that when listened to on
    headphones the stereo image is moved from inside your head (standard
    for headphones) to outside and in front of the listener (standard for
    speakers).

    Warning: Will only work properly on 44.1kHz stereo audio!
    '''
    # The effect takes no arguments.
    self.effects.extend(['earwax'])
    self.effects_log.append('earwax')
    return self
def echo(self, gain_in=0.8, gain_out=0.9, n_echos=1, delays=[60],
         decays=[0.4]):
    '''Add echoing to the audio.

    Echoes are reflected sound and can occur naturally amongst mountains
    (and sometimes large buildings) when talking or shouting; digital echo
    effects emulate this behaviour and are often used to help fill out the
    sound of a single instrument or vocal. The time difference between the
    original signal and the reflection is the 'delay' (time), and the
    loudness of the reflected signal is the 'decay'. Multiple echoes can
    have different delays and decays.

    Parameters
    ----------
    gain_in : float, default=0.8
        Input volume, between 0 and 1
    gain_out : float, default=0.9
        Output volume, between 0 and 1
    n_echos : int, default=1
        Number of reflections
    delays : list, default=[60]
        List of delays in miliseconds
    decays : list, default=[0.4]
        List of decays, relative to gain in between 0 and 1

    See Also
    --------
    echos, reverb, chorus
    '''
    if not is_number(gain_in) or gain_in <= 0 or gain_in > 1:
        raise ValueError("gain_in must be a number between 0 and 1.")
    if not is_number(gain_out) or gain_out <= 0 or gain_out > 1:
        raise ValueError("gain_out must be a number between 0 and 1.")
    if not isinstance(n_echos, int) or n_echos <= 0:
        raise ValueError("n_echos must be a positive integer.")

    # validate delays
    if not isinstance(delays, list):
        raise ValueError("delays must be a list")
    if len(delays) != n_echos:
        raise ValueError("the length of delays must equal n_echos")
    if any((not is_number(p) or p <= 0) for p in delays):
        raise ValueError("the elements of delays must be numbers > 0")

    # validate decays
    if not isinstance(decays, list):
        raise ValueError("decays must be a list")
    if len(decays) != n_echos:
        raise ValueError("the length of decays must equal n_echos")
    if any((not is_number(p) or p <= 0 or p > 1) for p in decays):
        raise ValueError(
            "the elements of decays must be between 0 and 1"
        )

    args = ['echo', '{:f}'.format(gain_in), '{:f}'.format(gain_out)]
    # One delay/decay pair per reflection (lists are guaranteed equal
    # length by the validation above).
    for delay_val, decay_val in zip(delays, decays):
        args.append('{}'.format(delay_val))
        args.append('{}'.format(decay_val))

    self.effects.extend(args)
    self.effects_log.append('echo')
    return self
def echos(self, gain_in=0.8, gain_out=0.9, n_echos=1, delays=[60],
          decays=[0.4]):
    '''Add a sequence of echoes to the audio.

    Like the echo effect, echos stand for 'ECHO in Sequel', that is the
    first echos takes the input, the second the input and the first echos,
    the third the input and the first and the second echos, ... and so on.
    Care should be taken using many echos; a single echos has the same
    effect as a single echo.

    Parameters
    ----------
    gain_in : float, default=0.8
        Input volume, between 0 and 1
    gain_out : float, default=0.9
        Output volume, between 0 and 1
    n_echos : int, default=1
        Number of reflections
    delays : list, default=[60]
        List of delays in miliseconds
    decays : list, default=[0.4]
        List of decays, relative to gain in between 0 and 1

    See Also
    --------
    echo, reverb, chorus
    '''
    if not is_number(gain_in) or gain_in <= 0 or gain_in > 1:
        raise ValueError("gain_in must be a number between 0 and 1.")
    if not is_number(gain_out) or gain_out <= 0 or gain_out > 1:
        raise ValueError("gain_out must be a number between 0 and 1.")
    if not isinstance(n_echos, int) or n_echos <= 0:
        raise ValueError("n_echos must be a positive integer.")

    # validate delays
    if not isinstance(delays, list):
        raise ValueError("delays must be a list")
    if len(delays) != n_echos:
        raise ValueError("the length of delays must equal n_echos")
    if any((not is_number(p) or p <= 0) for p in delays):
        raise ValueError("the elements of delays must be numbers > 0")

    # validate decays
    if not isinstance(decays, list):
        raise ValueError("decays must be a list")
    if len(decays) != n_echos:
        raise ValueError("the length of decays must equal n_echos")
    if any((not is_number(p) or p <= 0 or p > 1) for p in decays):
        raise ValueError(
            "the elements of decays must be between 0 and 1"
        )

    args = [
        'echos', '{:f}'.format(gain_in), '{:f}'.format(gain_out)
    ]
    # One delay/decay pair per reflection, formatted as floats
    # (unlike echo, which passes them through unformatted).
    for delay_val, decay_val in zip(delays, decays):
        args.append('{:f}'.format(delay_val))
        args.append('{:f}'.format(decay_val))

    self.effects.extend(args)
    self.effects_log.append('echos')
    return self
def equalizer(self, frequency, width_q, gain_db):
    '''Apply a two-pole peaking equalisation (EQ) filter to boost or
    reduce around a given frequency.

    This effect can be applied multiple times to produce complex EQ
    curves.

    Parameters
    ----------
    frequency : float
        The filter's central frequency in Hz.
    width_q : float
        The filter's width as a Q-factor.
    gain_db : float
        The filter's gain in dB.

    Raises
    ------
    ValueError
        If frequency or width_q is not positive, or gain_db is not a
        number.

    See Also
    --------
    bass, treble
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")
    if not is_number(gain_db):
        raise ValueError("gain_db must be a number.")

    self.effects.extend([
        'equalizer',
        '{:f}'.format(frequency),
        '{:f}q'.format(width_q),
        '{:f}'.format(gain_db)
    ])
    self.effects_log.append('equalizer')
    return self
def fade(self, fade_in_len=0.0, fade_out_len=0.0, fade_shape='q'):
    '''Add a fade in and/or fade out to an audio file.
    Default fade shape is 1/4 sine wave.

    Parameters
    ----------
    fade_in_len : float, default=0.0
        Length of fade-in (seconds). If fade_in_len = 0,
        no fade in is applied.
    fade_out_len : float, default=0.0
        Length of fade-out (seconds). If fade_out_len = 0,
        no fade out is applied.
    fade_shape : str, default='q'
        Shape of fade. Must be one of
         * 'q' for quarter sine (default),
         * 'h' for half sine,
         * 't' for linear,
         * 'l' for logarithmic
         * 'p' for inverted parabola.

    See Also
    --------
    splice
    '''
    # NOTE(fix): docstring previously said "defaut" and described
    # fade_out_len=0 as applying "no fade in".
    fade_shapes = ['q', 'h', 't', 'l', 'p']
    if fade_shape not in fade_shapes:
        raise ValueError(
            "Fade shape must be one of {}".format(" ".join(fade_shapes))
        )
    if not is_number(fade_in_len) or fade_in_len < 0:
        raise ValueError("fade_in_len must be a nonnegative number.")
    if not is_number(fade_out_len) or fade_out_len < 0:
        raise ValueError("fade_out_len must be a nonnegative number.")

    effect_args = []
    if fade_in_len > 0:
        effect_args.extend([
            'fade', '{}'.format(fade_shape), '{:f}'.format(fade_in_len)
        ])
    # SoX's fade-out length argument is unreliable, so the fade out is
    # applied as a fade-in on the reversed audio.
    if fade_out_len > 0:
        effect_args.extend([
            'reverse', 'fade', '{}'.format(fade_shape),
            '{:f}'.format(fade_out_len), 'reverse'
        ])

    if len(effect_args) > 0:
        self.effects.extend(effect_args)
        self.effects_log.append('fade')

    return self
def fir(self, coefficients):
    '''Use SoX's FFT convolution engine with given FIR filter
    coefficients.

    Parameters
    ----------
    coefficients : list
        fir filter coefficients

    Raises
    ------
    ValueError
        If coefficients is not a list of numbers.
    '''
    if not isinstance(coefficients, list):
        raise ValueError("coefficients must be a list.")
    if not all([is_number(c) for c in coefficients]):
        raise ValueError("coefficients must be numbers.")

    args = ['fir']
    args.extend('{:f}'.format(c) for c in coefficients)

    self.effects.extend(args)
    self.effects_log.append('fir')
    return self
def flanger(self, delay=0, depth=2, regen=0, width=71, speed=0.5,
            shape='sine', phase=25, interp='linear'):
    '''Apply a flanging effect to the audio.

    Parameters
    ----------
    delay : float, default=0
        Base delay (in miliseconds) between 0 and 30.
    depth : float, default=2
        Added swept delay (in miliseconds) between 0 and 10.
    regen : float, default=0
        Percentage regeneration between -95 and 95.
    width : float, default=71,
        Percentage of delayed signal mixed with original between 0 and 100.
    speed : float, default=0.5
        Sweeps per second (in Hz) between 0.1 and 10.
    shape : 'sine' or 'triangle', default='sine'
        Swept wave shape
    phase : float, default=25
        Swept wave percentage phase-shift for multi-channel flange between
        0 and 100. 0 = 100 = same phase on each channel
    interp : 'linear' or 'quadratic', default='linear'
        Digital delay-line interpolation type.

    See Also
    --------
    tremolo
    '''
    # Range-check every numeric parameter before touching self.
    if not is_number(delay) or delay < 0 or delay > 30:
        raise ValueError("delay must be a number between 0 and 30.")
    if not is_number(depth) or depth < 0 or depth > 10:
        raise ValueError("depth must be a number between 0 and 10.")
    if not is_number(regen) or regen < -95 or regen > 95:
        raise ValueError("regen must be a number between -95 and 95.")
    if not is_number(width) or width < 0 or width > 100:
        raise ValueError("width must be a number between 0 and 100.")
    if not is_number(speed) or speed < 0.1 or speed > 10:
        raise ValueError("speed must be a number between 0.1 and 10.")
    if shape not in ['sine', 'triangle']:
        raise ValueError("shape must be one of 'sine' or 'triangle'.")
    if not is_number(phase) or phase < 0 or phase > 100:
        raise ValueError("phase must be a number between 0 and 100.")
    if interp not in ['linear', 'quadratic']:
        raise ValueError("interp must be one of 'linear' or 'quadratic'.")

    # Positional argument order is fixed by the SoX flanger effect.
    args = ['flanger']
    args.append('{:f}'.format(delay))
    args.append('{:f}'.format(depth))
    args.append('{:f}'.format(regen))
    args.append('{:f}'.format(width))
    args.append('{:f}'.format(speed))
    args.append('{}'.format(shape))
    args.append('{:f}'.format(phase))
    args.append('{}'.format(interp))

    self.effects.extend(args)
    self.effects_log.append('flanger')
    return self
def gain(self, gain_db=0.0, normalize=True, limiter=False, balance=None):
    '''Apply amplification or attenuation to the audio signal.

    Parameters
    ----------
    gain_db : float, default=0.0
        Gain adjustment in decibels (dB).
    normalize : bool, default=True
        If True, audio is normalized to gain_db relative to full scale.
        If False, simply adjusts the audio power level by gain_db.
    limiter : bool, default=False
        If True, a simple limiter is invoked to prevent clipping.
    balance : str or None, default=None
        Balance gain across channels. Can be one of:
         * None applies no balancing (default)
         * 'e' applies gain to all channels other than that with the
           highest peak level, such that all channels attain the same
           peak level
         * 'B' applies gain to all channels other than that with the
           highest RMS level, such that all channels attain the same
           RMS level
         * 'b' applies gain with clipping protection to all channels other
           than that with the highest RMS level, such that all channels
           attain the same RMS level
        If normalize=True, 'B' and 'b' are equivalent.

    See Also
    --------
    loudness
    '''
    if not is_number(gain_db):
        raise ValueError("gain_db must be a number.")
    if not isinstance(normalize, bool):
        raise ValueError("normalize must be a boolean.")
    if not isinstance(limiter, bool):
        raise ValueError("limiter must be a boolean.")
    if balance not in [None, 'e', 'B', 'b']:
        raise ValueError("balance must be one of None, 'e', 'B', or 'b'.")

    # Flags must precede the gain value: balance, then -n, then -l.
    args = ['gain']
    if balance is not None:
        args.append('-{}'.format(balance))
    if normalize:
        args.append('-n')
    if limiter:
        args.append('-l')
    args.append('{:f}'.format(gain_db))

    self.effects.extend(args)
    self.effects_log.append('gain')
    return self
def highpass(self, frequency, width_q=0.707, n_poles=2):
    '''Apply a high-pass filter with 3dB point frequency. The filter can
    be either single-pole or double-pole. The filters roll off at 6dB per
    pole per octave (20dB per pole per decade).

    Parameters
    ----------
    frequency : float
        The filter's cutoff frequency in Hz.
    width_q : float, default=0.707
        The filter's width as a Q-factor. Applies only when n_poles=2.
        The default gives a Butterworth response.
    n_poles : int, default=2
        The number of poles in the filter. Must be either 1 or 2

    See Also
    --------
    lowpass, equalizer, sinc, allpass
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")
    if n_poles not in [1, 2]:
        raise ValueError("n_poles must be 1 or 2.")

    args = ['highpass', '-{}'.format(n_poles), '{:f}'.format(frequency)]
    # The Q-factor argument is only meaningful for the two-pole variant.
    if n_poles == 2:
        args.append('{:f}q'.format(width_q))

    self.effects.extend(args)
    self.effects_log.append('highpass')
    return self
def lowpass(self, frequency, width_q=0.707, n_poles=2):
    '''Apply a low-pass filter with 3dB point frequency. The filter can
    be either single-pole or double-pole. The filters roll off at 6dB per
    pole per octave (20dB per pole per decade).

    Parameters
    ----------
    frequency : float
        The filter's cutoff frequency in Hz.
    width_q : float, default=0.707
        The filter's width as a Q-factor. Applies only when n_poles=2.
        The default gives a Butterworth response.
    n_poles : int, default=2
        The number of poles in the filter. Must be either 1 or 2

    See Also
    --------
    highpass, equalizer, sinc, allpass
    '''
    if not is_number(frequency) or frequency <= 0:
        raise ValueError("frequency must be a positive number.")
    if not is_number(width_q) or width_q <= 0:
        raise ValueError("width_q must be a positive number.")
    if n_poles not in [1, 2]:
        raise ValueError("n_poles must be 1 or 2.")

    args = ['lowpass', '-{}'.format(n_poles), '{:f}'.format(frequency)]
    # The Q-factor argument is only meaningful for the two-pole variant.
    if n_poles == 2:
        args.append('{:f}q'.format(width_q))

    self.effects.extend(args)
    self.effects_log.append('lowpass')
    return self
def hilbert(self, num_taps=None):
    '''Apply an odd-tap Hilbert transform filter, phase-shifting the signal
    by 90 degrees. This is used in many matrix coding schemes and for
    analytic signal generation. The process is often written as a
    multiplication by i (or j), the imaginary unit. An odd-tap Hilbert
    transform filter has a bandpass characteristic, attenuating the lowest
    and highest frequencies.

    Parameters
    ----------
    num_taps : int or None, default=None
        Number of filter taps - must be odd. If none, it is chosen to have
        a cutoff frequency of about 75 Hz.

    Raises
    ------
    ValueError
        If num_taps is neither None nor an odd integer.
    '''
    # BUG FIX: error messages previously read "num taps" (wrong parameter
    # name) and "must an odd integer" (missing "be").
    if num_taps is not None and not isinstance(num_taps, int):
        raise ValueError("num_taps must be None or an odd integer.")
    if num_taps is not None and num_taps % 2 == 0:
        raise ValueError("num_taps must be an odd integer.")

    effect_args = ['hilbert']
    if num_taps is not None:
        effect_args.extend(['-n', '{}'.format(num_taps)])

    self.effects.extend(effect_args)
    self.effects_log.append('hilbert')
    return self
def loudness(self, gain_db=-10.0, reference_level=65.0):
    '''Loudness control. Like the gain effect but additionally equalises
    the signal for the human auditory system.

    The gain is adjusted by gain_db and the signal is equalised according
    to ISO 226 with respect to reference_level.

    Parameters
    ----------
    gain_db : float, default=-10.0
        Loudness adjustment amount (in dB)
    reference_level : float, default=65.0
        Reference level (in dB) according to which the signal is equalized.
        Must be between 50 and 75 (dB)

    See Also
    --------
    gain
    '''
    if not is_number(gain_db):
        raise ValueError('gain_db must be a number.')

    if not is_number(reference_level):
        raise ValueError('reference_level must be a number')

    if reference_level > 75 or reference_level < 50:
        raise ValueError('reference_level must be between 50 and 75')

    self.effects.extend([
        'loudness',
        '{:f}'.format(gain_db),
        '{:f}'.format(reference_level),
    ])
    self.effects_log.append('loudness')
    return self
def mcompand(self, n_bands=2, crossover_frequencies=[1600],
             attack_time=[0.005, 0.000625], decay_time=[0.1, 0.0125],
             soft_knee_db=[6.0, None],
             tf_points=[[(-47, -40), (-34, -34), (-17, -33), (0, 0)],
                        [(-47, -40), (-34, -34), (-15, -33), (0, 0)]],
             gain=[None, None]):
    '''The multi-band compander is similar to the single-band compander but
    the audio is first divided into bands using Linkwitz-Riley cross-over
    filters and a separately specifiable compander run on each band.

    When used with n_bands=1, this effect is identical to compand.
    When using n_bands > 1, the first set of arguments applies a single
    band compander, and each subsequent set of arugments is applied on
    each of the crossover frequencies.

    Parameters
    ----------
    n_bands : int, default=2
        The number of bands.
    crossover_frequencies : list of float, default=[1600]
        A list of crossover frequencies in Hz of length n_bands-1.
        The first band is always the full spectrum, followed by the bands
        specified by crossover_frequencies.
    attack_time : list of float, default=[0.005, 0.000625]
        A list of length n_bands, where each element is the time in seconds
        over which the instantaneous level of the input signal is averaged
        to determine increases in volume over the current band.
    decay_time : list of float, default=[0.1, 0.0125]
        A list of length n_bands, where each element is the time in seconds
        over which the instantaneous level of the input signal is averaged
        to determine decreases in volume over the current band.
    soft_knee_db : list of float or None, default=[6.0, None]
        A list of length n_bands, where each element is the ammount (in dB)
        for which the points at where adjacent line segments on the
        transfer function meet will be rounded over the current band.
        If None, no soft_knee is applied.
    tf_points : list of list of tuples, default=[
            [(-47, -40), (-34, -34), (-17, -33), (0, 0)],
            [(-47, -40), (-34, -34), (-15, -33), (0, 0)]]
        A list of length n_bands, where each element is the transfer
        function points as a list of tuples corresponding to points in
        (dB, dB) defining the compander's transfer function over the
        current band.
    gain : list of floats or None
        A list of gain values for each frequency band.
        If None, no gain is applied.

    See Also
    --------
    compand, contrast
    '''
    # NOTE(review): the list defaults above are mutable default
    # arguments. The visible code never mutates them, but replacing
    # them with None-sentinels would be safer — confirm before
    # changing the public signature.

    # --- Argument validation -------------------------------------
    if not isinstance(n_bands, int) or n_bands < 1:
        raise ValueError("n_bands must be a positive integer.")

    if (not isinstance(crossover_frequencies, list) or
            len(crossover_frequencies) != n_bands - 1):
        raise ValueError(
            "crossover_frequences must be a list of length n_bands - 1"
        )

    if any([not is_number(f) or f < 0 for f in crossover_frequencies]):
        raise ValueError(
            "crossover_frequencies elements must be positive floats."
        )

    if not isinstance(attack_time, list) or len(attack_time) != n_bands:
        raise ValueError("attack_time must be a list of length n_bands")

    if any([not is_number(a) or a <= 0 for a in attack_time]):
        raise ValueError("attack_time elements must be positive numbers.")

    if not isinstance(decay_time, list) or len(decay_time) != n_bands:
        raise ValueError("decay_time must be a list of length n_bands")

    if any([not is_number(d) or d <= 0 for d in decay_time]):
        raise ValueError("decay_time elements must be positive numbers.")

    # attack > decay is legal but usually unintended, so warn only.
    if any([a > d for a, d in zip(attack_time, decay_time)]):
        logger.warning(
            "Elements of attack_time are larger than decay_time.\n"
            "For most situations, attack_time should be shorter than "
            "decay time because the human ear is more sensitive to sudden "
            "loud music than sudden soft music."
        )

    if not isinstance(soft_knee_db, list) or len(soft_knee_db) != n_bands:
        raise ValueError("soft_knee_db must be a list of length n_bands.")

    if any([(not is_number(d) and d is not None) for d in soft_knee_db]):
        raise ValueError(
            "elements of soft_knee_db must be a number or None."
        )

    if not isinstance(tf_points, list) or len(tf_points) != n_bands:
        raise ValueError("tf_points must be a list of length n_bands.")

    if any([not isinstance(t, list) or len(t) == 0 for t in tf_points]):
        raise ValueError(
            "tf_points must be a list with at least one point."
        )

    # Per-band transfer-function validation: pairs of non-positive
    # dB numbers with unique x-values.
    for tfp in tf_points:
        if any(not isinstance(pair, tuple) for pair in tfp):
            raise ValueError("elements of tf_points lists must be pairs")
        if any(len(pair) != 2 for pair in tfp):
            raise ValueError("Tuples in tf_points lists must be length 2")
        if any(not (is_number(p[0]) and is_number(p[1])) for p in tfp):
            raise ValueError(
                "Tuples in tf_points lists must be pairs of numbers."
            )
        if any((p[0] > 0 or p[1] > 0) for p in tfp):
            raise ValueError(
                "Tuple values in tf_points lists must be <= 0 (dB)."
            )
        if len(tfp) > len(set([p[0] for p in tfp])):
            raise ValueError("Found duplicate x-value in tf_points list.")

    if not isinstance(gain, list) or len(gain) != n_bands:
        raise ValueError("gain must be a list of length n_bands")

    if any([not (is_number(g) or g is None) for g in gain]):
        raise ValueError("gain elements must be numbers or None.")

    # --- Build the sox argument list -----------------------------
    # Each band contributes one quoted argument of the form
    # "attack,decay [softknee:]x1,y1,...[,gain]", preceded (for bands
    # after the first) by its crossover frequency.
    effect_args = ['mcompand']

    for i in range(n_bands):
        if i > 0:
            effect_args.append('{:f}'.format(crossover_frequencies[i - 1]))

        intermed_args = ["{:f},{:f}".format(attack_time[i], decay_time[i])]

        # Transfer-function points must be sorted by their x-value.
        tf_points_band = tf_points[i]
        tf_points_band = sorted(
            tf_points_band,
            key=lambda tf_points_band: tf_points_band[0]
        )

        # Flatten (x, y) pairs into "x,y,..." for sox.
        transfer_list = []
        for point in tf_points_band:
            transfer_list.extend([
                "{:f}".format(point[0]), "{:f}".format(point[1])
            ])

        # Soft-knee prefixes the transfer list with "knee:".
        if soft_knee_db[i] is not None:
            intermed_args.append(
                "{:f}:{}".format(soft_knee_db[i], ",".join(transfer_list))
            )
        else:
            intermed_args.append(",".join(transfer_list))

        if gain[i] is not None:
            intermed_args.append("{:f}".format(gain[i]))

        effect_args.append(' '.join(intermed_args))

    self.effects.extend(effect_args)
    self.effects_log.append('mcompand')
    return self
def noiseprof(self, input_filepath, profile_path):
    '''Compute a noise profile of the audio for later noise reduction.

    Unlike most Transformer methods this does not modify the effects
    chain; it immediately runs sox and writes the computed profile to
    `profile_path`.

    Parameters
    ----------
    input_filepath : str
        Path to audiofile from which to compute a noise profile.
    profile_path : str
        Path to save the noise profile file.

    See Also
    --------
    noisered
    '''
    if os.path.isdir(profile_path):
        raise ValueError(
            "profile_path {} is a directory.".format(profile_path))

    # Resolve a bare filename against the current working directory
    # so the writability check below has a real directory to test.
    parent = os.path.dirname(profile_path)
    if parent == '' and profile_path != '':
        _abs_profile_path = os.path.join(os.getcwd(), profile_path)
    else:
        _abs_profile_path = profile_path

    if not os.access(os.path.dirname(_abs_profile_path), os.W_OK):
        raise IOError(
            "profile_path {} is not writeable.".format(_abs_profile_path))

    # Run immediately with a null output ('-n'): only the side-effect
    # profile file matters.
    self.build(input_filepath, '-n',
               extra_args=['noiseprof', profile_path])
    return None
def noisered(self, profile_path, amount=0.5):
    '''Reduce noise in the audio signal using a pre-computed profile.

    This effect is moderately effective at removing consistent
    background noise such as hiss or hum.

    Parameters
    ----------
    profile_path : str
        Path to a noise profile file.
        This file can be generated using the `noiseprof` effect.
    amount : float, default=0.5
        How much noise should be removed, between 0 and 1. Higher
        values remove more noise but are more likely to remove wanted
        components of the signal as well.

    See Also
    --------
    noiseprof
    '''
    if not os.path.exists(profile_path):
        raise IOError(
            "profile_path {} does not exist.".format(profile_path))

    if not is_number(amount) or amount < 0 or amount > 1:
        raise ValueError("amount must be a number between 0 and 1.")

    self.effects.extend(
        ['noisered', profile_path, '{:f}'.format(amount)]
    )
    self.effects_log.append('noisered')
    return self
def norm(self, db_level=-3.0):
    '''Normalize the audio to a given dB level.

    Behaves identically to the gain effect with normalize=True.

    Parameters
    ----------
    db_level : float, default=-3.0
        Output volume (db)

    See Also
    --------
    gain, loudness
    '''
    if not is_number(db_level):
        raise ValueError('db_level must be a number.')

    self.effects.extend(['norm', '{:f}'.format(db_level)])
    self.effects_log.append('norm')
    return self
def oops(self):
    '''Out Of Phase Stereo effect.

    Mixes stereo to twin-mono where each mono channel contains the
    difference between the left and right stereo channels. Sometimes
    known as the 'karaoke' effect, as it often removes most or all of
    the vocals from a recording.
    '''
    self.effects.append('oops')
    self.effects_log.append('oops')
    return self
def overdrive(self, gain_db=20.0, colour=20.0):
    '''Apply non-linear distortion.

    Parameters
    ----------
    gain_db : float, default=20
        Controls the amount of distortion (dB).
    colour : float, default=20
        Controls the amount of even harmonic content in the output (dB).

    Raises
    ------
    ValueError
        If gain_db or colour is not a number.
    '''
    # Fixed error message: it previously said 'db_level', which is not
    # a parameter of this method.
    if not is_number(gain_db):
        raise ValueError('gain_db must be a number.')

    if not is_number(colour):
        raise ValueError('colour must be a number.')

    effect_args = [
        'overdrive',
        '{:f}'.format(gain_db),
        '{:f}'.format(colour)
    ]
    self.effects.extend(effect_args)
    self.effects_log.append('overdrive')
    return self
def pad(self, start_duration=0.0, end_duration=0.0):
    '''Add silence to the beginning and/or end of a file.

    Calling this with the default arguments has no effect.

    Parameters
    ----------
    start_duration : float
        Number of seconds of silence to add to beginning.
    end_duration : float
        Number of seconds of silence to add to end.

    See Also
    --------
    delay
    '''
    if not is_number(start_duration) or start_duration < 0:
        raise ValueError("Start duration must be a positive number.")

    if not is_number(end_duration) or end_duration < 0:
        raise ValueError("End duration must be positive.")

    self.effects.extend([
        'pad',
        '{:f}'.format(start_duration),
        '{:f}'.format(end_duration),
    ])
    self.effects_log.append('pad')
    return self
def phaser(self, gain_in=0.8, gain_out=0.74, delay=3, decay=0.4, speed=0.5,
           modulation_shape='sinusoidal'):
    '''Apply a phasing effect to the audio.

    Parameters
    ----------
    gain_in : float, default=0.8
        Input volume between 0 and 1
    gain_out: float, default=0.74
        Output volume between 0 and 1
    delay : float, default=3
        Delay in miliseconds between 0 and 5
    decay : float, default=0.4
        Decay relative to gain_in, between 0.1 and 0.5.
    speed : float, default=0.5
        Modulation speed in Hz, between 0.1 and 2
    modulation_shape : str, default='sinusoidal'
        Modulation shape. One of 'sinusoidal' or 'triangular'

    See Also
    --------
    flanger, tremolo
    '''
    # Map each supported shape to its sox flag; membership in this
    # mapping also serves as validation.
    shape_flags = {'sinusoidal': '-s', 'triangular': '-t'}

    if not is_number(gain_in) or gain_in <= 0 or gain_in > 1:
        raise ValueError("gain_in must be a number between 0 and 1.")
    if not is_number(gain_out) or gain_out <= 0 or gain_out > 1:
        raise ValueError("gain_out must be a number between 0 and 1.")
    if not is_number(delay) or delay <= 0 or delay > 5:
        raise ValueError("delay must be a positive number.")
    if not is_number(decay) or decay < 0.1 or decay > 0.5:
        raise ValueError("decay must be a number between 0.1 and 0.5.")
    if not is_number(speed) or speed < 0.1 or speed > 2:
        raise ValueError("speed must be a positive number.")
    if modulation_shape not in shape_flags:
        raise ValueError(
            "modulation_shape must be one of 'sinusoidal', 'triangular'."
        )

    args = ['phaser']
    args.extend(
        '{:f}'.format(v) for v in (gain_in, gain_out, delay, decay, speed)
    )
    args.append(shape_flags[modulation_shape])

    self.effects.extend(args)
    self.effects_log.append('phaser')
    return self
def pitch(self, n_semitones, quick=False):
    '''Pitch shift the audio without changing the tempo.

    This effect uses the WSOLA algorithm. The audio is chopped up into
    segments which are then shifted in the time domain and overlapped
    (cross-faded) at points where their waveforms are most similar as
    determined by measurement of least squares.

    Parameters
    ----------
    n_semitones : float
        The number of semitones to shift. Can be positive or negative.
    quick : bool, default=False
        If True, this effect will run faster but with lower sound quality.

    See Also
    --------
    bend, speed, tempo
    '''
    # Fixed error message: negative shifts are explicitly valid, so the
    # old message "must be a positive number" was wrong.
    if not is_number(n_semitones):
        raise ValueError("n_semitones must be a number")

    if n_semitones < -12 or n_semitones > 12:
        logger.warning(
            "Using an extreme pitch shift. "
            "Quality of results will be poor"
        )

    if not isinstance(quick, bool):
        raise ValueError("quick must be a boolean.")

    effect_args = ['pitch']

    if quick:
        effect_args.append('-q')

    # sox expects the shift in cents (hundredths of a semitone).
    effect_args.append('{:f}'.format(n_semitones * 100.))

    self.effects.extend(effect_args)
    self.effects_log.append('pitch')
    return self
def rate(self, samplerate, quality='h'):
    '''Resample the audio to the given sample rate.

    Higher resampling quality implies a slower runtime.

    Parameters
    ----------
    samplerate : float
        Desired sample rate.
    quality : str
        Resampling quality. One of:
         * q : Quick - very low quality,
         * l : Low,
         * m : Medium,
         * h : High (default),
         * v : Very high

    See Also
    --------
    upsample, downsample, convert
    '''
    quality_vals = ['q', 'l', 'm', 'h', 'v']

    if not is_number(samplerate) or samplerate <= 0:
        raise ValueError("Samplerate must be a positive number.")

    if quality not in quality_vals:
        raise ValueError(
            "Quality must be one of {}.".format(' '.join(quality_vals))
        )

    self.effects.extend(
        ['rate', '-{}'.format(quality), '{:f}'.format(samplerate)]
    )
    self.effects_log.append('rate')
    return self
def remix(self, remix_dictionary=None, num_output_channels=None):
    '''Remix the channels of an audio file.

    Note: volume options are not yet implemented

    Parameters
    ----------
    remix_dictionary : dict or None
        Dictionary mapping output channel to list of input channel(s).
        Empty lists indicate the corresponding output channel should be
        empty. If None, mixes all channels down to a single mono file.
    num_output_channels : int or None
        The number of channels in the output file. If None, the number of
        output channels is equal to the largest key in remix_dictionary.
        If remix_dictionary is None, this variable is ignored.

    Examples
    --------
    Remix a 4-channel input file. The output file will have
    input channel 2 in channel 1, a mixdown of input channels 1 an 3 in
    channel 2, an empty channel 3, and a copy of input channel 4 in
    channel 4.

    >>> import sox
    >>> tfm = sox.Transformer()
    >>> remix_dictionary = {1: [2], 2: [1, 3], 4: [4]}
    >>> tfm.remix(remix_dictionary)
    '''
    # --- validation -------------------------------------------------
    if remix_dictionary is not None and not isinstance(remix_dictionary, dict):
        raise ValueError("remix_dictionary must be a dictionary or None.")

    if remix_dictionary is not None:
        bad_keys = [
            k for k in remix_dictionary
            if not (isinstance(k, int) and k > 0)
        ]
        if bad_keys:
            raise ValueError(
                "remix dictionary must have positive integer keys."
            )
        if any(not isinstance(v, list) for v in remix_dictionary.values()):
            raise ValueError("remix dictionary values must be lists.")
        for channel_list in remix_dictionary.values():
            if any(not (isinstance(c, int) and c > 0)
                   for c in channel_list):
                raise ValueError(
                    "elements of remix dictionary values must "
                    "be positive integers"
                )

    valid_count = (
        num_output_channels is None or
        (isinstance(num_output_channels, int) and num_output_channels > 0)
    )
    if not valid_count:
        raise ValueError(
            "num_output_channels must be a positive integer or None."
        )

    # --- argument assembly ------------------------------------------
    args = ['remix']
    if remix_dictionary is None:
        # '-' tells sox to mix everything down to mono.
        args.append('-')
    else:
        if num_output_channels is None:
            num_output_channels = max(remix_dictionary.keys())

        for out_idx in range(1, num_output_channels + 1):
            sources = remix_dictionary.get(out_idx)
            if sources is not None:
                args.append(','.join(str(s) for s in sources))
            else:
                # '0' produces an empty (silent) output channel.
                args.append('0')

    self.effects.extend(args)
    self.effects_log.append('remix')
    return self
def repeat(self, count=1):
    '''Repeat the entire audio count times.

    Parameters
    ----------
    count : int, default=1
        The number of times to repeat the audio.

    Raises
    ------
    ValueError
        If count is not a positive integer.
    '''
    if not isinstance(count, int) or count < 1:
        raise ValueError("count must be a positive integer.")

    effect_args = ['repeat', '{}'.format(count)]
    self.effects.extend(effect_args)
    self.effects_log.append('repeat')
    # Bug fix: this method previously returned None, breaking the
    # fluent chaining that every other effect method supports.
    return self
def reverb(self, reverberance=50, high_freq_damping=50, room_scale=100,
           stereo_depth=100, pre_delay=0, wet_gain=0, wet_only=False):
    '''Add reverberation to the audio using the ‘freeverb’ algorithm.

    A reverberation effect is sometimes desirable for concert halls that
    are too small or contain so many people that the hall’s natural
    reverberance is diminished. Applying a small amount of stereo reverb
    to a (dry) mono signal will usually make it sound more natural.

    Parameters
    ----------
    reverberance : float, default=50
        Percentage of reverberance
    high_freq_damping : float, default=50
        Percentage of high-frequency damping.
    room_scale : float, default=100
        Scale of the room as a percentage.
    stereo_depth : float, default=100
        Stereo depth as a percentage.
    pre_delay : float, default=0
        Pre-delay in milliseconds.
    wet_gain : float, default=0
        Amount of wet gain in dB
    wet_only : bool, default=False
        If True, only outputs the wet signal.

    See Also
    --------
    echo
    '''
    # All four percentage parameters share the same 0-100 constraint.
    percentages = [
        ('reverberance', reverberance),
        ('high_freq_damping', high_freq_damping),
        ('room_scale', room_scale),
        ('stereo_depth', stereo_depth),
    ]
    for pname, pval in percentages:
        if not is_number(pval) or pval < 0 or pval > 100:
            raise ValueError(
                "{} must be between 0 and 100".format(pname)
            )

    if not is_number(pre_delay) or pre_delay < 0:
        raise ValueError("pre_delay must be a positive number")

    if not is_number(wet_gain):
        raise ValueError("wet_gain must be a number")

    if not isinstance(wet_only, bool):
        raise ValueError("wet_only must be a boolean.")

    args = ['reverb']
    # '-w' must precede the numeric parameters.
    if wet_only:
        args.append('-w')
    args.extend(
        '{:f}'.format(v) for v in (
            reverberance, high_freq_damping, room_scale,
            stereo_depth, pre_delay, wet_gain
        )
    )

    self.effects.extend(args)
    self.effects_log.append('reverb')
    return self
def reverse(self):
    '''Reverse the audio completely.'''
    self.effects.append('reverse')
    self.effects_log.append('reverse')
    return self
def silence(self, location=0, silence_threshold=0.1,
            min_silence_duration=0.1, buffer_around_silence=False):
    '''Removes silent regions from an audio file.

    Parameters
    ----------
    location : int, default=0
        Where to remove silence. One of:
         * 0 to remove silence throughout the file (default),
         * 1 to remove silence from the beginning,
         * -1 to remove silence from the end,
    silence_threshold : float, default=0.1
        Silence threshold as percentage of maximum sample amplitude.
        Must be between 0 and 100.
    min_silence_duration : float, default=0.1
        The minimum ammount of time in seconds required for a region to be
        considered non-silent.
    buffer_around_silence : bool, default=False
        If True, leaves a buffer of min_silence_duration around removed
        silent regions.

    See Also
    --------
    vad
    '''
    if location not in [-1, 0, 1]:
        raise ValueError("location must be one of -1, 0, 1.")

    if not is_number(silence_threshold) or silence_threshold < 0:
        raise ValueError(
            "silence_threshold must be a number between 0 and 100"
        )
    elif silence_threshold >= 100:
        raise ValueError(
            "silence_threshold must be a number between 0 and 100"
        )

    if not is_number(min_silence_duration) or min_silence_duration <= 0:
        raise ValueError(
            "min_silence_duration must be a positive number."
        )

    if not isinstance(buffer_around_silence, bool):
        raise ValueError("buffer_around_silence must be a boolean.")

    effect_args = []

    # Trimming silence from the end is implemented by reversing the
    # audio, trimming silence from the (new) beginning, and reversing
    # again at the end of the chain.
    if location == -1:
        effect_args.append('reverse')

    # '-l' keeps min_silence_duration of silence around each removed
    # region.
    if buffer_around_silence:
        effect_args.extend(['silence', '-l'])
    else:
        effect_args.append('silence')

    # First triple: above-periods=1 trims silence from the beginning.
    effect_args.extend([
        '1',
        '{:f}'.format(min_silence_duration),
        '{:f}%'.format(silence_threshold)
    ])

    # Second triple (below-periods=-1): only added when trimming
    # throughout the file, so mid-file silence is removed too.
    if location == 0:
        effect_args.extend([
            '-1',
            '{:f}'.format(min_silence_duration),
            '{:f}%'.format(silence_threshold)
        ])

    # Undo the initial reverse for end-trimming.
    if location == -1:
        effect_args.append('reverse')

    self.effects.extend(effect_args)
    self.effects_log.append('silence')
    return self
def sinc(self, filter_type='high', cutoff_freq=3000,
         stop_band_attenuation=120, transition_bw=None,
         phase_response=None):
    '''Apply a sinc kaiser-windowed low-pass, high-pass, band-pass, or
    band-reject filter to the signal.

    Parameters
    ----------
    filter_type : str, default='high'
        Type of filter. One of:
            - 'high' for a high-pass filter
            - 'low' for a low-pass filter
            - 'pass' for a band-pass filter
            - 'reject' for a band-reject filter
    cutoff_freq : float or list, default=3000
        A scalar or length 2 list indicating the filter's critical
        frequencies. The critical frequencies are given in Hz and must be
        positive. For a high-pass or low-pass filter, cutoff_freq
        must be a scalar. For a band-pass or band-reject filter, it must be
        a length 2 list.
    stop_band_attenuation : float, default=120
        The stop band attenuation in dB
    transition_bw : float, list or None, default=None
        The transition band-width in Hz.
        If None, sox's default of 5% of the total bandwith is used.
        If a float, the given transition bandwith is used for both the
        upper and lower bands (if applicable).
        If a list, the first argument is used for the lower band and the
        second for the upper band.
    phase_response : float or None
        The filter's phase response between 0 (minimum) and 100 (maximum).
        If None, sox's default phase repsonse is used.

    See Also
    --------
    band, bandpass, bandreject, highpass, lowpass
    '''
    # --- validation -------------------------------------------------
    filter_types = ['high', 'low', 'pass', 'reject']
    if filter_type not in filter_types:
        raise ValueError(
            "filter_type must be one of {}".format(', '.join(filter_types))
        )

    if not (is_number(cutoff_freq) or isinstance(cutoff_freq, list)):
        raise ValueError("cutoff_freq must be a number or a list")

    if filter_type in ['high', 'low'] and isinstance(cutoff_freq, list):
        raise ValueError(
            "For filter types 'high' and 'low', "
            "cutoff_freq must be a float, not a list"
        )

    if filter_type in ['pass', 'reject'] and is_number(cutoff_freq):
        raise ValueError(
            "For filter types 'pass' and 'reject', "
            "cutoff_freq must be a list, not a float"
        )

    if is_number(cutoff_freq) and cutoff_freq <= 0:
        raise ValueError("cutoff_freq must be a postive number")

    if isinstance(cutoff_freq, list):
        if len(cutoff_freq) != 2:
            raise ValueError(
                "If cutoff_freq is a list it may only have 2 elements."
            )
        if any([not is_number(f) or f <= 0 for f in cutoff_freq]):
            raise ValueError(
                "elements of cutoff_freq must be positive numbers"
            )
        # Normalize band edges to [low, high].
        cutoff_freq = sorted(cutoff_freq)

    if not is_number(stop_band_attenuation) or stop_band_attenuation < 0:
        raise ValueError("stop_band_attenuation must be a positive number")

    if not (is_number(transition_bw) or
            isinstance(transition_bw, list) or transition_bw is None):
        raise ValueError("transition_bw must be a number, a list or None.")

    if filter_type in ['high', 'low'] and isinstance(transition_bw, list):
        raise ValueError(
            "For filter types 'high' and 'low', "
            "transition_bw must be a float, not a list"
        )

    if is_number(transition_bw) and transition_bw <= 0:
        raise ValueError("transition_bw must be a postive number")

    if isinstance(transition_bw, list):
        if any([not is_number(f) or f <= 0 for f in transition_bw]):
            raise ValueError(
                "elements of transition_bw must be positive numbers"
            )
        if len(transition_bw) != 2:
            raise ValueError(
                "If transition_bw is a list it may only have 2 elements."
            )

    if phase_response is not None and not is_number(phase_response):
        raise ValueError("phase_response must be a number or None.")

    if (is_number(phase_response) and
            (phase_response < 0 or phase_response > 100)):
        raise ValueError("phase response must be between 0 and 100")

    # --- argument assembly ------------------------------------------
    # sox's sinc syntax is positional: a '-t' option applies to the
    # frequency argument that FOLLOWS it, so option ordering below is
    # significant for each filter type.
    effect_args = ['sinc']

    effect_args.extend(['-a', '{:f}'.format(stop_band_attenuation)])

    if phase_response is not None:
        effect_args.extend(['-p', '{:f}'.format(phase_response)])

    if filter_type == 'high':
        # High-pass: plain positive frequency, '-t' before it.
        if transition_bw is not None:
            effect_args.extend(['-t', '{:f}'.format(transition_bw)])
        effect_args.append('{:f}'.format(cutoff_freq))
    elif filter_type == 'low':
        # Low-pass: sox encodes this as a leading '-' on the frequency
        # ('-FREQ'), with '-t' placed after it.
        effect_args.append('-{:f}'.format(cutoff_freq))
        if transition_bw is not None:
            effect_args.extend(['-t', '{:f}'.format(transition_bw)])
    else:
        # Band-pass/reject: a scalar transition_bw (or the first list
        # element) applies to the lower edge; a second list element is
        # appended after the band to apply to the upper edge.
        if is_number(transition_bw):
            effect_args.extend(['-t', '{:f}'.format(transition_bw)])
        elif isinstance(transition_bw, list):
            effect_args.extend(['-t', '{:f}'.format(transition_bw[0])])

    if filter_type == 'pass':
        # 'low-high' keeps the band.
        effect_args.append(
            '{:f}-{:f}'.format(cutoff_freq[0], cutoff_freq[1])
        )
    elif filter_type == 'reject':
        # 'high-low' (reversed order) rejects the band.
        effect_args.append(
            '{:f}-{:f}'.format(cutoff_freq[1], cutoff_freq[0])
        )

    if isinstance(transition_bw, list):
        effect_args.extend(['-t', '{:f}'.format(transition_bw[1])])

    self.effects.extend(effect_args)
    self.effects_log.append('sinc')
    return self
def speed(self, factor):
    '''Adjust the audio speed (pitch and tempo together).

    Technically, the speed effect only changes the sample rate
    information, leaving the samples themselves untouched. The rate
    effect is invoked automatically to resample to the output sample
    rate, using its default quality/speed. For higher quality or higher
    speed resampling, in addition to the speed effect, specify the rate
    effect with the desired quality option.

    Parameters
    ----------
    factor : float
        The ratio of the new speed to the old speed.
        For ex. 1.1 speeds up the audio by 10%; 0.9 slows it down by 10%.
        Note - this argument is the inverse of what is passed to the sox
        stretch effect for consistency with speed.

    See Also
    --------
    rate, tempo, pitch
    '''
    if not is_number(factor) or factor <= 0:
        raise ValueError("factor must be a positive number")

    # Extreme factors are allowed but degrade output quality.
    if factor < 0.5 or factor > 2:
        logger.warning(
            "Using an extreme factor. Quality of results will be poor"
        )

    self.effects.extend(['speed', '{:f}'.format(factor)])
    self.effects_log.append('speed')
    return self
def stat(self, input_filepath, scale=None, rms=False):
    '''Display time and frequency domain statistical information about
    the audio. Audio is passed unmodified through the SoX processing
    chain.

    Unlike other Transformer methods, this does not modify the
    transformer effects chain. Instead it computes statistics on the
    output file that would be created if the build command were invoked.

    Note: The file is downmixed to mono prior to computation.

    Parameters
    ----------
    input_filepath : str
        Path to input file to compute stats on.
    scale : float or None, default=None
        If not None, scales the input by the given scale factor.
    rms : bool, default=False
        If True, scales all values by the average rms amplitude.

    Returns
    -------
    stat_dict : dict
        Dictionary of statistics.

    See Also
    --------
    stats, power_spectrum, sox.file_info
    '''
    # Downmix to mono first, as sox's stat works per-channel.
    effect_args = ['channels', '1', 'stat']
    if scale is not None:
        if not is_number(scale) or scale <= 0:
            raise ValueError("scale must be a positive number.")
        effect_args.extend(['-s', '{:f}'.format(scale)])
    if rms:
        effect_args.append('-rms')

    _, _, stat_output = self.build(
        input_filepath, '-n', extra_args=effect_args, return_output=True
    )

    # Each output line is "Some label: value"; the last whitespace-
    # separated field is the value, everything before it is the key.
    stat_dict = {}
    for raw_line in stat_output.split('\n'):
        fields = raw_line.split()
        if not fields:
            continue
        label = ' '.join(fields[:-1]).strip(':')
        stat_dict[label] = fields[-1]

    return stat_dict
def power_spectrum(self, input_filepath):
    '''Calculates the power spectrum (4096 point DFT). This method
    internally invokes the stat command with the -freq option.

    Note: The file is downmixed to mono prior to computation.

    Parameters
    ----------
    input_filepath : str
        Path to input file to compute stats on.

    Returns
    -------
    power_spectrum : list
        List of frequency (Hz), amplitude pairs.

    See Also
    --------
    stat, stats, sox.file_info
    '''
    _, _, stat_output = self.build(
        input_filepath, '-n',
        extra_args=['channels', '1', 'stat', '-freq'],
        return_output=True
    )

    # Spectrum rows have exactly two fields: frequency and amplitude.
    # Any other line is part of stat's regular report and is skipped.
    spectrum = []
    for raw_line in stat_output.split('\n'):
        fields = raw_line.split()
        if len(fields) != 2:
            continue
        spectrum.append([float(fields[0]), float(fields[1])])

    return spectrum
def stats(self, input_filepath):
    '''Display time domain statistical information about the audio
    channels. Audio is passed unmodified through the SoX processing
    chain. Statistics are calculated and displayed for each audio
    channel.

    Unlike other Transformer methods, this does not modify the
    transformer effects chain. Instead it computes statistics on the
    output file that would be created if the build command were invoked.

    Note: The file is downmixed to mono prior to computation.

    Parameters
    ----------
    input_filepath : str
        Path to input file to compute stats on.

    Returns
    -------
    stats_dict : dict
        Dictionary of statistic names mapped to their values.

    See Also
    --------
    stat, sox.file_info
    '''
    _, _, stats_output = self.build(
        input_filepath, '-n',
        extra_args=['channels', '1', 'stats'],
        return_output=True
    )

    # The last whitespace-separated field on a line is the value; the
    # rest of the line is the statistic's name. Blank lines are skipped.
    stats_dict = {}
    for raw_line in stats_output.split('\n'):
        fields = raw_line.split()
        if not fields:
            continue
        stats_dict[' '.join(fields[:-1])] = fields[-1]

    return stats_dict
def stretch(self, factor, window=20):
    '''Change the audio duration (but not its pitch).

    **Unless factor is close to 1, use the tempo effect instead.**

    This effect is broadly equivalent to the tempo effect with search
    set to zero, so in general, its results are comparatively poor; it
    is retained as it can sometimes out-perform tempo for small factors.

    Parameters
    ----------
    factor : float
        The ratio of the new tempo to the old tempo.
        For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
        Note - this argument is the inverse of what is passed to the sox
        stretch effect for consistency with tempo.
    window : float, default=20
        Window size in miliseconds

    See Also
    --------
    tempo, speed, pitch
    '''
    if not is_number(factor) or factor <= 0:
        raise ValueError("factor must be a positive number")

    if factor < 0.5 or factor > 2:
        logger.warning(
            "Using an extreme time stretching factor. "
            "Quality of results will be poor"
        )

    if abs(factor - 1.0) > 0.1:
        logger.warning(
            "For this stretch factor, "
            "the tempo effect has better performance."
        )

    if not is_number(window) or window <= 0:
        raise ValueError(
            "window must be a positive number."
        )

    # sox's stretch takes the inverse of this method's factor.
    self.effects.extend(
        ['stretch', '{:f}'.format(1 / factor), '{:f}'.format(window)]
    )
    self.effects_log.append('stretch')
    return self
def swap(self):
    '''Swap stereo channels. If the input is not stereo, pairs of
    channels are swapped, and a possible odd last channel passed
    through.

    E.g., for seven channels, the output order will be 2, 1, 4, 3, 6, 5, 7.

    See Also
    ----------
    remix
    '''
    self.effects.append('swap')
    self.effects_log.append('swap')
    return self
def tempo(self, factor, audio_type=None, quick=False):
'''Time stretch audio without changing pitch.
This effect uses the WSOLA algorithm. The audio is chopped up into
segments which are then shifted in the time domain and overlapped
(cross-faded) at points where their waveforms are most similar as
determined by measurement of least squares.
Parameters
----------
factor : float
The ratio of new tempo to the old tempo.
For ex. 1.1 speeds up the tempo by 10%; 0.9 slows it down by 10%.
audio_type : str
Type of audio, which optimizes algorithm parameters. One of:
* m : Music,
* s : Speech,
* l : Linear (useful when factor is close to 1),
quick : bool, default=False
If True, this effect will run faster but with lower sound quality.
See Also
--------
stretch, speed, pitch
'''
if not is_number(factor) or factor <= 0:
raise ValueError("factor must be a positive number")
if factor < 0.5 or factor > 2:
logger.warning(
"Using an extreme time stretching factor. "
"Quality of results will be poor"
)
if abs(factor - 1.0) <= 0.1:
logger.warning(
"For this stretch factor, "
"the stretch effect has better performance."
)
if audio_type not in [None, 'm', 's', 'l']:
raise ValueError(
"audio_type must be one of None, 'm', 's', or 'l'."
)
if not isinstance(quick, bool):
raise ValueError("quick must be a boolean.")
effect_args = ['tempo']
if quick:
effect_args.append('-q')
if audio_type is not None:
effect_args.append('-{}'.format(audio_type))
effect_args.append('{:f}'.format(factor))
self.effects.extend(effect_args)
self.effects_log.append('tempo')
return self
def treble(self, gain_db, frequency=3000.0, slope=0.5):
'''Boost or cut the treble (lower) frequencies of the audio using a
two-pole shelving filter with a response similar to that of a standard
hi-fi’s tone-controls. This is also known as shelving equalisation.
The filters are described in detail in
http://musicdsp.org/files/Audio-EQ-Cookbook.txt
Parameters
----------
gain_db : float
The gain at the Nyquist frequency.
For a large cut use -20, for a large boost use 20.
frequency : float, default=100.0
The filter's cutoff frequency in Hz.
slope : float, default=0.5
The steepness of the filter's shelf transition.
For a gentle slope use 0.3, and use 1.0 for a steep slope.
See Also
--------
bass, equalizer
'''
if not is_number(gain_db):
raise ValueError("gain_db must be a number")
if not is_number(frequency) or frequency <= 0:
raise ValueError("frequency must be a positive number.")
if not is_number(slope) or slope <= 0 or slope > 1.0:
raise ValueError("width_q must be a positive number.")
effect_args = [
'treble', '{:f}'.format(gain_db), '{:f}'.format(frequency),
'{:f}s'.format(slope)
]
self.effects.extend(effect_args)
self.effects_log.append('treble')
return self
def tremolo(self, speed=6.0, depth=40.0):
'''Apply a tremolo (low frequency amplitude modulation) effect to the
audio. The tremolo frequency in Hz is giv en by speed, and the depth
as a percentage by depth (default 40).
Parameters
----------
speed : float
Tremolo speed in Hz.
depth : float
Tremolo depth as a percentage of the total amplitude.
See Also
--------
flanger
Examples
--------
>>> tfm = sox.Transformer()
For a growl-type effect
>>> tfm.tremolo(speed=100.0)
'''
if not is_number(speed) or speed <= 0:
raise ValueError("speed must be a positive number.")
if not is_number(depth) or depth <= 0 or depth > 100:
raise ValueError("depth must be a positive number less than 100.")
effect_args = [
'tremolo',
'{:f}'.format(speed),
'{:f}'.format(depth)
]
self.effects.extend(effect_args)
self.effects_log.append('tremolo')
return self
def trim(self, start_time, end_time=None):
'''Excerpt a clip from an audio file, given the start timestamp and end timestamp of the clip within the file, expressed in seconds. If the end timestamp is set to `None` or left unspecified, it defaults to the duration of the audio file.
Parameters
----------
start_time : float
Start time of the clip (seconds)
end_time : float or None, default=None
End time of the clip (seconds)
'''
if not is_number(start_time) or start_time < 0:
raise ValueError("start_time must be a positive number.")
effect_args = [
'trim',
'{:f}'.format(start_time)
]
if end_time is not None:
if not is_number(end_time) or end_time < 0:
raise ValueError("end_time must be a positive number.")
if start_time >= end_time:
raise ValueError("start_time must be smaller than end_time.")
effect_args.append('{:f}'.format(end_time - start_time))
self.effects.extend(effect_args)
self.effects_log.append('trim')
return self
def upsample(self, factor=2):
'''Upsample the signal by an integer factor: zero-value samples are
inserted between each pair of input samples. As a result, the original
spectrum is replicated into the new frequency space (imaging) and
attenuated. The upsample effect is typically used in combination with
filtering effects.
Parameters
----------
factor : int, default=2
Integer upsampling factor.
See Also
--------
rate, downsample
'''
if not isinstance(factor, int) or factor < 1:
raise ValueError('factor must be a positive integer.')
effect_args = ['upsample', '{}'.format(factor)]
self.effects.extend(effect_args)
self.effects_log.append('upsample')
return self
def vad(self, location=1, normalize=True, activity_threshold=7.0,
min_activity_duration=0.25, initial_search_buffer=1.0,
max_gap=0.25, initial_pad=0.0):
'''Voice Activity Detector. Attempts to trim silence and quiet
background sounds from the ends of recordings of speech. The algorithm
currently uses a simple cepstral power measurement to detect voice, so
may be fooled by other things, especially music.
The effect can trim only from the front of the audio, so in order to
trim from the back, the reverse effect must also be used.
Parameters
----------
location : 1 or -1, default=1
If 1, trims silence from the beginning
If -1, trims silence from the end
normalize : bool, default=True
If true, normalizes audio before processing.
activity_threshold : float, default=7.0
The measurement level used to trigger activity detection. This may
need to be cahnged depending on the noise level, signal level, and
other characteristics of the input audio.
min_activity_duration : float, default=0.25
The time constant (in seconds) used to help ignore short bursts of
sound.
initial_search_buffer : float, default=1.0
The amount of audio (in seconds) to search for quieter/shorter
bursts of audio to include prior to the detected trigger point.
max_gap : float, default=0.25
The allowed gap (in seconds) between quiteter/shorter bursts of
audio to include prior to the detected trigger point
initial_pad : float, default=0.0
The amount of audio (in seconds) to preserve before the trigger
point and any found quieter/shorter bursts.
See Also
--------
silence
Examples
--------
>>> tfm = sox.Transformer()
Remove silence from the beginning of speech
>>> tfm.vad(initial_pad=0.3)
Remove silence from the end of speech
>>> tfm.vad(location=-1, initial_pad=0.2)
'''
if location not in [-1, 1]:
raise ValueError("location must be -1 or 1.")
if not isinstance(normalize, bool):
raise ValueError("normalize muse be a boolean.")
if not is_number(activity_threshold):
raise ValueError("activity_threshold must be a number.")
if not is_number(min_activity_duration) or min_activity_duration < 0:
raise ValueError("min_activity_duration must be a positive number")
if not is_number(initial_search_buffer) or initial_search_buffer < 0:
raise ValueError("initial_search_buffer must be a positive number")
if not is_number(max_gap) or max_gap < 0:
raise ValueError("max_gap must be a positive number.")
if not is_number(initial_pad) or initial_pad < 0:
raise ValueError("initial_pad must be a positive number.")
effect_args = []
if normalize:
effect_args.append('norm')
if location == -1:
effect_args.append('reverse')
effect_args.extend([
'vad',
'-t', '{:f}'.format(activity_threshold),
'-T', '{:f}'.format(min_activity_duration),
'-s', '{:f}'.format(initial_search_buffer),
'-g', '{:f}'.format(max_gap),
'-p', '{:f}'.format(initial_pad)
])
if location == -1:
effect_args.append('reverse')
self.effects.extend(effect_args)
self.effects_log.append('vad')
return self
def vol(self, gain, gain_type='amplitude', limiter_gain=None):
'''Apply an amplification or an attenuation to the audio signal.
Parameters
----------
gain : float
Interpreted according to the given `gain_type`.
If `gain_type' = 'amplitude', `gain' is a positive amplitude ratio.
If `gain_type' = 'power', `gain' is a power (voltage squared).
If `gain_type' = 'db', `gain' is in decibels.
gain_type : string, default='amplitude'
Type of gain. One of:
- 'amplitude'
- 'power'
- 'db'
limiter_gain : float or None, default=None
If specified, a limiter is invoked on peaks greater than
`limiter_gain' to prevent clipping.
`limiter_gain` should be a positive value much less than 1.
See Also
--------
gain, compand
'''
if not is_number(gain):
raise ValueError('gain must be a number.')
if limiter_gain is not None:
if (not is_number(limiter_gain) or
limiter_gain <= 0 or limiter_gain >= 1):
raise ValueError(
'limiter gain must be a positive number less than 1'
)
if gain_type in ['amplitude', 'power'] and gain < 0:
raise ValueError(
"If gain_type = amplitude or power, gain must be positive."
)
effect_args = ['vol']
effect_args.append('{:f}'.format(gain))
if gain_type == 'amplitude':
effect_args.append('amplitude')
elif gain_type == 'power':
effect_args.append('power')
elif gain_type == 'db':
effect_args.append('dB')
else:
raise ValueError('gain_type must be one of amplitude power or db')
if limiter_gain is not None:
if gain_type in ['amplitude', 'power'] and gain > 1:
effect_args.append('{:f}'.format(limiter_gain))
elif gain_type == 'db' and gain > 0:
effect_args.append('{:f}'.format(limiter_gain))
self.effects.extend(effect_args)
self.effects_log.append('vol')
return self
| 36.943391 | 246 | 0.579367 |
372b78973109279a1686d9f75640f98d78632615 | 3,193 | py | Python | vitrage/opts.py | charliebr30/vitrage | 33e7d9a27a542c3ad4755a03dbbc8ff613caab32 | [
"Apache-2.0"
] | null | null | null | vitrage/opts.py | charliebr30/vitrage | 33e7d9a27a542c3ad4755a03dbbc8ff613caab32 | [
"Apache-2.0"
] | null | null | null | vitrage/opts.py | charliebr30/vitrage | 33e7d9a27a542c3ad4755a03dbbc8ff613caab32 | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 - Alcatel-Lucent
# Copyright 2016 - Nokia
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import os
from oslo_log import log
from oslo_utils import importutils
import vitrage.api
import vitrage.datasources
import vitrage.entity_graph.consistency
import vitrage.evaluator
import vitrage.keystone_client
import vitrage.notifier
import vitrage.os_clients
import vitrage.rpc
# Module-level logger for this opts discovery module.
LOG = log.getLogger(__name__)
# Python import prefix under which datasource packages live.
DATASOURCES_PATH = 'vitrage.datasources.'
# Filesystem path of the datasources tree, relative to the working dir.
DATASOURCE_FS_PATH = os.path.join('vitrage', 'datasources')
# A folder containing this file is treated as a datasource driver.
DRIVER_FILE = 'driver.py'
# A folder containing this file provides an alarm transformer.
TRANSFORMER_FILE = 'alarm_transformer_base.py'
def list_opts():
    """Return (group, options) pairs for oslo config option discovery."""
    default_group_opts = itertools.chain(
        vitrage.os_clients.OPTS,
        vitrage.rpc.OPTS,
        vitrage.notifier.OPTS,
    )
    return [
        ('api', vitrage.api.OPTS),
        ('datasources', vitrage.datasources.OPTS),
        ('evaluator', vitrage.evaluator.OPTS),
        ('consistency', vitrage.entity_graph.consistency.OPTS),
        ('entity_graph', vitrage.entity_graph.OPTS),
        ('service_credentials', vitrage.keystone_client.OPTS),
        ('DEFAULT', default_group_opts),
    ]
def datasources_opts():
    """Discover datasource packages and return their (name, OPTS) pairs.

    A datasource is any folder under the current working directory that
    contains a driver file and a transformer file; only modules that
    actually define an ``OPTS`` attribute are returned.
    """
    top = os.getcwd()
    driver_folders = _get_datasources_folders(top)
    transformer_folders = _filter_folders_containing_transformer(
        driver_folders)
    datasources = _normalize_path_to_datasource_name(
        transformer_folders, top)

    result = []
    for datasource in datasources:
        module = importutils.import_module(DATASOURCES_PATH + datasource)
        if 'OPTS' in module.__dict__:
            result.append((datasource, module.OPTS))
    return result
def _get_datasources_folders(top=None):
    """Return all folders under ``top`` that contain a driver file.

    :param top: root directory to search; defaults to the current working
        directory *at call time*. (The previous ``top=os.getcwd()`` default
        was evaluated once at import time, freezing whatever directory was
        current when the module loaded.)
    """
    if top is None:
        top = os.getcwd()
    return [os.path.dirname(os.path.join(root, name))
            for root, dirs, files in os.walk(top, topdown=False)
            for name in files if name == DRIVER_FILE]
def _filter_folders_containing_transformer(folders):
    """Keep only the folders whose subtree contains a transformer file."""
    result = []
    for folder in folders:
        for root, dirs, files in os.walk(folder, topdown=False):
            for name in files:
                if name == TRANSFORMER_FILE:
                    result.append(folder)
    return result
def _normalize_path_to_datasource_name(path_list, top=None):
    """Convert filesystem paths into dotted datasource module names.

    Each path is made relative to ``top``/DATASOURCE_FS_PATH and path
    separators are replaced with dots.

    :param top: base directory; defaults to the current working directory
        *at call time*. (The previous ``top=os.getcwd()`` default was
        evaluated once at import time, freezing the import-time directory.)
    """
    if top is None:
        top = os.getcwd()
    return [os.path.relpath(path, os.path.join(top, DATASOURCE_FS_PATH))
            .replace(os.sep, '.') for path in path_list]
def register_opts(conf, package_name, paths):
    """Register the config options of ``package_name`` with ``conf``.

    Each base path in ``paths`` is tried in order; the first module
    ``<path>.<package_name>`` that imports successfully has its ``OPTS``
    registered (under the group named after the package, or the default
    group for 'DEFAULT') and the search stops. ImportError is expected for
    paths that don't contain the package and is deliberately ignored.
    """
    for path in paths:
        try:
            opt = importutils.import_module(
                "%s.%s" % (path, package_name)).OPTS
            conf.register_opts(
                list(opt),
                group=None if package_name == 'DEFAULT' else package_name
            )
            return
        except ImportError:
            pass
    # Lazy %-style args (instead of eager string formatting) so the
    # message is only rendered when the error level is enabled.
    LOG.error("Failed to register config options for %s", package_name)
| 32.252525 | 78 | 0.690573 |
5273d5fed3d5b3ecf1d76679cbead58213200246 | 13,770 | py | Python | twistedcaldav/memcachepool.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 462 | 2016-08-14T17:43:24.000Z | 2022-03-17T07:38:16.000Z | twistedcaldav/memcachepool.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 72 | 2016-09-01T23:19:35.000Z | 2020-02-05T02:09:26.000Z | twistedcaldav/memcachepool.py | backwardn/ccs-calendarserver | 13c706b985fb728b9aab42dc0fef85aae21921c3 | [
"Apache-2.0"
] | 171 | 2016-08-16T03:50:30.000Z | 2022-03-26T11:49:55.000Z | ##
# Copyright (c) 2008-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twisted.python.failure import Failure
from twisted.internet.defer import Deferred, fail
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.protocols.memcache import MemCacheProtocol, NoSuchCommand
from twext.python.log import Logger
from twext.internet.gaiendpoint import GAIEndpoint
from twext.internet.adaptendpoint import connect
from twisted.internet.endpoints import UNIXClientEndpoint
class PooledMemCacheProtocol(MemCacheProtocol):
    """
    A MemCacheProtocol that fires its factory's deferred once the
    connection is established, so a managing pool knows it can start
    issuing requests.

    @ivar factory: A L{MemCacheClientFactory} instance.
    """
    factory = None

    def connectionMade(self):
        """
        Fire the factory's pending deferred (at most once) to signal that
        this protocol is ready to accept requests.
        """
        MemCacheProtocol.connectionMade(self)

        pending = self.factory.deferred
        if pending is not None:
            pending.callback(self)
            # Clear it so reconnects don't fire the same deferred twice.
            self.factory.deferred = None
class MemCacheClientFactory(ReconnectingClientFactory):
    """
    A client factory for MemCache that reconnects and notifies a pool of its
    state.

    @ivar connectionPool: A managing connection pool that we notify of events.
    @ivar deferred: A L{Deferred} that represents the initial connection.
    @ivar _protocolInstance: The current instance of our protocol that we pass
        to our connectionPool.
    """
    log = Logger()

    protocol = PooledMemCacheProtocol
    connectionPool = None
    _protocolInstance = None

    def __init__(self):
        # Fired by PooledMemCacheProtocol.connectionMade on first connect.
        self.deferred = Deferred()

    def clientConnectionLost(self, connector, reason):
        """
        Notify the connectionPool that we've lost our connection.
        """

        if self.connectionPool.shutdown_requested:
            # The reactor is stopping; don't reconnect
            return

        self.log.error("MemCache connection lost: {r}", r=reason)
        if self._protocolInstance is not None:
            # Mark the dead protocol busy so the pool won't hand it out.
            self.connectionPool.clientBusy(self._protocolInstance)

        # Let ReconnectingClientFactory schedule the retry with backoff.
        ReconnectingClientFactory.clientConnectionLost(
            self,
            connector,
            reason)

    def clientConnectionFailed(self, connector, reason):
        """
        Notify the connectionPool that we're unable to connect
        """
        self.log.error("MemCache connection failed: {r}", r=reason)
        if self._protocolInstance is not None:
            # Same as above: keep the stale protocol out of the free set.
            self.connectionPool.clientBusy(self._protocolInstance)

        ReconnectingClientFactory.clientConnectionFailed(
            self,
            connector,
            reason)

    def buildProtocol(self, addr):
        """
        Attach the C{self.connectionPool} to the protocol so it can tell it,
        when we've connected.
        """
        # On reconnect, tell the pool to forget the previous protocol
        # instance before replacing it.
        if self._protocolInstance is not None:
            self.connectionPool.clientGone(self._protocolInstance)

        self._protocolInstance = self.protocol()
        self._protocolInstance.factory = self
        return self._protocolInstance
class MemCachePool(object):
    """
    A connection pool for MemCacheProtocol instances.

    @ivar clientFactory: The L{ClientFactory} implementation that will be used
        for each protocol.

    @ivar _maxClients: A C{int} indicating the maximum number of clients.
    @ivar _endpoint: An L{IStreamClientEndpoint} provider indicating the server
        to connect to.
    @ivar _reactor: The L{IReactorTCP} provider used to initiate new
        connections.

    @ivar _busyClients: A C{set} that contains all currently busy clients.
    @ivar _freeClients: A C{set} that contains all currently free clients.
    @ivar _pendingConnects: A C{int} indicating how many connections are in
        progress.
    """
    log = Logger()

    clientFactory = MemCacheClientFactory

    # Truncate logged request arguments to this many characters.
    REQUEST_LOGGING_SIZE = 1024

    def __init__(self, endpoint, maxClients=5, reactor=None):
        """
        @param endpoint: An L{IStreamClientEndpoint} indicating the server to
            connect to.
        @param maxClients: A C{int} indicating the maximum number of clients.
        @param reactor: An L{IReactorTCP} provider used to initiate new
            connections.
        """
        self._endpoint = endpoint
        self._maxClients = maxClients
        if reactor is None:
            from twisted.internet import reactor
        self._reactor = reactor

        self.shutdown_deferred = None
        self.shutdown_requested = False
        # Delay reactor shutdown until all in-flight work is drained
        # (see _shutdownCallback).
        reactor.addSystemEventTrigger(
            'before', 'shutdown', self._shutdownCallback
        )

        self._busyClients = set([])
        self._freeClients = set([])
        self._pendingConnects = 0
        # Queue of (Deferred, command, args, kwargs) waiting for a client.
        self._commands = []

    def _isIdle(self):
        # Idle means: nothing executing, nothing queued, nothing connecting.
        return (
            len(self._busyClients) == 0 and
            len(self._commands) == 0 and
            self._pendingConnects == 0
        )

    def _shutdownCallback(self):
        # Returning a Deferred here makes the reactor wait for it before
        # shutting down; clientFree fires it once the pool drains.
        self.shutdown_requested = True
        if self._isIdle():
            return None
        self.shutdown_deferred = Deferred()
        return self.shutdown_deferred

    def _newClientConnection(self):
        """
        Create a new client connection.

        @return: A L{Deferred} that fires with the L{IProtocol} instance.
        """
        self.log.debug(
            "Initiating new client connection to: {r!r}", r=self._endpoint
        )
        self._logClientStats()

        self._pendingConnects += 1

        def _connected(client):
            self._pendingConnects -= 1
            return client

        factory = self.clientFactory()
        factory.noisy = False

        factory.connectionPool = self

        connect(self._endpoint, factory)
        d = factory.deferred

        d.addCallback(_connected)
        return d

    def _performRequestOnClient(self, client, command, *args, **kwargs):
        """
        Perform the given request on the given client.

        @param client: A L{PooledMemCacheProtocol} that will be used to perform
            the given request.

        @param command: A C{str} representing an attribute of
            L{MemCacheProtocol}.

        @param args: Any positional arguments that should be passed to
            C{command}.
        @param kwargs: Any keyword arguments that should be passed to
            C{command}.

        @return: A L{Deferred} that fires with the result of the given command.
        """
        def _freeClientAfterRequest(result):
            self.clientFree(client)
            return result

        def _reportError(failure):
            """
            Upon memcache error, log the failed request along with the error
            message and free the client.
            """
            self.log.error(
                "Memcache error: {ex}; request: {cmd} {args}",
                ex=failure.value,
                cmd=command,
                args=" ".join(args)[:self.REQUEST_LOGGING_SIZE],
            )
            self.clientFree(client)

        self.clientBusy(client)
        method = getattr(client, command, None)
        if method is not None:
            d = method(*args, **kwargs)
        else:
            d = fail(Failure(NoSuchCommand()))
        # Either way the client is returned to the free set afterwards;
        # errors are logged and swallowed (the caller sees a None result).
        d.addCallbacks(_freeClientAfterRequest, _reportError)
        return d

    def performRequest(self, command, *args, **kwargs):
        """
        Select an available client and perform the given request on it.

        @param command: A C{str} representing an attribute of
            L{MemCacheProtocol}.

        @param args: Any positional arguments that should be passed to
            C{command}.
        @param kwargs: Any keyword arguments that should be passed to
            C{command}.

        @return: A L{Deferred} that fires with the result of the given command.
        """
        # Three cases: reuse a free client, queue if at capacity, or open
        # a fresh connection.
        if len(self._freeClients) > 0:
            client = self._freeClients.pop()

            d = self._performRequestOnClient(
                client, command, *args, **kwargs)

        elif (
            len(self._busyClients) + self._pendingConnects >= self._maxClients
        ):
            d = Deferred()
            self._commands.append((d, command, args, kwargs))
            self.log.debug(
                "Command queued: {c}, {a!r}, {k!r}", c=command, a=args, k=kwargs
            )
            self._logClientStats()

        else:
            d = self._newClientConnection()
            d.addCallback(self._performRequestOnClient,
                          command, *args, **kwargs)

        return d

    def _logClientStats(self):
        self.log.debug(
            "Clients #free: {f}, #busy: {b}, #pending: {p}, #queued: {q}",
            f=len(self._freeClients),
            b=len(self._busyClients),
            p=self._pendingConnects,
            q=len(self._commands),
        )

    def clientGone(self, client):
        """
        Notify that the given client is to be removed from the pool completely.

        @param client: An instance of L{PooledMemCacheProtocol}.
        """
        if client in self._busyClients:
            self._busyClients.remove(client)

        elif client in self._freeClients:
            self._freeClients.remove(client)

        self.log.debug("Removed client: {c!r}", c=client)
        self._logClientStats()

    def clientBusy(self, client):
        """
        Notify that the given client is being used to complete a request.

        @param client: An instance of C{self.clientFactory}
        """
        if client in self._freeClients:
            self._freeClients.remove(client)

        self._busyClients.add(client)

        self.log.debug("Busied client: {c!r}", c=client)
        self._logClientStats()

    def clientFree(self, client):
        """
        Notify that the given client is free to handle more requests.

        @param client: An instance of C{self.clientFactory}
        """
        if client in self._busyClients:
            self._busyClients.remove(client)

        self._freeClients.add(client)

        # Releasing the last busy client may complete a pending shutdown.
        if self.shutdown_deferred and self._isIdle():
            self.shutdown_deferred.callback(None)

        # A freed client means a queued command can now run; dequeue FIFO.
        if len(self._commands) > 0:
            d, command, args, kwargs = self._commands.pop(0)

            self.log.debug(
                "Performing Queued Command: {c}, {a}, {k}",
                c=command, a=args, k=kwargs,
            )
            self._logClientStats()

            _ign_d = self.performRequest(
                command, *args, **kwargs)

            # Forward the eventual result to the Deferred handed out when
            # the command was queued.
            _ign_d.addCallback(d.callback)

        self.log.debug("Freed client: {c!r}", c=client)
        self._logClientStats()

    def suggestMaxClients(self, maxClients):
        """
        Suggest the maximum number of concurrently connected clients.

        @param maxClients: A C{int} indicating how many client connections we
            should keep open.
        """
        self._maxClients = maxClients

    # Thin conveniences mapping pool methods onto MemCacheProtocol commands.

    def get(self, *args, **kwargs):
        return self.performRequest('get', *args, **kwargs)

    def set(self, *args, **kwargs):
        return self.performRequest('set', *args, **kwargs)

    def checkAndSet(self, *args, **kwargs):
        return self.performRequest('checkAndSet', *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self.performRequest('delete', *args, **kwargs)

    def add(self, *args, **kwargs):
        return self.performRequest('add', *args, **kwargs)

    def incr(self, *args, **kwargs):
        return self.performRequest('increment', *args, **kwargs)

    def decr(self, *args, **kwargs):
        return self.performRequest('decrement', *args, **kwargs)

    def flushAll(self, *args, **kwargs):
        return self.performRequest('flushAll', *args, **kwargs)
class CachePoolUserMixIn(object):
    """
    A mixin that returns a saved cache pool or fetches the default cache
    pool for its handle.

    @ivar _cachePool: A saved cachePool.
    """
    _cachePool = None
    _cachePoolHandle = "Default"

    def getCachePool(self):
        # An explicitly assigned pool wins; otherwise look up the named
        # default pool.
        if self._cachePool is not None:
            return self._cachePool
        return defaultCachePool(self._cachePoolHandle)
_memCachePools = {}         # Maps a pool name to its MemCachePool object
_memCachePoolHandler = {}   # Maps a cache-handle id to a named pool
def installPools(pools, maxClients=5, reactor=None):
    """
    Create and register a MemCachePool for every client-enabled entry in
    ``pools``. Each entry connects over a UNIX socket when
    "MemcacheSocket" is set, otherwise over TCP to "BindAddress":"Port".
    """
    if reactor is None:
        from twisted.internet import reactor

    for name, pool in pools.items():
        if not pool["ClientEnabled"]:
            continue
        if pool.get("MemcacheSocket"):
            endpoint = UNIXClientEndpoint(reactor, pool["MemcacheSocket"])
        else:
            endpoint = GAIEndpoint(
                reactor, pool["BindAddress"], pool["Port"]
            )
        _installPool(
            name,
            pool["HandleCacheTypes"],
            endpoint,
            maxClients,
            reactor,
        )
def _installPool(
    name, handleTypes, serverEndpoint, maxClients=5, reactor=None
):
    """
    Create a MemCachePool for ``serverEndpoint``, register it under
    ``name``, and map each handle type in ``handleTypes`` to it.

    @param name: registry key for the new pool.
    @param handleTypes: iterable of cache-handle identifiers served by
        this pool.
    @param serverEndpoint: an L{IStreamClientEndpoint} for the memcached
        server.
    @param maxClients: maximum number of concurrent client connections.
    @param reactor: reactor used for new connections. Previously this
        argument was accepted but silently discarded (``reactor=None`` was
        hard-coded in the MemCachePool constructor call); it is now passed
        through.
    """
    pool = MemCachePool(
        serverEndpoint, maxClients=maxClients, reactor=reactor
    )
    _memCachePools[name] = pool

    for handle in handleTypes:
        _memCachePoolHandler[handle] = pool
def defaultCachePool(name):
    """
    Return the pool registered for the given handle name, falling back to
    the "Default" pool when the name is unknown.
    """
    pool = _memCachePoolHandler.get(name)
    if pool is None:
        pool = _memCachePoolHandler["Default"]
    return pool
| 30.668151 | 80 | 0.622803 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.