hexsha stringlengths 40 40 | size int64 2 1.02M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 2 1.02M | avg_line_length float64 1 417k | max_line_length int64 1 987k | alphanum_fraction float64 0 1 | content_no_comment stringlengths 0 1.01M | is_comment_constant_removed bool 1
class | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f73aa5cb46401d3cf1db1f98d7bd85961884409c | 9,349 | py | Python | app.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | null | null | null | app.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | 2 | 2019-03-01T00:02:54.000Z | 2019-07-24T22:54:06.000Z | app.py | shinyichen/gaia-visualization | ebbd741a9e5668df4be1f13f432855009f8bc63d | [
"MIT"
] | 2 | 2018-08-16T20:57:03.000Z | 2019-03-05T22:07:28.000Z | import os
from flask import Flask, render_template, abort, request, jsonify
# from model import get_cluster, get_cluster_list, types, recover_doc_online
from model import Model, types
# from setting import repo, port, repositories, upload_folder, import_endpoint
import setting
from setting import url_prefix
import groundtruth as gt
import debug
import requests
from rdflib.plugins.stores.sparqlstore import SPARQLStore
import tmp
import time_person_label
import re
app = Flask(__name__, static_folder='static')
app.jinja_env.globals.update(str=str) # allow str function to be used in template
app.jinja_env.globals.update(round=round) # allow round function to be used in template
app.config['JSON_AS_ASCII'] = True
def generate_pkl(sparql, graph, file_path):
tmp.run(sparql, graph, file_path)
time_person_label.run(sparql, graph, file_path)
@app.route('/')
def index():
repos = {}
for repo in setting.repositories:
repos[repo] = []
endpoint = setting.endpoint + '/' + repo + '/rdf-graphs'
res = requests.get(endpoint, headers={'Accept': 'application/sparql-results+json'},
auth=(setting.username, setting.password))
result = res.json()['results']['bindings']
for r in result:
repos[repo].append(r['contextID']['value'])
return render_template('index.html',
url_prefix=url_prefix,
repos=repos)
@app.route('/repo/<repo>')
def hello_world(repo):
graph_uri = request.args.get('g', '')
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return render_template('clusters.html',
url_prefix=url_prefix,
repo=repo,
graph=graph_uri,
entities=model.get_cluster_list(types.Entity),
events=model.get_cluster_list(types.Events),
relations=model.get_cluster_list(types.Relation))
@app.route('/js/<path>')
def static_js(path):
return app.send_static_file('js/' + path)
@app.route('/img/<path>')
def static_img(path):
return app.send_static_file('img/' + path)
@app.route('/css/<path>')
def static_css(path):
return app.send_static_file('css/' + path)
@app.route('/viz/<name>')
def show_bidirection_viz(name):
return render_template('viz.html', url_prefix=url_prefix, name=name)
@app.route('/sviz/<name>')
def show_viz(name):
return render_template('sviz.html', url_prefix=url_prefix, name=name)
@app.route('/cluster/entities/<repo>/<uri>')
@app.route('/entities/<repo>/<uri>')
def show_entity_cluster(repo, uri):
i = uri.find('?')
if i > 0:
uri = uri[:i]
uri = 'http://www.isi.edu/gaia/entities/' + uri
graph_uri = request.args.get('g', default=None)
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
@app.route('/list/<type_>/<repo>')
def show_entity_cluster_list(type_, repo):
graph_uri = request.args.get('g', default=None)
limit = request.args.get('limit', default=100, type=int)
offset = request.args.get('offset', default=0, type=int)
sortby = request.args.get('sortby', default='size')
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
if type_ == 'entity':
return render_template('list.html',
url_prefix=url_prefix,
type_='entity',
repo=repo,
graph=graph_uri,
limit=limit,
offset=offset,
sortby=sortby,
clusters=model.get_cluster_list(types.Entity, limit, offset, sortby))
elif type_ == 'event':
return render_template('list.html',
url_prefix=url_prefix,
type_='event',
repo=repo,
graph=graph_uri,
limit=limit,
offset=offset,
sortby=sortby,
clusters=model.get_cluster_list(types.Events, limit, offset, sortby))
else:
abort(404)
@app.route('/cluster/events/<repo>/<uri>')
@app.route('/events/<repo>/<uri>')
def show_event_cluster(repo, uri):
uri = 'http://www.isi.edu/gaia/events/' + uri
graph_uri = request.args.get('g', default=None)
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
@app.route('/cluster/<repo>/AIDA/<path:uri>')
def show_columbia_cluster(repo, uri):
graph_uri = request.args.get('g', default=None)
uri = 'http://www.columbia.edu/AIDA/' + uri
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
def show_cluster(model: Model, uri, show_image=True, show_limit=100):
cluster = model.get_cluster(uri)
show_image = show_image not in {False, 'False', 'false', 'no', '0'}
show_limit = show_limit not in {False, 'False', 'false', 'no', '0'} and (
isinstance(show_limit, int) and show_limit) or (show_limit.isdigit() and int(show_limit))
if not cluster:
abort(404)
print(cluster.href)
return render_template('cluster.html',
url_prefix=url_prefix,
repo=model.repo,
graph=model.graph,
cluster=cluster,
show_image=show_image,
show_limit=show_limit)
@app.route('/report')
def show_report():
update = request.args.get('update', default=False, type=bool)
report = Report(update)
return render_template('report.html', url_prefix=url_prefix, report=report)
@app.route('/doc/<doc_id>')
def show_doc_pronoun(doc_id):
return render_template('doc.html', url_prefix=url_prefix, doc_id=doc_id, content=recover_doc_online(doc_id))
@app.route('/cluster/entities/gt/<repo>')
def show_entity_gt(repo):
uri = request.args.get('e', default=None)
graph_uri = request.args.get('g', default=None)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
cluster = model.get_cluster(uri)
return render_template('groundtruth.html', url_prefix=url_prefix, repo=repo, graph=graph_uri, cluster=cluster)
@app.route('/cluster/import-debugger')
def show_import_debugger():
return render_template('import-debugger.html', repos=setting.repositories)
@app.route('/import-debugger', methods=['POST'])
def import_clusters():
repo = request.form['repo']
graph_uri = request.form['graph_uri']
debug_file = request.files['debug_file']
if repo and debug_file:
# create directory for debug files if doesn't exist
data_dir = setting.debug_data
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
if graph_uri:
filename = repo + '-' + re.sub('[^0-9a-zA-Z]+', '-', graph_uri)
else:
filename = repo
file = data_dir + '/' + filename + '.jl'
# upload file
debug_file.save(file)
return '''
<!doctype html>
<title>Imported</title>
<h1>Imported</h1>
'''
else:
return '''
<!doctype html>
<title>Invalid</title>
<h1>Invalid</h1>
'''
@app.route('/groundtruth/<repo>', methods=['GET'])
def groundtruth(repo):
graph = request.args.get('g', default=None)
entity_uri = request.args.get('e', default=None)
if entity_uri:
if gt.has_gt(repo, graph):
print("has gt")
gt_cluster = gt.search_cluster(repo, graph, entity_uri)
return jsonify(gt_cluster)
else:
print("no gt")
return not_found()
else:
return jsonify(gt.get_all())
@app.route('/cluster/entities/debug/<repo>', methods=['GET'])
def debugger(repo):
graph = request.args.get('g', default=None)
cluster_uri = request.args.get('cluster')
if cluster_uri:
result = debug.get_debug_for_cluster(repo, graph, cluster_uri)
else:
return not_found()
if result:
return jsonify(result)
else:
return not_found()
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
# app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.debug = True
app.run(host='0.0.0.0', port=setting.port)
| 33.389286 | 114 | 0.609049 | import os
from flask import Flask, render_template, abort, request, jsonify
from model import Model, types
import setting
from setting import url_prefix
import groundtruth as gt
import debug
import requests
from rdflib.plugins.stores.sparqlstore import SPARQLStore
import tmp
import time_person_label
import re
app = Flask(__name__, static_folder='static')
app.jinja_env.globals.update(str=str)
app.jinja_env.globals.update(round=round)
app.config['JSON_AS_ASCII'] = True
def generate_pkl(sparql, graph, file_path):
tmp.run(sparql, graph, file_path)
time_person_label.run(sparql, graph, file_path)
@app.route('/')
def index():
repos = {}
for repo in setting.repositories:
repos[repo] = []
endpoint = setting.endpoint + '/' + repo + '/rdf-graphs'
res = requests.get(endpoint, headers={'Accept': 'application/sparql-results+json'},
auth=(setting.username, setting.password))
result = res.json()['results']['bindings']
for r in result:
repos[repo].append(r['contextID']['value'])
return render_template('index.html',
url_prefix=url_prefix,
repos=repos)
@app.route('/repo/<repo>')
def hello_world(repo):
graph_uri = request.args.get('g', '')
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return render_template('clusters.html',
url_prefix=url_prefix,
repo=repo,
graph=graph_uri,
entities=model.get_cluster_list(types.Entity),
events=model.get_cluster_list(types.Events),
relations=model.get_cluster_list(types.Relation))
@app.route('/js/<path>')
def static_js(path):
return app.send_static_file('js/' + path)
@app.route('/img/<path>')
def static_img(path):
return app.send_static_file('img/' + path)
@app.route('/css/<path>')
def static_css(path):
return app.send_static_file('css/' + path)
@app.route('/viz/<name>')
def show_bidirection_viz(name):
return render_template('viz.html', url_prefix=url_prefix, name=name)
@app.route('/sviz/<name>')
def show_viz(name):
return render_template('sviz.html', url_prefix=url_prefix, name=name)
@app.route('/cluster/entities/<repo>/<uri>')
@app.route('/entities/<repo>/<uri>')
def show_entity_cluster(repo, uri):
i = uri.find('?')
if i > 0:
uri = uri[:i]
uri = 'http://www.isi.edu/gaia/entities/' + uri
graph_uri = request.args.get('g', default=None)
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
@app.route('/list/<type_>/<repo>')
def show_entity_cluster_list(type_, repo):
graph_uri = request.args.get('g', default=None)
limit = request.args.get('limit', default=100, type=int)
offset = request.args.get('offset', default=0, type=int)
sortby = request.args.get('sortby', default='size')
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
if type_ == 'entity':
return render_template('list.html',
url_prefix=url_prefix,
type_='entity',
repo=repo,
graph=graph_uri,
limit=limit,
offset=offset,
sortby=sortby,
clusters=model.get_cluster_list(types.Entity, limit, offset, sortby))
elif type_ == 'event':
return render_template('list.html',
url_prefix=url_prefix,
type_='event',
repo=repo,
graph=graph_uri,
limit=limit,
offset=offset,
sortby=sortby,
clusters=model.get_cluster_list(types.Events, limit, offset, sortby))
else:
abort(404)
@app.route('/cluster/events/<repo>/<uri>')
@app.route('/events/<repo>/<uri>')
def show_event_cluster(repo, uri):
uri = 'http://www.isi.edu/gaia/events/' + uri
graph_uri = request.args.get('g', default=None)
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
@app.route('/cluster/<repo>/AIDA/<path:uri>')
def show_columbia_cluster(repo, uri):
graph_uri = request.args.get('g', default=None)
uri = 'http://www.columbia.edu/AIDA/' + uri
show_image = request.args.get('image', default=True)
show_limit = request.args.get('limit', default=100)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
return show_cluster(model, uri, show_image, show_limit)
def show_cluster(model: Model, uri, show_image=True, show_limit=100):
cluster = model.get_cluster(uri)
show_image = show_image not in {False, 'False', 'false', 'no', '0'}
show_limit = show_limit not in {False, 'False', 'false', 'no', '0'} and (
isinstance(show_limit, int) and show_limit) or (show_limit.isdigit() and int(show_limit))
if not cluster:
abort(404)
print(cluster.href)
return render_template('cluster.html',
url_prefix=url_prefix,
repo=model.repo,
graph=model.graph,
cluster=cluster,
show_image=show_image,
show_limit=show_limit)
@app.route('/report')
def show_report():
update = request.args.get('update', default=False, type=bool)
report = Report(update)
return render_template('report.html', url_prefix=url_prefix, report=report)
@app.route('/doc/<doc_id>')
def show_doc_pronoun(doc_id):
return render_template('doc.html', url_prefix=url_prefix, doc_id=doc_id, content=recover_doc_online(doc_id))
@app.route('/cluster/entities/gt/<repo>')
def show_entity_gt(repo):
uri = request.args.get('e', default=None)
graph_uri = request.args.get('g', default=None)
sparql = SPARQLStore(setting.endpoint + '/' + repo)
model = Model(sparql, repo, graph_uri)
cluster = model.get_cluster(uri)
return render_template('groundtruth.html', url_prefix=url_prefix, repo=repo, graph=graph_uri, cluster=cluster)
@app.route('/cluster/import-debugger')
def show_import_debugger():
return render_template('import-debugger.html', repos=setting.repositories)
@app.route('/import-debugger', methods=['POST'])
def import_clusters():
repo = request.form['repo']
graph_uri = request.form['graph_uri']
debug_file = request.files['debug_file']
if repo and debug_file:
data_dir = setting.debug_data
if not os.path.isdir(data_dir):
os.makedirs(data_dir)
if graph_uri:
filename = repo + '-' + re.sub('[^0-9a-zA-Z]+', '-', graph_uri)
else:
filename = repo
file = data_dir + '/' + filename + '.jl'
# upload file
debug_file.save(file)
return '''
<!doctype html>
<title>Imported</title>
<h1>Imported</h1>
'''
else:
return '''
<!doctype html>
<title>Invalid</title>
<h1>Invalid</h1>
'''
@app.route('/groundtruth/<repo>', methods=['GET'])
def groundtruth(repo):
graph = request.args.get('g', default=None)
entity_uri = request.args.get('e', default=None)
if entity_uri:
if gt.has_gt(repo, graph):
print("has gt")
gt_cluster = gt.search_cluster(repo, graph, entity_uri)
return jsonify(gt_cluster)
else:
print("no gt")
return not_found()
else:
return jsonify(gt.get_all())
@app.route('/cluster/entities/debug/<repo>', methods=['GET'])
def debugger(repo):
graph = request.args.get('g', default=None)
cluster_uri = request.args.get('cluster')
if cluster_uri:
result = debug.get_debug_for_cluster(repo, graph, cluster_uri)
else:
return not_found()
if result:
return jsonify(result)
else:
return not_found()
@app.errorhandler(404)
def not_found(error=None):
message = {
'status': 404,
'message': 'Not Found: ' + request.url,
}
resp = jsonify(message)
resp.status_code = 404
return resp
if __name__ == '__main__':
# app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
app.debug = True
app.run(host='0.0.0.0', port=setting.port)
| true | true |
f73aa7cd50353763aebc57cd41c7030d67f336cc | 2,882 | py | Python | opennre/tests/converters/semeval2010/test_spacy_sdp_converter.py | igorvlnascimento/DeepREF | 0fed8120571e44e12ee3d1861289bc101c0a275f | [
"MIT"
] | null | null | null | opennre/tests/converters/semeval2010/test_spacy_sdp_converter.py | igorvlnascimento/DeepREF | 0fed8120571e44e12ee3d1861289bc101c0a275f | [
"MIT"
] | null | null | null | opennre/tests/converters/semeval2010/test_spacy_sdp_converter.py | igorvlnascimento/DeepREF | 0fed8120571e44e12ee3d1861289bc101c0a275f | [
"MIT"
] | null | null | null | # from opennre.dataset.converters.converter_semeval2010 import ConverterSemEval2010
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_first_example():
# p = ConverterSemEval2010("spacy", "general")
# assert p.tokenize("the most common ENTITYSTART audits ENTITYEND were about ENTITYOTHERSTART waste ENTITYOTHEREND and recycling.")[-1] \
# == " ".join(["audits", "were", "about", "waste"])
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_second_example():
# p = ConverterSemEval2010("spacy", "general")
# assert p.tokenize("the ENTITYSTART company ENTITYEND fabricates plastic ENTITYOTHERSTART chairs ENTITYOTHEREND .")[-1] \
# == " ".join(["company", "fabricates", "chairs"])
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_third_example():
# p = ConverterSemEval2010("spacy", "general")
# assert p.tokenize("i spent a year working for a ENTITYSTART software ENTITYEND ENTITYOTHERSTART company ENTITYOTHEREND to pay off my college loans.")[-1] \
# == " ".join(["software", "company"])
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_having_entities_with_more_than_one_word():
# p = ConverterSemEval2010("spacy", "general")
# assert p.tokenize("sci-fi channel is the ENTITYSTART cable network ENTITYEND exclusively dedicated to offering classic ENTITYOTHERSTART science fiction tv shows ENTITYOTHEREND and movies, as well as bold original programming.")[-1] \
# == " ".join(["network", "dedicated", "to", "offering", "shows"])
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_having_two_equal_word_entities_at_the_same_sentence():
# p = ConverterSemEval2010("spacy", "general")
# row = {'sdp': [('telescope', 'the'), ('comprises', 'telescope'), ('comprises', 'lenses'), ('lenses', 'and'), ('lenses', 'tubes'), ('are', 'comprises'), ('are', ':'), ('are', 'both'), ('are', 'important'), ('are', '.'), ('important', 'extremely'), ('important', 'to'), ('to', 'performance'), ('performance', 'the'), ('performance', 'of'), ('of', 'telescope'), ('telescope', 'the')]}
# e1 = "telescope"
# e2 = "lenses"
# assert p.tokenize("the ENTITYSTART telescope ENTITYEND comprises ENTITYOTHERSTART lenses ENTITYOTHEREND and tubes: both are extremely important to the performance of the telescope.")[-1] \
# == " ".join(["telescope", "comprises", "lenses"])
# def test_should_return_correct_spacy_sdp_when_doing_sdp_preprocessing_having_uppercases_entities():
# p = ConverterSemEval2010("spacy", "general")
# assert p.tokenize("ENTITYSTART carpenters ENTITYEND build many things from ENTITYOTHERSTART wood ENTITYOTHEREND and other materials, like buildings and boats.")[-1] \
# == " ".join(["carpenters", "build", "things", "from", "wood"])
| 77.891892 | 387 | 0.710965 | true | true | |
f73aa83a76cae82a3526ca7dd3d012ae0fb9f86d | 1,478 | py | Python | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py | yage99/tensorflow | c7fa71b32a3635eb25596ae80d007b41007769c4 | [
"Apache-2.0"
] | 388 | 2020-06-27T01:38:29.000Z | 2022-03-29T14:12:01.000Z | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 1,056 | 2019-12-15T01:20:31.000Z | 2022-02-10T02:06:28.000Z | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/exported_python_args.py | sseung0703/tensorflow | be084bd7a4dd241eb781fc704f57bcacc5c9b6dd | [
"Apache-2.0"
] | 75 | 2021-12-24T04:48:21.000Z | 2022-03-29T10:13:39.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: (! %p/exported_python_args 2>&1) | FileCheck %s
# pylint: disable=missing-docstring,line-too-long,dangerous-default-value
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return self.callee(x)
# CHECK: While importing SavedModel function 'callee': in input signature:
# CHECK-SAME: Unhandled structured value kind {{.*}} at index path: <value>.1.foo
@tf.function
def callee(self, x, n={'foo': 42}):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| 35.190476 | 83 | 0.714479 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
@tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
def some_function(self, x):
return self.callee(x)
@tf.function
def callee(self, x, n={'foo': 42}):
return x
if __name__ == '__main__':
common.do_test(TestModule)
| true | true |
f73aaaf086bdb134abb896c1768d6413a0c141df | 7,513 | py | Python | sdk/python/pulumi_sonarqube/sonar_project.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_sonarqube/sonar_project.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_sonarqube/sonar_project.py | jshield/pulumi-sonarqube | 53664a97903af3ecdf4f613117d83d0acae8e53e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SonarProjectArgs', 'SonarProject']
@pulumi.input_type
class SonarProjectArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SonarProject resource.
"""
pulumi.set(__self__, "project", project)
if name is not None:
pulumi.set(__self__, "name", name)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
@pulumi.input_type
class _SonarProjectState:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering SonarProject resources.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
class SonarProject(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a SonarProject resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SonarProjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a SonarProject resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param SonarProjectArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SonarProjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SonarProjectArgs.__new__(SonarProjectArgs)
__props__.__dict__["name"] = name
if project is None and not opts.urn:
raise TypeError("Missing required property 'project'")
__props__.__dict__["project"] = project
__props__.__dict__["visibility"] = visibility
super(SonarProject, __self__).__init__(
'sonarqube:index/sonarProject:SonarProject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None) -> 'SonarProject':
"""
Get an existing SonarProject resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SonarProjectState.__new__(_SonarProjectState)
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["visibility"] = visibility
return SonarProject(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter
def visibility(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "visibility")
| 37.193069 | 134 | 0.625982 |
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['SonarProjectArgs', 'SonarProject']
@pulumi.input_type
class SonarProjectArgs:
def __init__(__self__, *,
project: pulumi.Input[str],
name: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
pulumi.set(__self__, "project", project)
if name is not None:
pulumi.set(__self__, "name", name)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter
def project(self) -> pulumi.Input[str]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: pulumi.Input[str]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
@pulumi.input_type
class _SonarProjectState:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None):
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if visibility is not None:
pulumi.set(__self__, "visibility", visibility)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def visibility(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "visibility")
@visibility.setter
def visibility(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "visibility", value)
class SonarProject(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
...
@overload
def __init__(__self__,
resource_name: str,
args: SonarProjectArgs,
opts: Optional[pulumi.ResourceOptions] = None):
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SonarProjectArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SonarProjectArgs.__new__(SonarProjectArgs)
__props__.__dict__["name"] = name
if project is None and not opts.urn:
raise TypeError("Missing required property 'project'")
__props__.__dict__["project"] = project
__props__.__dict__["visibility"] = visibility
super(SonarProject, __self__).__init__(
'sonarqube:index/sonarProject:SonarProject',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
visibility: Optional[pulumi.Input[str]] = None) -> 'SonarProject':
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _SonarProjectState.__new__(_SonarProjectState)
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["visibility"] = visibility
return SonarProject(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter
def visibility(self) -> pulumi.Output[Optional[str]]:
return pulumi.get(self, "visibility")
| true | true |
f73aae3f98d12580e9f23b1b54552ca9d80a12b5 | 3,113 | py | Python | hdf5_loading_three_bumps.py | h-mayorquin/time_series_basic | 654fb67ef6258b3f200c15a2b8068ab9300401d7 | [
"BSD-3-Clause"
] | null | null | null | hdf5_loading_three_bumps.py | h-mayorquin/time_series_basic | 654fb67ef6258b3f200c15a2b8068ab9300401d7 | [
"BSD-3-Clause"
] | null | null | null | hdf5_loading_three_bumps.py | h-mayorquin/time_series_basic | 654fb67ef6258b3f200c15a2b8068ab9300401d7 | [
"BSD-3-Clause"
] | null | null | null | import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from signals.aux_functions import gaussian_bump
import nexa.loading as load
from visualization.sensors import visualize_SLM_hdf5
from visualization.sensors import visualize_STDM_hdf5
from visualization.sensor_clustering import visualize_cluster_matrix_hdf5
# Load the database
location = './results_database/three_bumps_distance.hdf5'
database = h5py.File(location, 'r')
# Time
Tmax = 1000
dt = 1.0
time = np.arange(0, Tmax, dt)
# Parameters that the bumpbs share
max_rate = 100
base = 10
value = 50
attenuation = 2
# Define three arangments for the values of the gaussian bumpbs
center1 = 100
center2 = 500
center3 = 700
# Now create the guassian bumps
gb1 = gaussian_bump(time, center1, max_rate, base, value, attenuation)
gb2 = gaussian_bump(time, center2, max_rate, base, value * 2, attenuation)
gb3 = gaussian_bump(time, center3, max_rate, base, value * 0.5, attenuation)
# Database extraction
run_name = str(center1) + '-'
run_name += str(center2) + '-'
run_name += str(center3)
nexa_arrangement = '3-4-3'
r = database[run_name]
# Load everything
SLM = load.get_SLM_hdf5(database, run_name)
STDM = load.get_STDM_hdf5(database, run_name, nexa_arrangement)
cluster_to_index = load.get_cluster_to_index_hdf5(database, run_name, nexa_arrangement)
index_to_cluster = load.get_index_to_cluster_hdf5(database, run_name, nexa_arrangement)
cluster_to_time_centers = load.get_cluster_to_time_centers_hdf5(database, run_name, nexa_arrangement)
# Now visualize the signals and the SLM
if False:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time,gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_SLM_hdf5(database, run_name, ax=ax4)
plt.show()
# Now the signals and the STDM
if False:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time,gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax4)
plt.show()
# Now visualize the SLM and STDM
if False:
fig = plt.figure()
gs = gridspec.GridSpec(2, 2)
ax1 = fig.add_subplot(gs[:, 0])
visualize_SLM_hdf5(database, run_name, ax=ax1)
ax2 = fig.add_subplot(gs[:, 1])
visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax2)
fig.show()
plt.close(fig)
# Now visualize the signals and the cluster matrix
if True:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time, gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_cluster_matrix_hdf5(database, run_name, nexa_arrangement, ax=ax4)
plt.show()
| 27.069565 | 101 | 0.713138 | import h5py
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from signals.aux_functions import gaussian_bump
import nexa.loading as load
from visualization.sensors import visualize_SLM_hdf5
from visualization.sensors import visualize_STDM_hdf5
from visualization.sensor_clustering import visualize_cluster_matrix_hdf5
location = './results_database/three_bumps_distance.hdf5'
database = h5py.File(location, 'r')
Tmax = 1000
dt = 1.0
time = np.arange(0, Tmax, dt)
max_rate = 100
base = 10
value = 50
attenuation = 2
center1 = 100
center2 = 500
center3 = 700
gb1 = gaussian_bump(time, center1, max_rate, base, value, attenuation)
gb2 = gaussian_bump(time, center2, max_rate, base, value * 2, attenuation)
gb3 = gaussian_bump(time, center3, max_rate, base, value * 0.5, attenuation)
run_name = str(center1) + '-'
run_name += str(center2) + '-'
run_name += str(center3)
nexa_arrangement = '3-4-3'
r = database[run_name]
SLM = load.get_SLM_hdf5(database, run_name)
STDM = load.get_STDM_hdf5(database, run_name, nexa_arrangement)
cluster_to_index = load.get_cluster_to_index_hdf5(database, run_name, nexa_arrangement)
index_to_cluster = load.get_index_to_cluster_hdf5(database, run_name, nexa_arrangement)
cluster_to_time_centers = load.get_cluster_to_time_centers_hdf5(database, run_name, nexa_arrangement)
if False:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time,gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_SLM_hdf5(database, run_name, ax=ax4)
plt.show()
if False:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time,gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax4)
plt.show()
if False:
fig = plt.figure()
gs = gridspec.GridSpec(2, 2)
ax1 = fig.add_subplot(gs[:, 0])
visualize_SLM_hdf5(database, run_name, ax=ax1)
ax2 = fig.add_subplot(gs[:, 1])
visualize_STDM_hdf5(database, run_name, nexa_arrangement, ax= ax2)
fig.show()
plt.close(fig)
if True:
fig = plt.figure()
gs = gridspec.GridSpec(3, 2)
ax1 = fig.add_subplot(gs[0, 0])
ax1.plot(time, gb1)
ax2 = fig.add_subplot(gs[1, 0])
ax2.plot(time, gb2)
ax3 = fig.add_subplot(gs[2, 0])
ax3.plot(time, gb3)
ax4 = fig.add_subplot(gs[:, 1])
visualize_cluster_matrix_hdf5(database, run_name, nexa_arrangement, ax=ax4)
plt.show()
| true | true |
f73aae7e0b0a3486f44f67e82ab00863ac51f7f5 | 14,193 | py | Python | utils/ContextLogger.py | paulaWesselmann/testing_pydial | bf4fd0c99242e49d67895d92c6cfc3dc31084182 | [
"Apache-2.0"
] | 3 | 2019-09-27T06:07:12.000Z | 2020-01-06T19:00:34.000Z | utils/ContextLogger.py | paulaWesselmann/testing_pydial | bf4fd0c99242e49d67895d92c6cfc3dc31084182 | [
"Apache-2.0"
] | null | null | null | utils/ContextLogger.py | paulaWesselmann/testing_pydial | bf4fd0c99242e49d67895d92c6cfc3dc31084182 | [
"Apache-2.0"
] | 1 | 2019-11-27T09:28:10.000Z | 2019-11-27T09:28:10.000Z | ###############################################################################
# PyDial: Multi-domain Statistical Spoken Dialogue System Software
###############################################################################
#
# Copyright 2015 - 2017
# Cambridge University Engineering Department Dialogue Systems Group
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
###############################################################################
'''
ContextLogger.py - wrapper for Python logging API
==========================================================================
Copyright CUED Dialogue Systems Group 2015 - 2017
**Relevant Config variables** [Default values]::
[logging]
screen_level=info
file_level=debug
file=logFileName.txt
usecolor = False
**Basic Usage**:
>>> from utils import ContextLogger
>>> ContextLogger.createLoggingHandlers()
>>> logger = ContextLogger.getLogger('Name')
then within any script issue debug, info, warning and error messages, eg
>>> logger.warning("String too long [%d]", 100)
issuing an error message generates ``ExceptionRaisedByLogger``.
Logger can if required be configured via a config section.
Then pass config info to ``createLoggingHandlers``
>>> ContextLogger.createLoggingHandlers(config)
************************
'''
__author__ = "cued_dialogue_systems_group"
import contextlib, logging, inspect, copy, sys, traceback, time
import os.path
# ----------------------------------------------
# Configure the standard Python logging API
# ----------------------------------------------
msg_format = '%(levelname)-7s:: %(asctime)s: %(name)4s %(message)s'
class NOcolors:
'''
ASCII escape chars just print junk when dumping logger output to file. Can use the config setting usecolor.
'''
HEADER = ''
OKBLUE = ''
OKGREEN = ''
WARNING = ''
FAIL = ''
ENDC = ''
BOLD = ''
CYAN = ''
MAGENTA = ''
class bcolors:
'''
Color specification for logger output.
'''
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = "\033[1m"
CYAN = '\033[96m'
MAGENTA = '\033[95m'
class ConsoleFormatter(logging.Formatter):
'''
Class to format logger output to console.
'''
def __init__(self,*args, **kwargs) :
#NB: import coloredlogs may also offer a solution
self.color_choice = bcolors() if kwargs['colors'] in [True, 'True'] else NOcolors()
del kwargs['colors']
kwargs['datefmt']='%H:%M:%S'
logging.Formatter.__init__(self, msg_format, *args, **kwargs)
self.mapping = {
logging.WARNING: self.color_choice.WARNING,
logging.ERROR: self.color_choice.FAIL,
logging.INFO: self.color_choice.OKGREEN,
logging.DEBUG: self.color_choice.OKBLUE,
25: self.color_choice.CYAN, # logging.DIAL
35: self.color_choice.MAGENTA # logging.RESULTS
}
def format(self, record):
record2 = copy.copy(record)
if record.levelno in self.mapping:
record2.levelname = self.mapping[record.levelno] + \
record.levelname.center(7) + self.color_choice.ENDC
# get actual message:
msg_split = record.msg.split('\n')
msg = '\n'.join(msg_split[1:])
#record2.msg = msg_split[0] + '\n' + self.color_choice.BOLD + msg + self.color_choice.ENDC
record2.msg = msg_split[0] + self.color_choice.BOLD + msg + self.color_choice.ENDC
try:
return super(ConsoleFormatter , self).format(record2)
except TypeError:
print('except TypeError: in ContextLogger.ConsoleFormatter(). Known minor issue with message format of logger')
# Note: this might be more serious - it may be stopping the individual module logging level specification...
cl = {} # current set of context loggers indexed by module name
module_level = {} # logging level for each logger in cl
def resetLoggingHandlers():
top_logger = logging.getLogger('')
top_logger.handlers = []
def createLoggingHandlers(config=None, screen_level = "INFO", \
log_file = None, file_level = "DEBUG", use_color = True):
"""
Create a top level logger and configure logging handlers
:param config: a config structure as returned by the std ConfigParser |.|
:param screen_level: default screen logging level if no config |.|
:type screen_level: str
:param log_file: default log file if no config |.|
:type log_file: str
:param file_level: default file logging level if no config
:type file_level: str
:returns: None
.. note::
Valid logging levels are "DEBUG", "INFO", "WARNING", "ERROR"
"""
global cl
global module_level
top_logger = logging.getLogger('')
top_logger.setLevel(logging.DEBUG)
# levels for logging
file_append = False
if config:
if config.has_option("logging", "file") :
log_file = config.get("logging", "file")
if config.has_option("logging", "file_level") :
file_level = config.get("logging", "file_level").upper()
if config.has_option("logging", "file_append") :
file_append = config.get("logging", "file_append").upper()
if config.has_option("logging", "screen_level") :
screen_level = config.get("logging", "screen_level").upper()
if config.has_option("logging", "usecolor"):
use_color = config.get("logging", "usecolor")
for option in config.options('logging'):
if option not in ['usecolor','file', 'file_level', 'screen_level'] and option not in config.defaults():
logger_name = option.lower()
module_level[logger_name] = config.get('logging', option)
if logger_name in cl:
cl[logger_name].setLevel(module_level[logger_name])
# configure console output:
"""
There was a problem with dumping logger output to file - print() statements and logger comments get separated.
StreamHandler now sends to sys.stdout
"""
logging.addLevelName(25, "DIAL")
logging.addLevelName(35, "RESULTS")
ch = logging.StreamHandler(sys.stdout) # NB: originally took no arguments
if screen_level == "DIAL":
ch.setLevel(25)
elif screen_level == "RESULTS":
ch.setLevel(35)
else:
ch.setLevel(getattr(logging, screen_level.upper()))
ch.setFormatter(ConsoleFormatter(colors=use_color))
# add the handlers to logger
top_logger.addHandler(ch)
# configure file output:
if log_file :
# check that log file directory exists and if necessary create it
dname = os.path.dirname(log_file)
if not os.path.isdir(dname) and dname != '':
try:
os.mkdir(dname)
except OSError:
top_logger.error("Logging directory {} cannot be created.".format(dname))
raise
# create file handler which logs even debug messages
formatter = logging.Formatter(msg_format, datefmt='%H:%M:%S',)
file_mode = 'w'
if file_append:
file_mode = 'a'
fh = logging.FileHandler(log_file, mode=file_mode)
if file_level.upper() == 'DIAL':
lvl = 25
elif file_level.upper() == 'RESULTS':
lvl = 35
else:
lvl = getattr(logging, file_level.upper())
fh.setLevel(lvl)
fh.setFormatter(formatter)
top_logger.addHandler(fh)
# ----------------------------------------------
# Interface to the standard Python logging API
# ----------------------------------------------
class ExceptionRaisedByLogger(Exception) :
pass
class ContextLogger:
"""
Wrapper for Python logging class.
"""
def __init__(self, module_name=None, *args):
self.logger = logging.getLogger(module_name)
self.stack = args
self._log = []
sys.excepthook = self._exceptHook
def setLevel(self, level):
"""
Set the logging level of this logger.
:param level: default screen logging level if no config
:type level: str
:returns: None
"""
self.logger.setLevel(getattr(logging, level.upper()))
def _exceptHook(self, etype, value, tb) :
if etype != ExceptionRaisedByLogger :
msg = self._convertMsg("Uncaught exception: "+str(etype) + "( "+str(value)+" )\n")
tb_msg = "".join( traceback.format_exception(etype, value, tb))
tb_msg = "\n".join([(" "*10)+line for line in tb_msg.split("\n")])
msg += tb_msg
self.logger.error(msg)
sys.__excepthook__(etype, value, tb)
@contextlib.contextmanager
def addContext(self, *args) :
"""
Create a nested named context for use in a ``with`` statement.
:param args: list of one or more context names (str)
:returns: ContextManager
Example:
>>> with mylogger.addContext("Session 1") :
... mylogger.warning("Warn Message from Session 1")
"""
n = len(self.stack)
self.stack += args
yield self.stack
self.stack = self.stack[:n]
@contextlib.contextmanager
def addTimedContext(self, *args) :
"""
Create a timed nested named context for use in a ``with`` statement.
:param args: list of one or more context names (str)
:returns: ContextManager
Example:
>>> with mylogger.addContext("Session 1") :
... Dostuff()
On exit from the ``with`` statement, the elapsed time is logged.
"""
t0 = time.time()
n = len(self.stack)
self.stack += args
yield self.stack
t1 = time.time()
self.info("Timer %.4fs"%(t1-t0))
self.stack = self.stack[:n]
def _callLocString(self, ):
inspected = inspect.getouterframes(inspect.currentframe())
frame, filename, line_number, function_name, lines, index = inspected[min(3, len(inspected)-1)]
filename = filename.split("/")[-1]
return filename + ":" + function_name + ">" + str(line_number)
def _stackString(self) :
if len(self.stack) == 0:
return ""
return "(" + ", ".join(map(str, self.stack)) + "): "
def _convertMsg(self, msg) :
#return self._callLocString() + ": " + self._stackString() + "\n "+msg
s = self._callLocString().split(':')
calls = s[0][0:30]+" <"+s[1][0:30]
stacks = self._stackString()
return "%62s : %s %s" % (calls,stacks,msg)
def debug(self,msg,*args,**kwargs):
"""
Log a DEBUG message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.debug(msg,*args,**kwargs)
def info(self,msg,*args,**kwargs):
""" Log an INFO message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.info(msg,*args,**kwargs)
def warning(self,msg,*args,**kwargs):
"""
Log a WARNING message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
"""
msg = self._convertMsg(msg)
self.logger.warning(msg,*args,**kwargs)
def error(self,msg,*args,**kwargs):
"""
Log an ERROR message.
:param msg: message string
:type msg: formatted-str
:param args: args to formatted message string if any
:returns: None
.. note::
Issuing an error message also raises exception ``ExceptionRaisedByLogger``
"""
msg0 = msg
msg = self._convertMsg(msg)
self.logger.error(msg,*args,**kwargs)
raise ExceptionRaisedByLogger(msg0)
def dial(self, msg, *args, **kwargs):
msg = self._convertMsg(msg)
self.logger.log(25,msg,*args,**kwargs)
def results(self, msg, *args, **kwargs):
msg = self._convertMsg(msg)
self.logger.log(35,msg,*args,**kwargs)
def getLogger(name):
"""
Retrieve or if necessary create a context logger with specified name.
:param name: name of logger to create or retrieve
:type name: str
:returns: logger (ContextLogger.ContextLogger)
.. note::
Use **only** this function to create instances of the ContextLogger class
"""
global cl
name = name.lower()
if name not in cl:
cl[name] = ContextLogger(name)
if name in module_level:
cl[name].setLevel(module_level[name])
return cl[name]
if __name__ == '__main__':
# creates upside down traffic lights
createLoggingHandlers()
cl = ContextLogger(__name__)
cl.info("starting test")
with cl.addContext("session 1") :
cl.warning("warning!")
try :
cl.error("error")
except ExceptionRaisedByLogger :
cl.info("ignoring the exception raised by the logger")
with cl.addContext("session 2"):
# try raising an exception
x = {}
print(x["door"])
| 33.474057 | 123 | 0.585077 | true | true | |
f73aafa0e6f239adfd578b43d03bccb1270e8988 | 2,566 | py | Python | tclCommands/TclCommandIsolate.py | brownjohnf/flatcam | 17b3be3c209934e8802a41062a3120ebf479c956 | [
"MIT"
] | 4 | 2018-12-31T18:57:21.000Z | 2021-04-04T04:57:22.000Z | tclCommands/TclCommandIsolate.py | brownjohnf/flatcam | 17b3be3c209934e8802a41062a3120ebf479c956 | [
"MIT"
] | 1 | 2017-12-23T22:04:32.000Z | 2017-12-23T22:04:32.000Z | tclCommands/TclCommandIsolate.py | brownjohnf/flatcam | 17b3be3c209934e8802a41062a3120ebf479c956 | [
"MIT"
] | null | null | null | from ObjectCollection import *
import TclCommand
class TclCommandIsolate(TclCommand.TclCommandSignaled):
"""
Tcl shell command to Creates isolation routing geometry for the given Gerber.
example:
set_sys units MM
new
open_gerber tests/gerber_files/simple1.gbr -outname margin
isolate margin -dia 3
cncjob margin_iso
"""
# array of all command aliases, to be able use old names for backward compatibility (add_poly, add_polygon)
aliases = ['isolate']
# dictionary of types from Tcl command, needs to be ordered
arg_names = collections.OrderedDict([
('name', str)
])
# dictionary of types from Tcl command, needs to be ordered , this is for options like -optionname value
option_types = collections.OrderedDict([
('dia', float),
('passes', int),
('overlap', float),
('combine', int),
('outname', str)
])
# array of mandatory options for current Tcl command: required = {'name','outname'}
required = ['name']
# structured help for current command, args needs to be ordered
help = {
'main': "Creates isolation routing geometry for the given Gerber.",
'args': collections.OrderedDict([
('name', 'Name of the source object.'),
('dia', 'Tool diameter.'),
('passes', 'Passes of tool width.'),
('overlap', 'Fraction of tool diameter to overlap passes.'),
('combine', 'Combine all passes into one geometry.'),
('outname', 'Name of the resulting Geometry object.')
]),
'examples': []
}
def execute(self, args, unnamed_args):
"""
execute current TCL shell command
:param args: array of known named arguments and options
:param unnamed_args: array of other values which were passed into command
without -somename and we do not have them in known arg_names
:return: None or exception
"""
name = args['name']
if 'outname' not in args:
args['outname'] = name + "_iso"
if 'timeout' in args:
timeout = args['timeout']
else:
timeout = 10000
obj = self.app.collection.get_by_name(name)
if obj is None:
self.raise_tcl_error("Object not found: %s" % name)
if not isinstance(obj, FlatCAMGerber):
self.raise_tcl_error('Expected FlatCAMGerber, got %s %s.' % (name, type(obj)))
del args['name']
obj.isolate(**args)
| 32.075 | 112 | 0.601325 | from ObjectCollection import *
import TclCommand
class TclCommandIsolate(TclCommand.TclCommandSignaled):
aliases = ['isolate']
arg_names = collections.OrderedDict([
('name', str)
])
option_types = collections.OrderedDict([
('dia', float),
('passes', int),
('overlap', float),
('combine', int),
('outname', str)
])
required = ['name']
help = {
'main': "Creates isolation routing geometry for the given Gerber.",
'args': collections.OrderedDict([
('name', 'Name of the source object.'),
('dia', 'Tool diameter.'),
('passes', 'Passes of tool width.'),
('overlap', 'Fraction of tool diameter to overlap passes.'),
('combine', 'Combine all passes into one geometry.'),
('outname', 'Name of the resulting Geometry object.')
]),
'examples': []
}
def execute(self, args, unnamed_args):
name = args['name']
if 'outname' not in args:
args['outname'] = name + "_iso"
if 'timeout' in args:
timeout = args['timeout']
else:
timeout = 10000
obj = self.app.collection.get_by_name(name)
if obj is None:
self.raise_tcl_error("Object not found: %s" % name)
if not isinstance(obj, FlatCAMGerber):
self.raise_tcl_error('Expected FlatCAMGerber, got %s %s.' % (name, type(obj)))
del args['name']
obj.isolate(**args)
| true | true |
f73aafd417b6ab1ed73ee0e3b02c5b3701a01ccc | 384 | py | Python | predictor/migrations/0003_auto_20200820_0704.py | fadilzteria/DLIR-ID-AR | a383fc7fc8a066ea8877c34df26a5829ec9c9396 | [
"MIT"
] | null | null | null | predictor/migrations/0003_auto_20200820_0704.py | fadilzteria/DLIR-ID-AR | a383fc7fc8a066ea8877c34df26a5829ec9c9396 | [
"MIT"
] | null | null | null | predictor/migrations/0003_auto_20200820_0704.py | fadilzteria/DLIR-ID-AR | a383fc7fc8a066ea8877c34df26a5829ec9c9396 | [
"MIT"
] | null | null | null | # Generated by Django 3.0.3 on 2020-08-19 23:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('predictor', '0002_review'),
]
operations = [
migrations.AlterField(
model_name='review',
name='dokumen_relevan',
field=models.IntegerField(default=0),
),
]
| 20.210526 | 49 | 0.596354 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('predictor', '0002_review'),
]
operations = [
migrations.AlterField(
model_name='review',
name='dokumen_relevan',
field=models.IntegerField(default=0),
),
]
| true | true |
f73ab0995d30008dd726e836ee5323b82909d58c | 572 | py | Python | mis/wsgi.py | rcdosado/URO-MIS | 84834048ad747b4355b354d9d9ddfa814cd4365f | [
"MIT"
] | null | null | null | mis/wsgi.py | rcdosado/URO-MIS | 84834048ad747b4355b354d9d9ddfa814cd4365f | [
"MIT"
] | null | null | null | mis/wsgi.py | rcdosado/URO-MIS | 84834048ad747b4355b354d9d9ddfa814cd4365f | [
"MIT"
] | null | null | null | """
WSGI config for mis project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
if os.getenv("DJANGO_MODE").lower() != 'local':
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mis.settings")
if os.getenv("DJANGO_MODE").lower() != 'local':
application = Cling(get_wsgi_application())
else:
application = get_wsgi_application()
| 23.833333 | 78 | 0.744755 |
import os
from django.core.wsgi import get_wsgi_application
if os.getenv("DJANGO_MODE").lower() != 'local':
from dj_static import Cling
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mis.settings")
if os.getenv("DJANGO_MODE").lower() != 'local':
application = Cling(get_wsgi_application())
else:
application = get_wsgi_application()
| true | true |
f73ab0d1a15f665a17eef5ab8b529af1a49552fc | 489 | py | Python | ws-dan/utils.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 10 | 2019-07-05T05:28:30.000Z | 2020-09-15T02:47:16.000Z | ws-dan/utils.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 6 | 2019-11-18T12:59:22.000Z | 2022-02-10T00:23:00.000Z | ws-dan/utils.py | sun-yitao/GrabAIChallenge | 05946339e5a478216d7a9234e29e9bd7af5b3492 | [
"MIT"
] | 2 | 2019-07-17T15:12:14.000Z | 2020-04-15T19:06:41.000Z | import numpy as np
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100. / batch_size))
return np.array(res, dtype='float')
| 25.736842 | 63 | 0.611452 | import numpy as np
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100. / batch_size))
return np.array(res, dtype='float')
| true | true |
f73ab118df9a8181f77aee3f797d22dd165e3b1e | 10,496 | py | Python | Software/interface/main.py | pet-eletrica/tur | f6c49fe15945378eacf4b4b23411d557c8e88c32 | [
"MIT"
] | 1 | 2019-09-26T04:03:51.000Z | 2019-09-26T04:03:51.000Z | main.py | TallesSilva/Sensor_Pista_TUR | 0553e60dc67d1b8006ab3cba106c4baf9056d7df | [
"Unlicense"
] | null | null | null | main.py | TallesSilva/Sensor_Pista_TUR | 0553e60dc67d1b8006ab3cba106c4baf9056d7df | [
"Unlicense"
] | null | null | null | import sys
from time import *
import serial
import serial.tools.list_ports as serial_tools
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import QtCore, QtGui, QtWidgets
import pista as base
# ---------------------------- GLOBAL VARIABLES
# Shared timing table, mutated by the GUI polling loop and the reader thread.
# Every key maps to a list of time values (seconds):
#   'contador_do_timer' : running elapsed-time counter shown on the display
#   'A'..'E'            : trigger times recorded for each track sensor
#   'A_B'..'D_A'        : split times between consecutive sensors
# Note: the original `global tempo_sensor` statement was removed -- at module
# level `global` is a no-op and had no effect.
tempo_sensor = {
    'contador_do_timer': [0],
    'A': [0],
    'B': [0],
    'C': [0],
    'D': [0],
    'E': [0],
    'A_B': [0],
    'B_C': [0],
    'C_D': [0],
    'D_A': [0]
}
# app = QtWidgets.QApplication(sys.argv)
#
# input_image = imread(pista.jpg)
# height, width, channels = input_image.shape
# bytesPerLine = channels * width
# qImg = QtGui.QImage(input_image.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
# pixmap01 = QtGui.QPixmap.fromImage(qImg)
# pixmap_image = QtGui.QPixmap(pixmap01)
# label_imageDisplay = QtWidgets.QLabel()
# label_imageDisplay.setPixmap(pixmap_image)
# label_imageDisplay.setAlignment(QtCore.Qt.AlignCenter)
# label_imageDisplay.setScaledContents(True)
# label_imageDisplay.setMinimumSize(1,1)
# label_imageDisplay.show()
# sys.exit(app.exec_())
# referencia: https://nikolak.com/pyqt-threading-tutorial/
class ThreadLeitura(QThread):
    """Worker thread that paces one serial polling cycle.

    After a one-second pause it advances the elapsed-time counter shown
    on the display and clears both serial buffers so the next exchange
    starts from a clean state.
    """

    def __init__(self, conexao_porta):
        super(ThreadLeitura, self).__init__()
        self.porta = conexao_porta  # open serial.Serial connection shared with the GUI

    def __del__(self):
        # Block until the thread has finished before the object is reclaimed.
        self.wait()

    def run(self):
        sleep(1)
        # Advance the value shown on the LCD; always adds, since this is
        # the time spent reading every sensor in one cycle.
        contador = tempo_sensor['contador_do_timer']
        contador[-1] = contador[0] + 1.01
        self.porta.flushOutput()
        self.porta.flushInput()
class ExampleApp(QMainWindow, base.Ui_MainWindow):
def __init__(self, parent=None):
super(ExampleApp, self).__init__(parent)
# TIMER
self.meu_timer = QTimer()
# CONEXAO SERIAL
self.port = None
if self.port is None:
self.port = ExampleApp.get_arduino_serial_port()
self.baudrate = 115200
self.conexao = serial.Serial(self.port, self.baudrate)
self.conexao.write(b'I')
print("conectado")
# CHAMADAS DE SETUP
self.setupUi(self)
self.setup_signals_connections()
# VARIAVEIS DE INICIALIZAÇÃO
self.label_7.setPixmap(QtGui.QPixmap("pista1.jpeg"))
self.label_6.setPixmap(QtGui.QPixmap("fundo.jpg"))
self.sensor = 0
self.tempo_sensorA = 0
self.tempo_sensorB = 0
self.tempo_sensorC = 0
self.tempo_sensorD = 0
self.tempo_sensorE = 0
print("Codigo comecou a rodar")
@staticmethod
def get_arduino_serial_port():
"""
Tries to found a serial port compatible.
If there is only one serial port available, return this one.
Otherwise it will verify the manufacturer of all serial ports
and compares with the manufacturer defined in the ArduinoConstants.
This method will return the first match.
If no one has found, it will return a empty string.
:return: Serial Port String
"""
serial_ports = serial_tools.comports()
if len(serial_ports) == 0:
return ""
if len(serial_ports) == 1:
return serial_ports[0].device
for serial_port_found in serial_ports:
if serial_port_found.manufacturer == 'Arduino (www.arduino.cc)':
return serial_port_found.device
return ""
def setup_signals_connections(self):
#self.Placar.clicked.
self.Iniciar.clicked.connect(self.btn_clicado)
self.Finalizar.clicked.connect(self.btn_desclicado)
self.meu_timer.timeout.connect(self.Loop)
self.myThread = ThreadLeitura(self.conexao)
def btn_clicado(self):
self.reset()
self.conectar()
self.iniciar()
self.label_6.setPixmap(QtGui.QPixmap("Inicio.jpeg"))
self.meu_timer.start(10)
self.conexao.flushOutput()
self.conexao.flushInput()
def btn_desclicado(self):
self.conexao.flushOutput()
self.conexao.flushInput()
self.label_6.setPixmap(QtGui.QPixmap("fundo.jpg"))
self.finalizar()
self.conexao.flushOutput()
self.conexao.flushInput()
self.meu_timer.stop()
# self.salvar_arquivo_txt()
self.reset()
def Loop(self):
self.Requisitar_Ler()
self.valida_percurso()
self.Printar_display()
self.myThread.start()
def iniciar(self):
self.conexao.write(b'I')
def finalizar(self):
self.conexao.write(b'A')
def reset(self):
global tempo_sensor
tempo_sensor = {
'contador_do_timer' : [0],
'A' : [0],
'B' : [0],
'C' : [0],
'D' : [0],
'E' : [0],
'A_B' : [0],
'B_C' : [0],
'C_D' : [0],
'D_A' : [0]
}
self.conexao.flushOutput()
self.conexao.flushInput()
self.sensor = 0
self.tempo_sensorA = 0
self.tempo_sensorB = 0
self.tempo_sensorC = 0
self.tempo_sensorD = 0
self.tempo_sensorE = 0
def conectar(self):
"""Caso não esteja conectado, abre uma nova conexão.
"""
if not self.conexao.is_open:
self.conexao = serial.Serial(self.port_name, self.baudrate, timeout=0.5)
self.conexao.flushInput()
self.conexao.flushOutput()
def Requisitar_Ler(self):
self.conexao.write(b'R')
aux1 =self.conexao.inWaiting() #serve pra ver quantos bytes tem na fila
if aux1 != None : #se tiver byte pra ler
self.sensor = ord(self.conexao.read(1)) #Lê qual sensor é
if self.sensor == 49: #se for sensor 1 ( 1 = 49 em ascii)
self.tempo_sensorA = ord(self.conexao.read(1)) # lê primeiro valor, High valor
self.tempo_sensorA = (self.tempo_sensorA * 256) + ord(self.conexao.read(1)) # lê segundo valor (low) e transforma para o numero real que foi enviado antes de ser convertido
tempo_sensor['A'][-1] = self.tempo_sensorA * 0.005; # calcula o tempo
print("sensorA")
print(tempo_sensor['A'][-1])
elif self.sensor == 50:
self.tempo_sensorB = ord(self.conexao.read(1)) # lê primeiro valor, High valor
self.tempo_sensorB = (self.tempo_sensorB * 256) + ord(self.conexao.read(1)) # lê segundo valor (low) e transforma para o numero real que foi enviado antes de ser convertido
tempo_sensor['B'][-1] = self.tempo_sensorB * 0.005; # calcula o tempo
print("sensorB")
print(tempo_sensor['B'][-1])
elif self.sensor == 51:
self.tempo_sensorC = ord(self.conexao.read(1))
self.tempo_sensorC = (self.tempo_sensorC * 256) + ord(self.conexao.read(1))
tempo_sensor['C'][-1] = self.tempo_sensorC * 0.005; # calcula o tempo
print("sensorC")
print(tempo_sensor['C'][-1])
elif self.sensor == 52:
self.tempo_sensorD = ord(self.conexao.read(1))
self.tempo_sensorD = (self.tempo_sensorD * 256) + ord(self.conexao.read(1))
tempo_sensor['D'][-1] = self.tempo_sensorD * 0.005; # calcula o tempo
print("sensorD")
print(tempo_sensor['D'][-1])
elif self.sensor == 53:
self.tempo_sensorE = odr(self.conexao.read(1))
self.tempo_sensorE = (self.tempo_sensorE * 256) + ord(self.conexao.read(1))
tempo_sensor['E'][-1] = self.tempo_sensorE * 0.005; # calcula o tempo
print("sensorE")
print(tempo_sensor['E'][-1])
else:
print("não achou sensor")
else:
print("não chegou dados")
def valida_percurso(self):
"""Verifica o novo valor lido e compara para saber se houve ou está
ocorrendo a passagem por um checkpoint.
Defeitos Conhecidos: O valor oscila enquanto o robô esta passando ( a leitura é feita em 0,5s, não da pra perceber)
e se estabiliza robô o carrinho terminar de passar.
Ele está implementado através de uma serie de condicionais que são
especificas para a pista do TUR 2018 e o modo como os sensores estão
dispostos.
Esta não é a melhor implementação, existem melhores. Mas através de todos
esses 'if' e 'else' é possível determinar onde o robô está.
"""
if tempo_sensor['A'][-1] > 0:
if tempo_sensor['B'][-1]>tempo_sensor['A'][-1] and tempo_sensor['C'][-1] == 0 and tempo_sensor['D'][-1] == 0:
tempo_sensor['A_B'][-1] = tempo_sensor['B'][-1] - tempo_sensor['A'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoAB.jpeg"))
if tempo_sensor['A'][-1] < tempo_sensor['B'][-1] and tempo_sensor['C'][-1] > tempo_sensor['B'][-1] and tempo_sensor['D'][-1] == 0:
tempo_sensor['B_C'][-1] = tempo_sensor['C'][-1] - tempo_sensor['B'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoBC.jpeg"))
if tempo_sensor['A'][-1] < tempo_sensor['D'][-1] and tempo_sensor['B'][-1] < tempo_sensor['C'][-1] and tempo_sensor['D'][-1] > tempo_sensor['C'][-1]:
tempo_sensor['C_D'][-1] = tempo_sensor['D'][-1] - tempo_sensor['C'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoCD.jpeg"))
if tempo_sensor['A'][-1] > tempo_sensor['D'][-1] and tempo_sensor['B'][-1] < tempo_sensor['C'][-1] and tempo_sensor['C'][-1] < tempo_sensor['D'][-1] and tempo_sensor['A'][-1] > tempo_sensor['B'][-1] and tempo_sensor['A'][-1] > tempo_sensor['C'][-1]:
tempo_sensor['D_A'][-1] = tempo_sensor['A'][-1] - tempo_sensor['D'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoDA.jpeg"))
def Printar_display(self):
""" Incrementa o tempo nos LCDs disponíveis na interface.
"""
self.lcdNumber.display(int(tempo_sensor['contador_do_timer'][-1]))
self.lcdNumber_2.display(tempo_sensor['A_B'][-1])
self.lcdNumber_3.display(tempo_sensor['B_C'][-1])
self.lcdNumber_4.display(tempo_sensor['C_D'][-1])
self.lcdNumber_5.display(tempo_sensor['D_A'][-1])
self.lcdNumber_6.display(0)
def main():
app = QApplication(sys.argv)
form = ExampleApp()
form.show()
app.exec_()
if __name__ == "__main__":
main()
sys.exit(app.exec_())
| 39.908745 | 262 | 0.602801 | import sys
from time import *
import serial
import serial.tools.list_ports as serial_tools
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5 import QtCore, QtGui, QtWidgets
import pista as base
global tempo_sensor
tempo_sensor = {
'contador_do_timer' : [0],
'A' : [0],
'B' : [0],
'C' : [0],
'D' : [0],
'E' : [0],
'A_B' : [0],
'B_C' : [0],
'C_D' : [0],
'D_A' : [0]
}
class ThreadLeitura(QThread):
def __init__(self, conexao_porta ):
QThread.__init__(self)
self.porta = conexao_porta
def __del__(self):
self.wait()
def run(self):
sleep(1)
tempo_sensor['contador_do_timer'][-1] = tempo_sensor['contador_do_timer'][0] + 1.01
self.porta.flushOutput()
self.porta.flushInput()
class ExampleApp(QMainWindow, base.Ui_MainWindow):
def __init__(self, parent=None):
super(ExampleApp, self).__init__(parent)
self.meu_timer = QTimer()
self.port = None
if self.port is None:
self.port = ExampleApp.get_arduino_serial_port()
self.baudrate = 115200
self.conexao = serial.Serial(self.port, self.baudrate)
self.conexao.write(b'I')
print("conectado")
self.setupUi(self)
self.setup_signals_connections()
self.label_7.setPixmap(QtGui.QPixmap("pista1.jpeg"))
self.label_6.setPixmap(QtGui.QPixmap("fundo.jpg"))
self.sensor = 0
self.tempo_sensorA = 0
self.tempo_sensorB = 0
self.tempo_sensorC = 0
self.tempo_sensorD = 0
self.tempo_sensorE = 0
print("Codigo comecou a rodar")
@staticmethod
def get_arduino_serial_port():
serial_ports = serial_tools.comports()
if len(serial_ports) == 0:
return ""
if len(serial_ports) == 1:
return serial_ports[0].device
for serial_port_found in serial_ports:
if serial_port_found.manufacturer == 'Arduino (www.arduino.cc)':
return serial_port_found.device
return ""
def setup_signals_connections(self):
self.Iniciar.clicked.connect(self.btn_clicado)
self.Finalizar.clicked.connect(self.btn_desclicado)
self.meu_timer.timeout.connect(self.Loop)
self.myThread = ThreadLeitura(self.conexao)
def btn_clicado(self):
self.reset()
self.conectar()
self.iniciar()
self.label_6.setPixmap(QtGui.QPixmap("Inicio.jpeg"))
self.meu_timer.start(10)
self.conexao.flushOutput()
self.conexao.flushInput()
def btn_desclicado(self):
self.conexao.flushOutput()
self.conexao.flushInput()
self.label_6.setPixmap(QtGui.QPixmap("fundo.jpg"))
self.finalizar()
self.conexao.flushOutput()
self.conexao.flushInput()
self.meu_timer.stop()
self.reset()
def Loop(self):
self.Requisitar_Ler()
self.valida_percurso()
self.Printar_display()
self.myThread.start()
def iniciar(self):
self.conexao.write(b'I')
def finalizar(self):
self.conexao.write(b'A')
def reset(self):
global tempo_sensor
tempo_sensor = {
'contador_do_timer' : [0],
'A' : [0],
'B' : [0],
'C' : [0],
'D' : [0],
'E' : [0],
'A_B' : [0],
'B_C' : [0],
'C_D' : [0],
'D_A' : [0]
}
self.conexao.flushOutput()
self.conexao.flushInput()
self.sensor = 0
self.tempo_sensorA = 0
self.tempo_sensorB = 0
self.tempo_sensorC = 0
self.tempo_sensorD = 0
self.tempo_sensorE = 0
def conectar(self):
if not self.conexao.is_open:
self.conexao = serial.Serial(self.port_name, self.baudrate, timeout=0.5)
self.conexao.flushInput()
self.conexao.flushOutput()
def Requisitar_Ler(self):
self.conexao.write(b'R')
aux1 =self.conexao.inWaiting()
if aux1 != None :
self.sensor = ord(self.conexao.read(1))
if self.sensor == 49:
self.tempo_sensorA = ord(self.conexao.read(1))
self.tempo_sensorA = (self.tempo_sensorA * 256) + ord(self.conexao.read(1))
tempo_sensor['A'][-1] = self.tempo_sensorA * 0.005;
print("sensorA")
print(tempo_sensor['A'][-1])
elif self.sensor == 50:
self.tempo_sensorB = ord(self.conexao.read(1))
self.tempo_sensorB = (self.tempo_sensorB * 256) + ord(self.conexao.read(1))
tempo_sensor['B'][-1] = self.tempo_sensorB * 0.005;
print("sensorB")
print(tempo_sensor['B'][-1])
elif self.sensor == 51:
self.tempo_sensorC = ord(self.conexao.read(1))
self.tempo_sensorC = (self.tempo_sensorC * 256) + ord(self.conexao.read(1))
tempo_sensor['C'][-1] = self.tempo_sensorC * 0.005;
print("sensorC")
print(tempo_sensor['C'][-1])
elif self.sensor == 52:
self.tempo_sensorD = ord(self.conexao.read(1))
self.tempo_sensorD = (self.tempo_sensorD * 256) + ord(self.conexao.read(1))
tempo_sensor['D'][-1] = self.tempo_sensorD * 0.005;
print("sensorD")
print(tempo_sensor['D'][-1])
elif self.sensor == 53:
self.tempo_sensorE = odr(self.conexao.read(1))
self.tempo_sensorE = (self.tempo_sensorE * 256) + ord(self.conexao.read(1))
tempo_sensor['E'][-1] = self.tempo_sensorE * 0.005;
print("sensorE")
print(tempo_sensor['E'][-1])
else:
print("não achou sensor")
else:
print("não chegou dados")
def valida_percurso(self):
if tempo_sensor['A'][-1] > 0:
if tempo_sensor['B'][-1]>tempo_sensor['A'][-1] and tempo_sensor['C'][-1] == 0 and tempo_sensor['D'][-1] == 0:
tempo_sensor['A_B'][-1] = tempo_sensor['B'][-1] - tempo_sensor['A'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoAB.jpeg"))
if tempo_sensor['A'][-1] < tempo_sensor['B'][-1] and tempo_sensor['C'][-1] > tempo_sensor['B'][-1] and tempo_sensor['D'][-1] == 0:
tempo_sensor['B_C'][-1] = tempo_sensor['C'][-1] - tempo_sensor['B'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoBC.jpeg"))
if tempo_sensor['A'][-1] < tempo_sensor['D'][-1] and tempo_sensor['B'][-1] < tempo_sensor['C'][-1] and tempo_sensor['D'][-1] > tempo_sensor['C'][-1]:
tempo_sensor['C_D'][-1] = tempo_sensor['D'][-1] - tempo_sensor['C'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoCD.jpeg"))
if tempo_sensor['A'][-1] > tempo_sensor['D'][-1] and tempo_sensor['B'][-1] < tempo_sensor['C'][-1] and tempo_sensor['C'][-1] < tempo_sensor['D'][-1] and tempo_sensor['A'][-1] > tempo_sensor['B'][-1] and tempo_sensor['A'][-1] > tempo_sensor['C'][-1]:
tempo_sensor['D_A'][-1] = tempo_sensor['A'][-1] - tempo_sensor['D'][-1]
self.label_6.setPixmap(QtGui.QPixmap("trechoDA.jpeg"))
def Printar_display(self):
self.lcdNumber.display(int(tempo_sensor['contador_do_timer'][-1]))
self.lcdNumber_2.display(tempo_sensor['A_B'][-1])
self.lcdNumber_3.display(tempo_sensor['B_C'][-1])
self.lcdNumber_4.display(tempo_sensor['C_D'][-1])
self.lcdNumber_5.display(tempo_sensor['D_A'][-1])
self.lcdNumber_6.display(0)
def main():
app = QApplication(sys.argv)
form = ExampleApp()
form.show()
app.exec_()
if __name__ == "__main__":
main()
sys.exit(app.exec_())
| true | true |
f73ab2948daa58260ac313e2f31c7dce7b616281 | 6,411 | py | Python | toontown/betaevent/DistributedBetaEvent.py | LittleNed/toontown-stride | 1252a8f9a8816c1810106006d09c8bdfe6ad1e57 | [
"Apache-2.0"
] | 1 | 2018-06-16T23:06:38.000Z | 2018-06-16T23:06:38.000Z | toontown/betaevent/DistributedBetaEvent.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | null | null | null | toontown/betaevent/DistributedBetaEvent.py | NoraTT/Historical-Commits-Project-Altis-Source | fe88e6d07edf418f7de6ad5b3d9ecb3d0d285179 | [
"Apache-2.0"
] | 4 | 2019-06-20T23:45:23.000Z | 2020-10-14T20:30:15.000Z | from panda3d.core import Point3, VBase3, Vec3, Vec4
from toontown.betaevent.DistributedEvent import DistributedEvent
from toontown.betaevent import CogTV
from toontown.hood import ZoneUtil
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from toontown.toon import Toon, ToonDNA
from direct.actor.Actor import Actor
from otp.avatar import Avatar
from toontown.chat.ChatGlobals import *
from toontown.nametag.NametagGroup import *
from toontown.suit import DistributedSuitBase, SuitDNA
from toontown.toon import NPCToons
from toontown.betaevent import BetaEventGlobals as BEGlobals
from toontown.battle import BattleParticles
class DistributedBetaEvent(DistributedEvent):
notify = directNotify.newCategory('DistributedBetaEvent')
def __init__(self, cr):
DistributedEvent.__init__(self, cr)
self.cr = cr
self.spark = loader.loadSfx('phase_11/audio/sfx/LB_sparks_1.ogg') # i think this could be used somewhere
# Create prepostera
self.prepostera = Toon.Toon()
self.prepostera.setName('Professor Prepostera')
self.prepostera.setPickable(0)
self.prepostera.setPlayerType(NametagGlobals.CCNonPlayer)
dna = ToonDNA.ToonDNA()
dna.newToonFromProperties('hss', 'ms','m', 'm', 20, 0, 20, 20, 97, 27, 86, 27, 37, 27)
self.prepostera.setDNA(dna)
self.prepostera.loop('scientistEmcee')
self.prepostera.reparentTo(render)
self.prepostera.setPosHpr(4, -3, 1, 0, 0, 0)
self.prepostera.blinkEyes()
self.prepostera.head = self.prepostera.find('**/__Actor_head')
self.prepostera.initializeBodyCollisions('toon')
self.headHoncho1 = DistributedSuitBase.DistributedSuitBase(self.cr)
headHoncho1suitDNA = SuitDNA.SuitDNA()
headHoncho1suitDNA.newSuit('hho')
self.headHoncho1.setDNA(headHoncho1suitDNA)
self.headHoncho1.setDisplayName('???')
self.headHoncho1.setPickable(0)
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.reparentTo(render)
self.headHoncho1.doId = 0
self.headHoncho1.hide()
self.headHoncho1.initializeBodyCollisions('toon')
middlemanDNA = SuitDNA.SuitDNA()
middlemanDNA.newSuit('mdm')
self.middleman1 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman1.setDNA(middlemanDNA)
self.middleman1.setDisplayName('Middleman')
self.middleman1.setPickable(0)
self.middleman1.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman1.reparentTo(render)
self.middleman1.doId = 1
self.middleman1.hide()
self.middleman1.initializeBodyCollisions('toon')
self.middleman2 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman2.setDNA(middlemanDNA)
self.middleman2.setDisplayName('Middleman')
self.middleman2.setPickable(0)
self.middleman2.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman2.reparentTo(render)
self.middleman2.doId = 2
self.middleman2.hide()
self.middleman2.initializeBodyCollisions('toon')
#base.musicManager.stopAllSounds()
self.toonMusic = loader.loadMusic('phase_14/audio/bgm/tt2_ambient_1.mp3') # Placeholder
#base.playMusic(self.toonMusic, looping = 1)
def announceGenerate(self):
DistributedEvent.announceGenerate(self)
def start(self):
pass
def delete(self):
DistributedEvent.delete(self)
self.prepostera.delete()
def enterStartBd(self, timestamp):
self.prepostera.animFSM.request('TeleportIn')
def exitStartBd(self):
pass
def enterCogInvade(self, timestamp):
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.show()
Sequence(
self.headHoncho1.beginSupaFlyMove(Vec3(12, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
Func(self.headHoncho1.loop, 'walk'),
self.headHoncho1.hprInterval(2, VBase3(90, 0, 0)),
Func(self.headHoncho1.loop, 'neutral'),
Wait(1),
Func(self.headHoncho1.setChatAbsolute, 'Hello Toon...', CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "I'd hate to crash the party...", CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "Actually... I'd love to!", CFSpeech|CFTimeout)
).start()
def exitCogInvade(self):
pass
def enterCogTalk(self, timestamp):
self.middleman1.show()
self.middleman2.show()
Sequence(
Func(self.headHoncho1.setChatAbsolute, 'I hear you wanted to open Loony Labs...', CFSpeech|CFTimeout),
Wait(4),
Parallel(
self.middleman1.beginSupaFlyMove(Vec3(-8, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
self.middleman2.beginSupaFlyMove(Vec3(4, -12, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False)
),
Func(self.middleman2.loop, 'neutral'),
Parallel(
Sequence(
Func(self.middleman1.loop, 'walk'),
self.middleman1.hprInterval(2, VBase3(-90, 0, 0)),
Func(self.middleman1.loop, 'neutral')),
Func(self.headHoncho1.setChatAbsolute, "How well did that go for you?", CFSpeech|CFTimeout))
).start()
def exitCogTalk(self):
pass
def enterCogTakeover(self, timestamp):
pass
def exitCogTakeover(self):
pass
def enterCredits(self, timestamp):
import CreditsScreen
self.credits = CreditsScreen.CreditsScreen()
self.credits.startCredits()
def exitCredits(self):
pass
def toonTalk(self, phrase, toon):
toon.setChatAbsolute(phrase, CFSpeech|CFTimeout)
| 42.177632 | 130 | 0.610357 | from panda3d.core import Point3, VBase3, Vec3, Vec4
from toontown.betaevent.DistributedEvent import DistributedEvent
from toontown.betaevent import CogTV
from toontown.hood import ZoneUtil
from direct.fsm import ClassicFSM, State
from direct.interval.IntervalGlobal import *
from toontown.toon import Toon, ToonDNA
from direct.actor.Actor import Actor
from otp.avatar import Avatar
from toontown.chat.ChatGlobals import *
from toontown.nametag.NametagGroup import *
from toontown.suit import DistributedSuitBase, SuitDNA
from toontown.toon import NPCToons
from toontown.betaevent import BetaEventGlobals as BEGlobals
from toontown.battle import BattleParticles
class DistributedBetaEvent(DistributedEvent):
notify = directNotify.newCategory('DistributedBetaEvent')
def __init__(self, cr):
DistributedEvent.__init__(self, cr)
self.cr = cr
self.spark = loader.loadSfx('phase_11/audio/sfx/LB_sparks_1.ogg')
self.prepostera = Toon.Toon()
self.prepostera.setName('Professor Prepostera')
self.prepostera.setPickable(0)
self.prepostera.setPlayerType(NametagGlobals.CCNonPlayer)
dna = ToonDNA.ToonDNA()
dna.newToonFromProperties('hss', 'ms','m', 'm', 20, 0, 20, 20, 97, 27, 86, 27, 37, 27)
self.prepostera.setDNA(dna)
self.prepostera.loop('scientistEmcee')
self.prepostera.reparentTo(render)
self.prepostera.setPosHpr(4, -3, 1, 0, 0, 0)
self.prepostera.blinkEyes()
self.prepostera.head = self.prepostera.find('**/__Actor_head')
self.prepostera.initializeBodyCollisions('toon')
self.headHoncho1 = DistributedSuitBase.DistributedSuitBase(self.cr)
headHoncho1suitDNA = SuitDNA.SuitDNA()
headHoncho1suitDNA.newSuit('hho')
self.headHoncho1.setDNA(headHoncho1suitDNA)
self.headHoncho1.setDisplayName('???')
self.headHoncho1.setPickable(0)
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.reparentTo(render)
self.headHoncho1.doId = 0
self.headHoncho1.hide()
self.headHoncho1.initializeBodyCollisions('toon')
middlemanDNA = SuitDNA.SuitDNA()
middlemanDNA.newSuit('mdm')
self.middleman1 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman1.setDNA(middlemanDNA)
self.middleman1.setDisplayName('Middleman')
self.middleman1.setPickable(0)
self.middleman1.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman1.reparentTo(render)
self.middleman1.doId = 1
self.middleman1.hide()
self.middleman1.initializeBodyCollisions('toon')
self.middleman2 = DistributedSuitBase.DistributedSuitBase(self.cr)
self.middleman2.setDNA(middlemanDNA)
self.middleman2.setDisplayName('Middleman')
self.middleman2.setPickable(0)
self.middleman2.setPosHpr(0, 0, 0, 0, 0, 0)
self.middleman2.reparentTo(render)
self.middleman2.doId = 2
self.middleman2.hide()
self.middleman2.initializeBodyCollisions('toon')
self.toonMusic = loader.loadMusic('phase_14/audio/bgm/tt2_ambient_1.mp3')
def announceGenerate(self):
DistributedEvent.announceGenerate(self)
def start(self):
pass
def delete(self):
DistributedEvent.delete(self)
self.prepostera.delete()
def enterStartBd(self, timestamp):
self.prepostera.animFSM.request('TeleportIn')
def exitStartBd(self):
pass
def enterCogInvade(self, timestamp):
self.headHoncho1.setPosHpr(0, 0, 0, 0, 0, 0)
self.headHoncho1.show()
Sequence(
self.headHoncho1.beginSupaFlyMove(Vec3(12, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
Func(self.headHoncho1.loop, 'walk'),
self.headHoncho1.hprInterval(2, VBase3(90, 0, 0)),
Func(self.headHoncho1.loop, 'neutral'),
Wait(1),
Func(self.headHoncho1.setChatAbsolute, 'Hello Toon...', CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "I'd hate to crash the party...", CFSpeech|CFTimeout),
Wait(4),
Func(self.headHoncho1.setChatAbsolute, "Actually... I'd love to!", CFSpeech|CFTimeout)
).start()
def exitCogInvade(self):
pass
def enterCogTalk(self, timestamp):
self.middleman1.show()
self.middleman2.show()
Sequence(
Func(self.headHoncho1.setChatAbsolute, 'I hear you wanted to open Loony Labs...', CFSpeech|CFTimeout),
Wait(4),
Parallel(
self.middleman1.beginSupaFlyMove(Vec3(-8, -4, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False),
self.middleman2.beginSupaFlyMove(Vec3(4, -12, 1), True, "firstCogInvadeFlyIn", walkAfterLanding=False)
),
Func(self.middleman2.loop, 'neutral'),
Parallel(
Sequence(
Func(self.middleman1.loop, 'walk'),
self.middleman1.hprInterval(2, VBase3(-90, 0, 0)),
Func(self.middleman1.loop, 'neutral')),
Func(self.headHoncho1.setChatAbsolute, "How well did that go for you?", CFSpeech|CFTimeout))
).start()
def exitCogTalk(self):
pass
def enterCogTakeover(self, timestamp):
pass
def exitCogTakeover(self):
pass
def enterCredits(self, timestamp):
import CreditsScreen
self.credits = CreditsScreen.CreditsScreen()
self.credits.startCredits()
def exitCredits(self):
pass
def toonTalk(self, phrase, toon):
toon.setChatAbsolute(phrase, CFSpeech|CFTimeout)
| true | true |
f73ab338ff262c2299f7e169a94f5f4c2fb66501 | 4,826 | py | Python | goldeneye/commands.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | goldeneye/commands.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | goldeneye/commands.py | smartpigling/goldeneye | 704ee16ca89cb0b8a9b7c23216689e04d9f07a92 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Click commands."""
import os
from glob import glob
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
"""Run the tests."""
import pytest
rv = pytest.main([TEST_PATH, '--verbose'])
exit(rv)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
help='Fix imports using isort, before linting')
def lint(fix_imports):
"""Lint and check code style with flake8 and isort."""
skip = ['node_modules', 'requirements']
root_files = glob('*.py')
root_directories = [
name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
"""Execute a checking tool with its arguments."""
command_line = list(args) + files_and_directories
click.echo('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
"""Remove *.pyc and *.pyo files recursively starting at current directory.
Borrowed from Flask-Script, converted to use Click.
"""
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
click.echo('Removing {}'.format(full_pathname))
os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
"""Display all of the url matching routes for the project.
Borrowed from Flask-Script, converted to use Click.
"""
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = (
current_app.url_map
.bind('localhost')
.match(url, return_rule=True))
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(('<{}>'.format(e), None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '{:' + str(max_rule_length) + '}'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
# max_endpoint_length = max(rows, key=len)
max_endpoint_length = (
max_endpoint_length if max_endpoint_length > 8 else 8)
str_template += ' {:' + str(max_endpoint_length) + '}'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = (
max_arguments_length if max_arguments_length > 9 else 9)
str_template += ' {:' + str(max_arguments_length) + '}'
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo('-' * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
@click.command()
@with_appcontext
def initdb():
"""
init system data
:return:
"""
from goldeneye.user.models import User, Role, Permission
perms = [
Permission(name='系统测试1', slug='public.about', type='N'),
Permission(name='系统测试2', slug='public.about1', type='N'),
Permission(name='系统测试3', slug='public.about2', type='N')
]
role = Role.create(name='系统管理员', permissions=perms)
user = User(username='admin', email='admin@goldeneye.com', password='admin')
user.roles.append(role)
user.save()
| 32.608108 | 80 | 0.618525 |
import os
from glob import glob
from subprocess import call
import click
from flask import current_app
from flask.cli import with_appcontext
from werkzeug.exceptions import MethodNotAllowed, NotFound
HERE = os.path.abspath(os.path.dirname(__file__))
PROJECT_ROOT = os.path.join(HERE, os.pardir)
TEST_PATH = os.path.join(PROJECT_ROOT, 'tests')
@click.command()
def test():
import pytest
rv = pytest.main([TEST_PATH, '--verbose'])
exit(rv)
@click.command()
@click.option('-f', '--fix-imports', default=False, is_flag=True,
help='Fix imports using isort, before linting')
def lint(fix_imports):
skip = ['node_modules', 'requirements']
root_files = glob('*.py')
root_directories = [
name for name in next(os.walk('.'))[1] if not name.startswith('.')]
files_and_directories = [
arg for arg in root_files + root_directories if arg not in skip]
def execute_tool(description, *args):
command_line = list(args) + files_and_directories
click.echo('{}: {}'.format(description, ' '.join(command_line)))
rv = call(command_line)
if rv != 0:
exit(rv)
if fix_imports:
execute_tool('Fixing import order', 'isort', '-rc')
execute_tool('Checking code style', 'flake8')
@click.command()
def clean():
for dirpath, dirnames, filenames in os.walk('.'):
for filename in filenames:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
full_pathname = os.path.join(dirpath, filename)
click.echo('Removing {}'.format(full_pathname))
os.remove(full_pathname)
@click.command()
@click.option('--url', default=None,
help='Url to test (ex. /static/image.png)')
@click.option('--order', default='rule',
help='Property on Rule to order by (default: rule)')
@with_appcontext
def urls(url, order):
rows = []
column_length = 0
column_headers = ('Rule', 'Endpoint', 'Arguments')
if url:
try:
rule, arguments = (
current_app.url_map
.bind('localhost')
.match(url, return_rule=True))
rows.append((rule.rule, rule.endpoint, arguments))
column_length = 3
except (NotFound, MethodNotAllowed) as e:
rows.append(('<{}>'.format(e), None, None))
column_length = 1
else:
rules = sorted(
current_app.url_map.iter_rules(),
key=lambda rule: getattr(rule, order))
for rule in rules:
rows.append((rule.rule, rule.endpoint, None))
column_length = 2
str_template = ''
table_width = 0
if column_length >= 1:
max_rule_length = max(len(r[0]) for r in rows)
max_rule_length = max_rule_length if max_rule_length > 4 else 4
str_template += '{:' + str(max_rule_length) + '}'
table_width += max_rule_length
if column_length >= 2:
max_endpoint_length = max(len(str(r[1])) for r in rows)
max_endpoint_length = (
max_endpoint_length if max_endpoint_length > 8 else 8)
str_template += ' {:' + str(max_endpoint_length) + '}'
table_width += 2 + max_endpoint_length
if column_length >= 3:
max_arguments_length = max(len(str(r[2])) for r in rows)
max_arguments_length = (
max_arguments_length if max_arguments_length > 9 else 9)
str_template += ' {:' + str(max_arguments_length) + '}'
table_width += 2 + max_arguments_length
click.echo(str_template.format(*column_headers[:column_length]))
click.echo('-' * table_width)
for row in rows:
click.echo(str_template.format(*row[:column_length]))
@click.command()
@with_appcontext
def initdb():
from goldeneye.user.models import User, Role, Permission
perms = [
Permission(name='系统测试1', slug='public.about', type='N'),
Permission(name='系统测试2', slug='public.about1', type='N'),
Permission(name='系统测试3', slug='public.about2', type='N')
]
role = Role.create(name='系统管理员', permissions=perms)
user = User(username='admin', email='admin@goldeneye.com', password='admin')
user.roles.append(role)
user.save()
| true | true |
f73ab6176ab8fe472d5242378c8bc7c181ac542c | 4,666 | py | Python | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | [
"MIT"
] | null | null | null | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | [
"MIT"
] | null | null | null | tests/test_chi_citycouncil.py | danielahuang/city-scrapers | 711d1995f59100793e771068a6f5d9149e773412 | [
"MIT"
] | null | null | null | import json
from datetime import date, time
from urllib.parse import parse_qs
import pytest
from freezegun import freeze_time
from tests.utils import file_response, read_test_file_content
from city_scrapers.constants import CITY_COUNCIL
from city_scrapers.spiders.chi_citycouncil import ChiCityCouncilSpider
INITIAL_REQUEST = 'https://ocd.datamade.us/events/?' \
'start_date__gt=2017-10-16&' \
'jurisdiction=ocd-jurisdiction/country:us/state:il/place:chicago/government'
spider = ChiCityCouncilSpider()
@pytest.fixture('module')
def parsed_item():
freezer = freeze_time('2018-01-01 12:00:01')
freezer.start()
item = file_response('files/chi_citycouncil_event.json', url=INITIAL_REQUEST)
parsed = spider._parse_item(item)
freezer.stop()
return parsed
def test_parse():
response = file_response('files/chi_citycouncil_feed.json', url=INITIAL_REQUEST)
requests = list(spider.parse(response))
assert len(requests) == 2
def test_gen_requests():
    """_gen_requests should yield the OCD detail URL for each event in the feed."""
    test_response = json.loads(read_test_file_content('files/chi_citycouncil_feed.json'))
    event_requests = [item for item in spider._gen_requests(test_response)]
    assert event_requests == [
        'https://ocd.datamade.us/ocd-event/86094f46-cf45-46f8-89e2-0bf783e7aa12/',
        'https://ocd.datamade.us/ocd-event/93d62d20-b1dc-4d71-9e96-60c99c837e90/',
    ]
def test_addtl_pages():
    """_addtl_pages is True only when further result pages remain."""
    paged_meta = json.loads(
        '{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
    )
    assert spider._addtl_pages(paged_meta) is True

    single_page_meta = json.loads(
        '{"meta": {"page": 1, "per_page": 100, "total_count": 2, "count": 2, "max_page": 1}}'
    )
    assert spider._addtl_pages(single_page_meta) is False
def test_next_page():
    """_next_page should advance the page number without touching other params."""
    more = json.loads(
        '{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
    )
    # NOTE(review): parse_qs is applied to the full URL, so the path is fused
    # into the first key; harmless here since we only compare non-"page" keys.
    original_params = parse_qs(INITIAL_REQUEST)
    next_page = spider._next_page(more)
    static_params = {k: v for k, v in original_params.items() if k != 'page'}
    assert static_params == original_params
    assert next_page == 2
def test_parse_documents():
    """_parse_documents should flatten each document's first link into url/note."""
    documents = [{
        "date": "",
        "note": "Notice",
        "links": [{
            "url": (
                "http://media.legistar.com/chic/meetings/633C3556-29C4-4645-A916-E767E00A98CC/"
                "Notice,%2003-22-2018.pdf"
            ),
            "media_type": "application/pdf"
        }]
    }]
    assert spider._parse_documents(documents)[0] == {
        'url': documents[0]['links'][0]['url'],
        'note': "Notice"
    }
# Item fields
def test_start(parsed_item):
expected_start = {'date': date(2017, 10, 16), 'time': time(10, 00), 'note': ''}
assert parsed_item['start'] == expected_start
def test_end(parsed_item):
expected_end = {'date': date(2017, 10, 16), 'time': None, 'note': ''}
assert parsed_item['end'] == expected_end
def test_name(parsed_item):
assert parsed_item['name'] == 'Joint Committee: Finance; Transportation and Public Way'
def test_description(parsed_item):
assert parsed_item['event_description'] == ""
def test_location(parsed_item):
    """Location should carry both the hall name and the street address."""
    assert parsed_item['location'] == {
        'address': '121 N LaSalle Dr, Chicago, IL',
        'name': 'Council Chambers , City Hall'
    }
def test_documents(parsed_item):
assert parsed_item['documents'] == [{
"url":
"http://media.legistar.com/chic/meetings/B5103C52-1793-4B07-9F28-E0A1223E1540/Fin%20CANCELLED%2010-16_20171010085450.pdf", # noqa
"note": "Cancellation Notice",
}]
def test_id(parsed_item):
assert parsed_item['id'] == \
'chi_citycouncil/201710161000/ocd-event-86094f46-cf45-46f8-89e2-0bf783e7aa12/joint_committee_finance_transportation_and_public_way' # noqa
def test_all_day(parsed_item):
assert parsed_item['all_day'] is False
def test_classification(parsed_item):
assert parsed_item['classification'] == CITY_COUNCIL
def test_status(parsed_item):
assert parsed_item['status'] == 'cancelled'
def test__type(parsed_item):
assert parsed_item['_type'] == 'event'
def test_sources(parsed_item):
    """Both the Legistar API endpoint and the web detail page are sources."""
    expected_sources = [
        {
            "url": "http://webapi.legistar.com/v1/chicago/events/4954",
            "note": "api"
        },
        {
            "url":
                "https://chicago.legistar.com/MeetingDetail.aspx?ID=565455&GUID=B5103C52-1793-4B07-9F28-E0A1223E1540&Options=info&Search=",  # noqa
            "note": "web"
        }
    ]
    assert parsed_item['sources'] == expected_sources
| 30.496732 | 150 | 0.658594 | import json
from datetime import date, time
from urllib.parse import parse_qs
import pytest
from freezegun import freeze_time
from tests.utils import file_response, read_test_file_content
from city_scrapers.constants import CITY_COUNCIL
from city_scrapers.spiders.chi_citycouncil import ChiCityCouncilSpider
INITIAL_REQUEST = 'https://ocd.datamade.us/events/?' \
'start_date__gt=2017-10-16&' \
'jurisdiction=ocd-jurisdiction/country:us/state:il/place:chicago/government'
spider = ChiCityCouncilSpider()
@pytest.fixture(scope='module')
def parsed_item():
    """Parse the fixture event once per module with time frozen.

    Fix: passing the fixture scope positionally (``pytest.fixture('module')``)
    was deprecated in pytest 5.2 and removed later; the keyword form is
    required. Also unfreezes time in a ``finally`` so a parse failure cannot
    leak frozen time into other tests.
    """
    freezer = freeze_time('2018-01-01 12:00:01')
    freezer.start()
    try:
        item = file_response('files/chi_citycouncil_event.json', url=INITIAL_REQUEST)
        parsed = spider._parse_item(item)
    finally:
        # Always restore real time, even if parsing raises.
        freezer.stop()
    return parsed
def test_parse():
response = file_response('files/chi_citycouncil_feed.json', url=INITIAL_REQUEST)
requests = list(spider.parse(response))
assert len(requests) == 2
def test_gen_requests():
test_response = json.loads(read_test_file_content('files/chi_citycouncil_feed.json'))
event_requests = [item for item in spider._gen_requests(test_response)]
assert event_requests == [
'https://ocd.datamade.us/ocd-event/86094f46-cf45-46f8-89e2-0bf783e7aa12/',
'https://ocd.datamade.us/ocd-event/93d62d20-b1dc-4d71-9e96-60c99c837e90/',
]
def test_addtl_pages():
more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
)
assert spider._addtl_pages(more) is True
no_more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 2, "count": 2, "max_page": 1}}'
)
assert spider._addtl_pages(no_more) is False
def test_next_page():
more = json.loads(
'{"meta": {"page": 1, "per_page": 100, "total_count": 160, "count": 100, "max_page": 2}}'
)
original_params = parse_qs(INITIAL_REQUEST)
next_page = spider._next_page(more)
static_params = {k: v for k, v in original_params.items() if k != 'page'}
assert static_params == original_params
assert next_page == 2
def test_parse_documents():
documents = [{
"date": "",
"note": "Notice",
"links": [{
"url": (
"http://media.legistar.com/chic/meetings/633C3556-29C4-4645-A916-E767E00A98CC/"
"Notice,%2003-22-2018.pdf"
),
"media_type": "application/pdf"
}]
}]
assert spider._parse_documents(documents)[0] == {
'url': documents[0]['links'][0]['url'],
'note': "Notice"
}
def test_start(parsed_item):
expected_start = {'date': date(2017, 10, 16), 'time': time(10, 00), 'note': ''}
assert parsed_item['start'] == expected_start
def test_end(parsed_item):
expected_end = {'date': date(2017, 10, 16), 'time': None, 'note': ''}
assert parsed_item['end'] == expected_end
def test_name(parsed_item):
assert parsed_item['name'] == 'Joint Committee: Finance; Transportation and Public Way'
def test_description(parsed_item):
assert parsed_item['event_description'] == ""
def test_location(parsed_item):
expected_location = {
'address': '121 N LaSalle Dr, Chicago, IL',
'name': 'Council Chambers , City Hall'
}
assert parsed_item['location'] == expected_location
def test_documents(parsed_item):
assert parsed_item['documents'] == [{
"url":
"http://media.legistar.com/chic/meetings/B5103C52-1793-4B07-9F28-E0A1223E1540/Fin%20CANCELLED%2010-16_20171010085450.pdf",
"note": "Cancellation Notice",
}]
def test_id(parsed_item):
assert parsed_item['id'] == \
'chi_citycouncil/201710161000/ocd-event-86094f46-cf45-46f8-89e2-0bf783e7aa12/joint_committee_finance_transportation_and_public_way'
def test_all_day(parsed_item):
assert parsed_item['all_day'] is False
def test_classification(parsed_item):
assert parsed_item['classification'] == CITY_COUNCIL
def test_status(parsed_item):
assert parsed_item['status'] == 'cancelled'
def test__type(parsed_item):
assert parsed_item['_type'] == 'event'
def test_sources(parsed_item):
expected_sources = [
{
"url": "http://webapi.legistar.com/v1/chicago/events/4954",
"note": "api"
},
{
"url":
"https://chicago.legistar.com/MeetingDetail.aspx?ID=565455&GUID=B5103C52-1793-4B07-9F28-E0A1223E1540&Options=info&Search=",
"note": "web"
}
]
assert parsed_item['sources'] == expected_sources
| true | true |
f73ab626a16c21cc6848405eb3ef504d504ce3f3 | 1,365 | py | Python | minidoc/bin.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | minidoc/bin.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | minidoc/bin.py | ihgazni2/minidoc | b2859069bad5e718692b57d5498389473c66bd2e | [
"MIT"
] | null | null | null | from minidoc import minidoc
from minidoc import tst
import argparse
from efdir import fs
parser = argparse.ArgumentParser()
parser.add_argument('-tst','--test_file', default="code.tst.py",help=".tst.py file name")
parser.add_argument('-codec','--codec', default="utf-8",help=".tst.py file codec")
parser.add_argument('-still','--still_frames', default="True",help="generate screen shot")
parser.add_argument('-rows','--rownums', default="30",help="screen height")
parser.add_argument('-dst','--dst_dir', default="./images",help="destination svg dir")
parser.add_argument('-title','--title', default="Usage",help="parent title")
parser.add_argument('-tbot','--title_bot', default="=",help="parent title bottom char")
parser.add_argument('-ebot','--entry_bot', default="-",help="entry title bottom char")
def boolize(s):
    """Parse a CLI flag string into a bool.

    Returns True only for "true" (case-insensitive); every other value —
    including "false" and arbitrary text — yields False, exactly as the
    original if/elif/else chain did (its elif branch was redundant).
    """
    return s.lower() == "true"
args = parser.parse_args()
still_frames = boolize(args.still_frames)
def main():
    """Console entry point: render a .tst.py file into SVG frames and a reST page."""
    # Parse the test file into parallel key/value lists (entries and code bodies).
    kl,vl = tst.tst2kvlist(fn=args.test_file,codec=args.codec)
    minidoc.creat_svgs(kl,vl,still_frames=still_frames,rownums=int(args.rownums),dst_dir=args.dst_dir)
    rst_str = tst.creat_rst(kl,vl,title=args.title,title_bot=args.title_bot,entry_bot=args.entry_bot)
    # Write the reST page to "<title>.rst" in the working directory.
    fs.wfile(args.title+".rst",rst_str,codec=args.codec)
| 39 | 102 | 0.711355 | from minidoc import minidoc
from minidoc import tst
import argparse
from efdir import fs
parser = argparse.ArgumentParser()
parser.add_argument('-tst','--test_file', default="code.tst.py",help=".tst.py file name")
parser.add_argument('-codec','--codec', default="utf-8",help=".tst.py file codec")
parser.add_argument('-still','--still_frames', default="True",help="generate screen shot")
parser.add_argument('-rows','--rownums', default="30",help="screen height")
parser.add_argument('-dst','--dst_dir', default="./images",help="destination svg dir")
parser.add_argument('-title','--title', default="Usage",help="parent title")
parser.add_argument('-tbot','--title_bot', default="=",help="parent title bottom char")
parser.add_argument('-ebot','--entry_bot', default="-",help="entry title bottom char")
def boolize(s):
    """Parse a CLI flag string into a bool.

    Returns True only for "true" (case-insensitive); every other value —
    including "false" and arbitrary text — yields False, exactly as the
    original if/elif/else chain did (its elif branch was redundant).
    """
    return s.lower() == "true"
args = parser.parse_args()
still_frames = boolize(args.still_frames)
def main():
kl,vl = tst.tst2kvlist(fn=args.test_file,codec=args.codec)
minidoc.creat_svgs(kl,vl,still_frames=still_frames,rownums=int(args.rownums),dst_dir=args.dst_dir)
rst_str = tst.creat_rst(kl,vl,title=args.title,title_bot=args.title_bot,entry_bot=args.entry_bot)
fs.wfile(args.title+".rst",rst_str,codec=args.codec)
| true | true |
f73ab69d03a3a5646a5c54640110708f8c08b9f8 | 1,234 | py | Python | app.py | cinmoy98/neural-network-visualizer | bbb8a5237fe60ee552e3f343ab03707d381895dc | [
"MIT"
] | null | null | null | app.py | cinmoy98/neural-network-visualizer | bbb8a5237fe60ee552e3f343ab03707d381895dc | [
"MIT"
] | 1 | 2020-06-08T19:13:34.000Z | 2020-06-08T19:13:34.000Z | app.py | cinmoy98/neural-network-visualizer-webapp | bbb8a5237fe60ee552e3f343ab03707d381895dc | [
"MIT"
] | null | null | null |
import streamlit as st
import json
import requests
import matplotlib.pyplot as plt
import numpy as np
# Flask backend that returns a random sample's image and per-layer activations.
URI = 'http://neural-net-viz-flask.herokuapp.com'
# Fix user-facing title typo: "Nural" -> "Neural".
st.title('Neural Network Visualizer')
st.sidebar.markdown('## Input Image')
# Fetch one random sample's prediction/activations from the backend and render.
if st.button('Get Random Prediction'):
    response = requests.post(URI, data={})
    response = json.loads(response.text)
    preds = response.get('prediction')
    image = response.get('image')
    # Backend returns the image flattened; reshape to 28x28 — assumes MNIST
    # input (TODO confirm against the backend).
    image = np.reshape(image, (28, 28))
    st.sidebar.image(image, width=150)
    # One figure per layer; layer index 2 is laid out as 10 tiles (outputs).
    for layer, p in enumerate(preds):
        numbers = np.squeeze(np.array(p))
        plt.figure(figsize=(32, 4))
        if layer == 2:
            row = 1
            col = 10
        else:
            row = 2
            col = 16
        for i, number in enumerate(numbers):
            plt.subplot(row, col, i+1)
            # Render each unit's scalar activation as a uniform 8x8 RGB tile.
            plt.imshow(number * np.ones((8, 8, 3)).astype('float32'))
            plt.xticks([])
            plt.yticks([])
            if layer == 2:
                # Label each output unit with its class index.
                plt.xlabel(str(i), fontsize=40)
            plt.subplots_adjust(wspace=0.05, hspace=0.05)
            plt.tight_layout()
        st.text('Layer {}'.format(layer + 1))
        st.pyplot()
| 26.255319 | 69 | 0.54376 |
import streamlit as st
import json
import requests
import matplotlib.pyplot as plt
import numpy as np
# Flask backend that returns a random sample's image and per-layer activations.
URI = 'http://neural-net-viz-flask.herokuapp.com'
# Fix user-facing title typo: "Nural" -> "Neural".
st.title('Neural Network Visualizer')
st.sidebar.markdown('## Input Image')
if st.button('Get Random Prediction'):
response = requests.post(URI, data={})
response = json.loads(response.text)
preds = response.get('prediction')
image = response.get('image')
image = np.reshape(image, (28, 28))
st.sidebar.image(image, width=150)
for layer, p in enumerate(preds):
numbers = np.squeeze(np.array(p))
plt.figure(figsize=(32, 4))
if layer == 2:
row = 1
col = 10
else:
row = 2
col = 16
for i, number in enumerate(numbers):
plt.subplot(row, col, i+1)
plt.imshow(number * np.ones((8, 8, 3)).astype('float32'))
plt.xticks([])
plt.yticks([])
if layer == 2:
plt.xlabel(str(i), fontsize=40)
plt.subplots_adjust(wspace=0.05, hspace=0.05)
plt.tight_layout()
st.text('Layer {}'.format(layer + 1))
st.pyplot()
| true | true |
f73ab6c71c82b06fc1fc4eff0a310fc869b130b2 | 336 | py | Python | shop/migrations/0003_auto_20190608_1137.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | shop/migrations/0003_auto_20190608_1137.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | shop/migrations/0003_auto_20190608_1137.py | chidibede/Django-Ecommerce-Site | c3a139ccf6e67ea90ab3879afcb16528be008548 | [
"MIT"
] | null | null | null | # Generated by Django 2.2 on 2019-06-08 10:37
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20190608_1135'),
]
operations = [
migrations.RenameModel(
old_name='Smart_Watch',
new_name='Smart_Watche',
),
]
| 18.666667 | 45 | 0.604167 |
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('shop', '0002_auto_20190608_1135'),
]
operations = [
migrations.RenameModel(
old_name='Smart_Watch',
new_name='Smart_Watche',
),
]
| true | true |
f73ab76404bc56d9e66c7d27d97571d01b26c578 | 712 | py | Python | test/__main__.py | daviddever/skybot | 1dfcd5889f1711b24098c2d6640ec47a2dbd047c | [
"Unlicense"
] | 114 | 2015-01-04T07:44:12.000Z | 2021-09-12T16:06:40.000Z | test/__main__.py | daviddever/skybot | 1dfcd5889f1711b24098c2d6640ec47a2dbd047c | [
"Unlicense"
] | 54 | 2015-01-11T21:55:59.000Z | 2021-04-24T22:19:53.000Z | test/__main__.py | daviddever/skybot | 1dfcd5889f1711b24098c2d6640ec47a2dbd047c | [
"Unlicense"
] | 84 | 2015-01-05T23:10:38.000Z | 2021-12-24T06:08:09.000Z | from os import path
from unittest import TestSuite, TestLoader, TextTestRunner
import sys
if __name__ == "__main__":
    # Because the project is structured differently than
    # any tooling expects, we need to modify the python
    # path during runtime (or before) to get it to
    # properly import plugins and other code correctly.
    project_root_directory = path.dirname(path.dirname(__file__))
    sys.path.append(path.join(project_root_directory, "plugins"))
    sys.path.append(path.join(project_root_directory))
    # Discover every test module under this test directory.
    discovered_tests = TestLoader().discover(path.dirname(__file__))
    run_result = TextTestRunner().run(discovered_tests)
    # Propagate test failure to the shell/CI as a non-zero exit code.
    if not run_result.wasSuccessful():
        sys.exit(1)
| 35.6 | 68 | 0.745787 | from os import path
from unittest import TestSuite, TestLoader, TextTestRunner
import sys
if __name__ == "__main__":
project_root_directory = path.dirname(path.dirname(__file__))
sys.path.append(path.join(project_root_directory, "plugins"))
sys.path.append(path.join(project_root_directory))
discovered_tests = TestLoader().discover(path.dirname(__file__))
run_result = TextTestRunner().run(discovered_tests)
if not run_result.wasSuccessful():
sys.exit(1)
| true | true |
f73ab7c8cbdfa2c0eea579f94eaae11d2b328bdd | 1,099 | py | Python | books/models.py | Toluwalemi/django-bookstore-app | 588d99ee9075fd7cfa9bf0d8fc251efcd3caabb2 | [
"MIT"
] | null | null | null | books/models.py | Toluwalemi/django-bookstore-app | 588d99ee9075fd7cfa9bf0d8fc251efcd3caabb2 | [
"MIT"
] | null | null | null | books/models.py | Toluwalemi/django-bookstore-app | 588d99ee9075fd7cfa9bf0d8fc251efcd3caabb2 | [
"MIT"
] | null | null | null | import uuid
from django.contrib.auth import get_user_model
from django.db import models
# Create your models here.
from django.urls import reverse
class Book(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=250)
author = models.CharField(max_length=250)
price = models.DecimalField(max_digits=6, decimal_places=2)
cover = models.ImageField(upload_to='covers/', blank=True)
class Meta:
indexes = [
models.Index(fields=['id'], name='id_index'),
]
permissions = [
('special_status', 'Can read all books'),
]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('book_detail', args=[str(self.id)])
class Review(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name='reviews', )
review = models.CharField(max_length=250)
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
def __str__(self):
return self.review
| 28.179487 | 86 | 0.681529 | import uuid
from django.contrib.auth import get_user_model
from django.db import models
from django.urls import reverse
class Book(models.Model):
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
title = models.CharField(max_length=250)
author = models.CharField(max_length=250)
price = models.DecimalField(max_digits=6, decimal_places=2)
cover = models.ImageField(upload_to='covers/', blank=True)
class Meta:
indexes = [
models.Index(fields=['id'], name='id_index'),
]
permissions = [
('special_status', 'Can read all books'),
]
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('book_detail', args=[str(self.id)])
class Review(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE, related_name='reviews', )
review = models.CharField(max_length=250)
author = models.ForeignKey(get_user_model(), on_delete=models.CASCADE)
def __str__(self):
return self.review
| true | true |
f73ab7f8d73b3bd19addce32766b4de676985cec | 3,465 | py | Python | coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py | odedzewi/coremltools | 055d4bf9c00dee8a38258128d6599609df9ae32c | [
"BSD-3-Clause"
] | 1 | 2022-02-10T10:54:28.000Z | 2022-02-10T10:54:28.000Z | coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py | 0xgpapad/coremltools | fdd5630c423c0fc4f1a04c3f5a3c17b808a15505 | [
"BSD-3-Clause"
] | null | null | null | coremltools/converters/mil/backend/mil/passes/insert_image_preprocessing_op.py | 0xgpapad/coremltools | fdd5630c423c0fc4f1a04c3f5a3c17b808a15505 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.input_types import ImageType
# import mil internal ops to add it to the builder
from coremltools.converters.mil.mil.ops import defs as _ops
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.types import nptype_from_builtin
import numpy as np
@register_pass(namespace="mil_backend")
class insert_image_preprocessing_ops(AbstractGraphPass):
"""
Insert preprocessing ops, right after the input if its of type Image
"""
def apply(self, prog):
for f_name, f in prog.functions.items():
if f_name == 'main':
_insert_image_preprocessing_ops(f, prog)
def _insert_image_preprocessing_ops(block, prog):
    """For each ImageType input of `block`, insert ``x * scale + bias`` ops
    before the first existing op and rewire all uses of the original input
    var to the preprocessed value. Scale is applied before bias.
    """
    input_types = list(prog.main_input_types)

    for input_type in input_types:
        if isinstance(input_type, ImageType):
            if input_type.name not in block.inputs:
                continue

            input_var = block.inputs[input_type.name]
            placeholder_op = block.placeholder_inputs[input_type.name]
            first_op = block.operations[0]
            old_var = placeholder_op.outputs[0]
            # Skip the add op entirely when every bias component is zero.
            has_bias = np.any(np.array(input_type.bias) != 0)
            with block:
                last_output = input_var
                input_nptype = nptype_from_builtin(type(last_output.dtype()))
                if input_type.scale != 1:
                    last_output = mb.mul(x=last_output,
                                         y=np.array(input_type.scale, dtype=input_nptype),
                                         before_op=first_op, name=input_var.name + "__scaled__")
                if has_bias:
                    if input_type.color_layout == "G":
                        # Grayscale: the bias broadcasts without reshaping.
                        last_output = mb.add(x=last_output,
                                             y=np.array(input_type.bias, dtype=input_nptype),
                                             before_op=first_op, name=input_var.name + "__biased__")
                    else:
                        # Color: reshape the 3-element bias to broadcast over the
                        # channel axis — CHW for rank 3, NCHW for rank 4.
                        if len(last_output.shape) == 3:
                            last_output = mb.add(x=last_output,
                                                 y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]),
                                                 before_op=first_op, name=input_var.name + "__biased__")
                        elif len(last_output.shape) == 4:
                            last_output = mb.add(x=last_output,
                                                 y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]),
                                                 before_op=first_op, name=input_var.name + "__biased__")
                        else:
                            raise TypeError("Unsupported rank for image input type.")

            # Only rewire if a preprocessing op was actually inserted.
            if last_output != input_var:
                block.replace_uses_of_var_after_op(anchor_op=last_output.op,
                                                   old_var=old_var,
                                                   new_var=last_output)
| 49.5 | 119 | 0.567965 |
from coremltools.converters.mil.mil.passes.pass_registry import register_pass
from coremltools.converters.mil.mil.passes.graph_pass import AbstractGraphPass
from coremltools.converters.mil.input_types import ImageType
from coremltools.converters.mil.mil.ops import defs as _ops
from coremltools.converters.mil.mil import Builder as mb
from coremltools.converters.mil.mil.types import nptype_from_builtin
import numpy as np
@register_pass(namespace="mil_backend")
class insert_image_preprocessing_ops(AbstractGraphPass):
def apply(self, prog):
for f_name, f in prog.functions.items():
if f_name == 'main':
_insert_image_preprocessing_ops(f, prog)
def _insert_image_preprocessing_ops(block, prog):
input_types = list(prog.main_input_types)
for input_type in input_types:
if isinstance(input_type, ImageType):
if input_type.name not in block.inputs:
continue
input_var = block.inputs[input_type.name]
placeholder_op = block.placeholder_inputs[input_type.name]
first_op = block.operations[0]
old_var = placeholder_op.outputs[0]
has_bias = np.any(np.array(input_type.bias) != 0)
with block:
last_output = input_var
input_nptype = nptype_from_builtin(type(last_output.dtype()))
if input_type.scale != 1:
last_output = mb.mul(x=last_output,
y=np.array(input_type.scale, dtype=input_nptype),
before_op=first_op, name=input_var.name + "__scaled__")
if has_bias:
if input_type.color_layout == "G":
last_output = mb.add(x=last_output,
y=np.array(input_type.bias, dtype=input_nptype),
before_op=first_op, name=input_var.name + "__biased__")
else:
if len(last_output.shape) == 3:
last_output = mb.add(x=last_output,
y=np.array(input_type.bias, dtype=input_nptype).reshape([3, 1, 1]),
before_op=first_op, name=input_var.name + "__biased__")
elif len(last_output.shape) == 4:
last_output = mb.add(x=last_output,
y=np.array(input_type.bias, dtype=input_nptype).reshape([1, 3, 1, 1]),
before_op=first_op, name=input_var.name + "__biased__")
else:
raise TypeError("Unsupported rank for image input type.")
if last_output != input_var:
block.replace_uses_of_var_after_op(anchor_op=last_output.op,
old_var=old_var,
new_var=last_output)
| true | true |
f73ab8bfb0618c9b58ff5d1b1c0a124f476c43c6 | 13,530 | py | Python | tests/consensus-validation-malicious-producers.py | nikola-tesla-448/iwill-master | e00aa1daf1fd5093a48cca4117a08f45b2e53db9 | [
"MIT"
] | null | null | null | tests/consensus-validation-malicious-producers.py | nikola-tesla-448/iwill-master | e00aa1daf1fd5093a48cca4117a08f45b2e53db9 | [
"MIT"
] | null | null | null | tests/consensus-validation-malicious-producers.py | nikola-tesla-448/iwill-master | e00aa1daf1fd5093a48cca4117a08f45b2e53db9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import testUtils
import argparse
import signal
from collections import namedtuple
import os
import shutil
###############################################################
# Test for validating consensus based block production. We introduce malicious producers which
# reject all transactions.
# We have three test scenarios:
# - No malicious producers. Transactions should be incorporated into the chain.
# - Minority malicious producers (less than a third producer count). Transactions will get incorporated
# into the chain as majority appoves the transactions.
# - Majority malicious producer count (greater than a third producer count). Transactions won't get
# incorporated into the chain as majority rejects the transactions.
###############################################################
# Shorthand aliases into the shared test utilities.
Print=testUtils.Utils.Print
# BUG FIX: only "testUtils" is imported, so the bare name "Utils" was
# undefined and this line raised NameError at import time.
errorExit=testUtils.Utils.errorExit
# Per-node staging payload: config.ini text plus logging.json text.
StagedNodeInfo=namedtuple("StagedNodeInfo", "config logging")
logging00="""{
"includes": [],
"appenders": [{
"name": "stderr",
"type": "console",
"args": {
"stream": "std_error",
"level_colors": [{
"level": "debug",
"color": "green"
},{
"level": "warn",
"color": "brown"
},{
"level": "error",
"color": "red"
}
]
},
"enabled": true
},{
"name": "stdout",
"type": "console",
"args": {
"stream": "std_out",
"level_colors": [{
"level": "debug",
"color": "green"
},{
"level": "warn",
"color": "brown"
},{
"level": "error",
"color": "red"
}
]
},
"enabled": true
},{
"name": "net",
"type": "gelf",
"args": {
"endpoint": "10.160.11.21:12201",
"host": "testnet_00"
},
"enabled": true
}
],
"loggers": [{
"name": "default",
"level": "debug",
"enabled": true,
"additivity": false,
"appenders": [
"stderr",
"net"
]
}
]
}"""
config00="""genesis-json = ./genesis.json
block-log-dir = blocks
readonly = 0
send-whole-blocks = true
shared-file-dir = blockchain
shared-file-size = 8192
http-server-address = 127.0.0.1:8888
p2p-listen-endpoint = 0.0.0.0:9876
p2p-server-address = localhost:9876
allowed-connection = any
p2p-peer-address = localhost:9877
required-participation = true
private-key = ["IWILL6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]
producer-name = initu
plugin = iwillio::producer_plugin
plugin = iwillio::chain_api_plugin
plugin = iwillio::history_plugin
plugin = iwillio::history_api_plugin"""
config01="""genesis-json = ./genesis.json
block-log-dir = blocks
readonly = 0
send-whole-blocks = true
shared-file-dir = blockchain
shared-file-size = 8192
http-server-address = 127.0.0.1:8889
p2p-listen-endpoint = 0.0.0.0:9877
p2p-server-address = localhost:9877
allowed-connection = any
p2p-peer-address = localhost:9876
required-participation = true
private-key = ["IWILL6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]
producer-name = defproducerb
plugin = iwillio::producer_plugin
plugin = iwillio::chain_api_plugin
plugin = iwillio::history_plugin
plugin = iwillio::history_api_plugin"""
producers="""producer-name = defproducerd
producer-name = defproducerf
producer-name = defproducerh
producer-name = defproducerj
producer-name = defproducerl
producer-name = defproducern
producer-name = defproducerp
producer-name = defproducerr
producer-name = defproducert
producer-name = defproducera
producer-name = defproducerc
producer-name = defproducere
producer-name = defproducerg
producer-name = defproduceri
producer-name = defproducerk
producer-name = defproducerm
producer-name = defproducero
producer-name = defproducerq
producer-name = defproducers"""
zeroExecTime="trans-execution-time = 0"
def getNoMaliciousStagedNodesInfo():
    """Stage two honest nodes: node 00 with the base config, node 01 with all producers."""
    return [
        StagedNodeInfo(config00, logging00),
        StagedNodeInfo(config01 + "\n" + producers, logging00),
    ]
def getMinorityMaliciousProducerStagedNodesInfo():
    """Node 00 holds the producer majority; node 01 is malicious (zero tx execution time)."""
    return [
        StagedNodeInfo(config00 + "\n" + producers, logging00),
        StagedNodeInfo(config01 + "\n" + zeroExecTime, logging00),
    ]
def getMajorityMaliciousProducerStagedNodesInfo():
    """Node 01 holds the producer majority AND is malicious (zero tx execution time)."""
    return [
        StagedNodeInfo(config00, logging00),
        StagedNodeInfo(config01 + "\n" + producers + "\n" + zeroExecTime, logging00),
    ]
stagingDir="staging"
def stageScenario(stagedNodeInfos):
    """Write per-node config.ini / logging.json files under the staging tree."""
    assert(stagedNodeInfos != None)
    assert(len(stagedNodeInfos) > 1)

    os.makedirs(stagingDir)
    for nodeIndex, nodeInfo in enumerate(stagedNodeInfos):
        nodeConfigDir = os.path.join(stagingDir, "etc/iwillio/node_%02d" % (nodeIndex))
        os.makedirs(nodeConfigDir)
        with open(os.path.join(nodeConfigDir, "config.ini"), "w") as outFile:
            print(nodeInfo.config, file=outFile)
        with open(os.path.join(nodeConfigDir, "logging.json"), "w") as outFile:
            print(nodeInfo.logging, file=outFile)
    return
def cleanStaging():
    # Remove the staging tree if a previous run left it behind (no-op otherwise).
    os.path.exists(stagingDir) and shutil.rmtree(stagingDir)
def error(msg="", errorCode=1):
    # Print an error banner. NOTE(review): errorCode is accepted but unused and
    # this does NOT exit — callers must return/exit themselves after calling it.
    Print("ERROR:", msg)
parser = argparse.ArgumentParser()
tests=[1,2,3]
parser.add_argument("-t", "--tests", type=str, help="1|2|3 1=run no malicious producers test, 2=minority malicious, 3=majority malicious.", default=None)
parser.add_argument("-w", type=int, help="system wait time", default=testUtils.Utils.systemWaitTimeout)
parser.add_argument("-v", help="verbose logging", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/iwillio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion",
action='store_true')
parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
args = parser.parse_args()
testsArg=args.tests
debug=args.v
waitTimeout=args.w
# BUG FIX: argparse stores "--dump-error-details"-style options under
# underscored attribute names; "args.dump-error-details" parsed as a
# subtraction and raised AttributeError/NameError at runtime.
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
amINoon=not args.not_noon
killIwillInstances= not args.dont_kill
killWallet= not args.dont_kill
testUtils.Utils.Debug=debug

# Restrict -t to the three supported scenarios.
assert (testsArg is None or testsArg == "1" or testsArg == "2" or testsArg == "3")
if testsArg is not None:
    tests=[int(testsArg)]

testUtils.Utils.setSystemWaitTimeout(waitTimeout)
testUtils.Utils.iAmNotNoon()
def myTest(transWillEnterBlock):
testSuccessful=False
cluster=testUtils.Cluster(walletd=True, staging=True)
walletMgr=testUtils.WalletMgr(True)
try:
cluster.killall()
cluster.cleanup()
walletMgr.killall()
walletMgr.cleanup()
pnodes=2
total_nodes=pnodes
topo="mesh"
delay=0
Print("Stand up cluster")
if cluster.launch(pnodes, total_nodes, topo, delay) is False:
error("Failed to stand up iwill cluster.")
return False
accounts=testUtils.Cluster.createAccountKeys(1)
if accounts is None:
error("FAILURE - create keys")
return False
currencyAccount=accounts[0]
currencyAccount.name="currency0000"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName)
for account in accounts:
Print("Importing keys for account %s into wallet %s." % (account.name, testWallet.name))
if not walletMgr.importKey(account, testWallet):
error("Failed to import key for account %s" % (account.name))
return False
node=cluster.getNode(0)
node2=cluster.getNode(1)
defproduceraAccount=testUtils.Cluster.defproduceraAccount
Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, testWallet.name))
if not walletMgr.importKey(defproduceraAccount, testWallet):
error("Failed to import key for account %s" % (defproduceraAccount.name))
return False
Print("Create new account %s via %s" % (currencyAccount.name, defproduceraAccount.name))
transId=node.createAccount(currencyAccount, defproduceraAccount, stakedDeposit=5000, waitForTransBlock=True)
if transId is None:
error("Failed to create account %s" % (currencyAccount.name))
return False
wasmFile="currency.wasm"
abiFile="currency.abi"
Print("Publish contract")
trans=node.publishContract(currencyAccount.name, wasmFile, abiFile, waitForTransBlock=True)
if trans is None:
error("Failed to publish contract.")
return False
Print("push transfer action to currency0000 contract")
contract="currency0000"
action="transfer"
data="{\"from\":\"currency0000\",\"to\":\"defproducera\",\"quantity\":"
if amINoon:
data +="\"00.0050 CUR\",\"memo\":\"test\"}"
else:
data +="50}"
opts="--permission currency0000@active"
if not amINoon:
opts += " --scope currency0000,defproducera"
trans=node.pushMessage(contract, action, data, opts, silentErrors=True)
transInBlock=False
if not trans[0]:
# On slower systems e.g Travis the transaction rejection can happen immediately
# We want to handle fast and slow failures.
if "allocated processing time was exceeded" in trans[1]:
Print("Push message transaction immediately failed.")
else:
error("Exception in push message. %s" % (trans[1]))
return False
else:
transId=testUtils.Node.getTransId(trans[1])
Print("verify transaction exists")
if not node2.waitForTransInBlock(transId):
error("Transaction never made it to node2")
return False
Print("Get details for transaction %s" % (transId))
transaction=node2.getTransaction(transId, exitOnError=True)
signature=transaction["transaction"]["signatures"][0]
blockNum=int(transaction["transaction"]["ref_block_num"])
blockNum += 1
Print("Our transaction is in block %d" % (blockNum))
block=node2.getBlock(blockNum, exitOnError=True)
cycles=block["cycles"]
if len(cycles) > 0:
blockTransSignature=cycles[0][0]["user_input"][0]["signatures"][0]
# Print("Transaction signature: %s\nBlock transaction signature: %s" %
# (signature, blockTransSignature))
transInBlock=(signature == blockTransSignature)
if transWillEnterBlock:
if not transInBlock:
error("Transaction did not enter the chain.")
return False
else:
Print("SUCCESS: Transaction1 entered in the chain.")
elif not transWillEnterBlock:
if transInBlock:
error("Transaction entered the chain.")
return False
else:
Print("SUCCESS: Transaction2 did not enter the chain.")
testSuccessful=True
finally:
if not testSuccessful and dumpErrorDetails:
cluster.dumpErrorDetails()
walletMgr.dumpErrorDetails()
Print("== Errors see above ==")
if killIwillInstances:
Print("Shut down the cluster%s" % (" and cleanup." if (testSuccessful and not keepLogs) else "."))
cluster.killall()
walletMgr.killall()
if testSuccessful and not keepLogs:
Print("Cleanup cluster and wallet data.")
cluster.cleanup()
walletMgr.cleanup()
return True
try:
if 1 in tests:
Print("Cluster with no malicious producers. All producers expected to approve transaction. Hence transaction is expected to enter the chain.")
cleanStaging()
stageScenario(getNoMaliciousStagedNodesInfo())
if not myTest(True):
exit(1)
if 2 in tests:
Print("\nCluster with minority(1) malicious nodes. Majority producers expected to approve transaction. Hence transaction is expected to enter the chain.")
cleanStaging()
stageScenario(getMinorityMaliciousProducerStagedNodesInfo())
if not myTest(True):
exit(1)
if 3 in tests:
Print("\nCluster with majority(20) malicious nodes. Majority producers expected to block transaction. Hence transaction is not expected to enter the chain.")
cleanStaging()
stageScenario(getMajorityMaliciousProducerStagedNodesInfo())
if not myTest(False):
exit(1)
finally:
cleanStaging()
exit(0)
| 33.909774 | 165 | 0.649002 |
import testUtils
import argparse
import signal
from collections import namedtuple
import os
import shutil
in = iwillio::producer_plugin
plugin = iwillio::chain_api_plugin
plugin = iwillio::history_plugin
plugin = iwillio::history_api_plugin"""
config01="""genesis-json = ./genesis.json
block-log-dir = blocks
readonly = 0
send-whole-blocks = true
shared-file-dir = blockchain
shared-file-size = 8192
http-server-address = 127.0.0.1:8889
p2p-listen-endpoint = 0.0.0.0:9877
p2p-server-address = localhost:9877
allowed-connection = any
p2p-peer-address = localhost:9876
required-participation = true
private-key = ["IWILL6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV","5KQwrPbwdL6PhXujxW37FSSQZ1JiwsST4cqQzDeyXtP79zkvFD3"]
producer-name = defproducerb
plugin = iwillio::producer_plugin
plugin = iwillio::chain_api_plugin
plugin = iwillio::history_plugin
plugin = iwillio::history_api_plugin"""
producers="""producer-name = defproducerd
producer-name = defproducerf
producer-name = defproducerh
producer-name = defproducerj
producer-name = defproducerl
producer-name = defproducern
producer-name = defproducerp
producer-name = defproducerr
producer-name = defproducert
producer-name = defproducera
producer-name = defproducerc
producer-name = defproducere
producer-name = defproducerg
producer-name = defproduceri
producer-name = defproducerk
producer-name = defproducerm
producer-name = defproducero
producer-name = defproducerq
producer-name = defproducers"""
zeroExecTime="trans-execution-time = 0"
def getNoMaliciousStagedNodesInfo():
stagedNodesInfo=[]
myConfig00=config00
stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00))
myConfig01=config01+"\n"+producers
stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00))
return stagedNodesInfo
def getMinorityMaliciousProducerStagedNodesInfo():
stagedNodesInfo=[]
myConfig00=config00+"\n"+producers
stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00))
myConfig01=config01+"\n"+zeroExecTime
stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00))
return stagedNodesInfo
def getMajorityMaliciousProducerStagedNodesInfo():
stagedNodesInfo=[]
myConfig00=config00
stagedNodesInfo.append(StagedNodeInfo(myConfig00, logging00))
myConfig01=config01+"\n"+producers+"\n"+zeroExecTime
stagedNodesInfo.append(StagedNodeInfo(myConfig01, logging00))
return stagedNodesInfo
stagingDir="staging"
def stageScenario(stagedNodeInfos):
assert(stagedNodeInfos != None)
assert(len(stagedNodeInfos) > 1)
os.makedirs(stagingDir)
count=0
for stagedNodeInfo in stagedNodeInfos:
configPath=os.path.join(stagingDir, "etc/iwillio/node_%02d" % (count))
os.makedirs(configPath)
with open(os.path.join(configPath, "config.ini"), "w") as textFile:
print(stagedNodeInfo.config,file=textFile)
with open(os.path.join(configPath, "logging.json"), "w") as textFile:
print(stagedNodeInfo.logging,file=textFile)
count += 1
return
def cleanStaging():
os.path.exists(stagingDir) and shutil.rmtree(stagingDir)
def error(msg="", errorCode=1):
Print("ERROR:", msg)
parser = argparse.ArgumentParser()
tests=[1,2,3]
parser.add_argument("-t", "--tests", type=str, help="1|2|3 1=run no malicious producers test, 2=minority malicious, 3=majority malicious.", default=None)
parser.add_argument("-w", type=int, help="system wait time", default=testUtils.Utils.systemWaitTimeout)
parser.add_argument("-v", help="verbose logging", action='store_true')
parser.add_argument("--dump-error-details",
help="Upon error print etc/iwillio/node_*/config.ini and var/lib/node_*/stderr.log to stdout",
action='store_true')
parser.add_argument("--keep-logs", help="Don't delete var/lib/node_* folders upon test completion",
action='store_true')
parser.add_argument("--not-noon", help="This is not the Noon branch.", action='store_true')
parser.add_argument("--dont-kill", help="Leave cluster running after test finishes", action='store_true')
args = parser.parse_args()
testsArg=args.tests
debug=args.v
waitTimeout=args.w
dumpErrorDetails=args.dump-error-details
keepLogs=args.keep-logs
amINoon=not args.not_noon
killIwillInstances= not args.dont-kill
killWallet= not args.dont-kill
testUtils.Utils.Debug=debug
assert (testsArg is None or testsArg == "1" or testsArg == "2" or testsArg == "3")
if testsArg is not None:
tests=[int(testsArg)]
testUtils.Utils.setSystemWaitTimeout(waitTimeout)
testUtils.Utils.iAmNotNoon()
def myTest(transWillEnterBlock):
testSuccessful=False
cluster=testUtils.Cluster(walletd=True, staging=True)
walletMgr=testUtils.WalletMgr(True)
try:
cluster.killall()
cluster.cleanup()
walletMgr.killall()
walletMgr.cleanup()
pnodes=2
total_nodes=pnodes
topo="mesh"
delay=0
Print("Stand up cluster")
if cluster.launch(pnodes, total_nodes, topo, delay) is False:
error("Failed to stand up iwill cluster.")
return False
accounts=testUtils.Cluster.createAccountKeys(1)
if accounts is None:
error("FAILURE - create keys")
return False
currencyAccount=accounts[0]
currencyAccount.name="currency0000"
testWalletName="test"
Print("Creating wallet \"%s\"." % (testWalletName))
testWallet=walletMgr.create(testWalletName)
for account in accounts:
Print("Importing keys for account %s into wallet %s." % (account.name, testWallet.name))
if not walletMgr.importKey(account, testWallet):
error("Failed to import key for account %s" % (account.name))
return False
node=cluster.getNode(0)
node2=cluster.getNode(1)
defproduceraAccount=testUtils.Cluster.defproduceraAccount
Print("Importing keys for account %s into wallet %s." % (defproduceraAccount.name, testWallet.name))
if not walletMgr.importKey(defproduceraAccount, testWallet):
error("Failed to import key for account %s" % (defproduceraAccount.name))
return False
Print("Create new account %s via %s" % (currencyAccount.name, defproduceraAccount.name))
transId=node.createAccount(currencyAccount, defproduceraAccount, stakedDeposit=5000, waitForTransBlock=True)
if transId is None:
error("Failed to create account %s" % (currencyAccount.name))
return False
wasmFile="currency.wasm"
abiFile="currency.abi"
Print("Publish contract")
trans=node.publishContract(currencyAccount.name, wasmFile, abiFile, waitForTransBlock=True)
if trans is None:
error("Failed to publish contract.")
return False
Print("push transfer action to currency0000 contract")
contract="currency0000"
action="transfer"
data="{\"from\":\"currency0000\",\"to\":\"defproducera\",\"quantity\":"
if amINoon:
data +="\"00.0050 CUR\",\"memo\":\"test\"}"
else:
data +="50}"
opts="--permission currency0000@active"
if not amINoon:
opts += " --scope currency0000,defproducera"
trans=node.pushMessage(contract, action, data, opts, silentErrors=True)
transInBlock=False
if not trans[0]:
if "allocated processing time was exceeded" in trans[1]:
Print("Push message transaction immediately failed.")
else:
error("Exception in push message. %s" % (trans[1]))
return False
else:
transId=testUtils.Node.getTransId(trans[1])
Print("verify transaction exists")
if not node2.waitForTransInBlock(transId):
error("Transaction never made it to node2")
return False
Print("Get details for transaction %s" % (transId))
transaction=node2.getTransaction(transId, exitOnError=True)
signature=transaction["transaction"]["signatures"][0]
blockNum=int(transaction["transaction"]["ref_block_num"])
blockNum += 1
Print("Our transaction is in block %d" % (blockNum))
block=node2.getBlock(blockNum, exitOnError=True)
cycles=block["cycles"]
if len(cycles) > 0:
blockTransSignature=cycles[0][0]["user_input"][0]["signatures"][0]
transInBlock=(signature == blockTransSignature)
if transWillEnterBlock:
if not transInBlock:
error("Transaction did not enter the chain.")
return False
else:
Print("SUCCESS: Transaction1 entered in the chain.")
elif not transWillEnterBlock:
if transInBlock:
error("Transaction entered the chain.")
return False
else:
Print("SUCCESS: Transaction2 did not enter the chain.")
testSuccessful=True
finally:
if not testSuccessful and dumpErrorDetails:
cluster.dumpErrorDetails()
walletMgr.dumpErrorDetails()
Print("== Errors see above ==")
if killIwillInstances:
Print("Shut down the cluster%s" % (" and cleanup." if (testSuccessful and not keepLogs) else "."))
cluster.killall()
walletMgr.killall()
if testSuccessful and not keepLogs:
Print("Cleanup cluster and wallet data.")
cluster.cleanup()
walletMgr.cleanup()
return True
try:
if 1 in tests:
Print("Cluster with no malicious producers. All producers expected to approve transaction. Hence transaction is expected to enter the chain.")
cleanStaging()
stageScenario(getNoMaliciousStagedNodesInfo())
if not myTest(True):
exit(1)
if 2 in tests:
Print("\nCluster with minority(1) malicious nodes. Majority producers expected to approve transaction. Hence transaction is expected to enter the chain.")
cleanStaging()
stageScenario(getMinorityMaliciousProducerStagedNodesInfo())
if not myTest(True):
exit(1)
if 3 in tests:
Print("\nCluster with majority(20) malicious nodes. Majority producers expected to block transaction. Hence transaction is not expected to enter the chain.")
cleanStaging()
stageScenario(getMajorityMaliciousProducerStagedNodesInfo())
if not myTest(False):
exit(1)
finally:
cleanStaging()
exit(0)
| true | true |
f73ab8f8ffd2cdbd4147905c99511d2a61036bbd | 20,469 | py | Python | tests/test_api_endpoints/test_survey_endpoints.py | wouterpotters/castoredc_api_client | 963f793f0f747281419532242dc65a009d7cc9cd | [
"MIT"
] | null | null | null | tests/test_api_endpoints/test_survey_endpoints.py | wouterpotters/castoredc_api_client | 963f793f0f747281419532242dc65a009d7cc9cd | [
"MIT"
] | null | null | null | tests/test_api_endpoints/test_survey_endpoints.py | wouterpotters/castoredc_api_client | 963f793f0f747281419532242dc65a009d7cc9cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Testing class for survey endpoints of the Castor EDC API Wrapper.
Link: https://data.castoredc.com/api#/survey
@author: R.C.A. van Linschoten
https://orcid.org/0000-0003-3052-596X
"""
import pytest
from exceptions.exceptions import CastorException
from tests.test_api_endpoints.data_models import (
survey_model,
package_model,
survey_package_instance_model,
)
def create_survey_package_instance_body(record_id, fake):
if fake:
random_package = "FAKE"
else:
random_package = "71C01598-4682-4A4C-90E6-69C0BD38EA47"
return {
"survey_package_id": random_package,
"record_id": record_id,
"ccr_patient_id": None,
"email_address": "clearlyfakemail@itsascam.com",
"package_invitation_subject": None,
"package_invitation": None,
"auto_send": None,
"auto_lock_on_finish": None,
}
class TestSurveyEndpoints:
s_model_keys = survey_model.keys()
p_model_keys = package_model.keys()
i_model_keys = survey_package_instance_model.keys()
test_survey = {
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
test_survey_package = {
"id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"name": "My first survey package",
"description": "",
"sender_name": "Castor EDC",
"auto_send": False,
"allow_step_navigation": True,
"show_step_navigator": True,
"finish_url": "",
"auto_lock_on_finish": False,
"intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.",
"outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n.",
"default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"default_invitation_subject": "Please fill in this survey for Example Study",
"sender_email": "no-reply@castoredc.com",
"is_mobile": False,
"expire_after_hours": None,
"_embedded": {
"surveys": [
{
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
]
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47"
}
},
}
test_survey_instance = {
"id": "115DF660-A00A-4927-9E5F-A07D030D4A09",
"survey_package_instance_id": "115DF660-A00A-4927-9E5F-A07D030D4A09",
"record_id": "000001",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_name": "Test Institute",
"survey_package_name": "My first survey package",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_url_string": "DUQKNQNN",
"progress": 100,
"invitation_subject": "Please fill in this survey for Example Study",
"invitation_content": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"created_on": {
"date": "2019-10-14 09:42:27.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"available_from": {
"date": "2019-10-14 09:42:27.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"expire_on": None,
"sent_on": None,
"first_opened_on": None,
"finished_on": {
"date": "2020-08-14 16:27:12.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"locked": False,
"archived": False,
"auto_lock_on_finish": False,
"auto_send": False,
"_embedded": {
"record": {
"id": "000001",
"record_id": "000001",
"ccr_patient_id": "",
"last_opened_step": "FFF23B2C-AEE6-4304-9CC4-9C7C431D5387",
"progress": 28,
"status": "open",
"archived": False,
"archived_reason": None,
"created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"created_on": {
"date": "2019-10-07 16:16:02.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"updated_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"updated_on": {
"date": "2020-11-27 14:37:55.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"randomized_id": None,
"randomized_on": None,
"randomization_group": None,
"randomization_group_name": None,
"_embedded": {
"institute": {
"id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"name": "Test Institute",
"abbreviation": "TES",
"code": "TES",
"order": 0,
"deleted": False,
"country_id": 169,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5"
}
},
}
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/record/000001"
}
},
},
"institute": {
"id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"name": "Test Institute",
"abbreviation": "TES",
"code": "TES",
"order": 0,
"deleted": False,
"country_id": 169,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5"
}
},
},
"survey_package": {
"id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"name": "My first survey package",
"description": "",
"sender_name": "Castor EDC",
"auto_send": False,
"allow_step_navigation": True,
"show_step_navigator": True,
"finish_url": "",
"auto_lock_on_finish": False,
"intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.",
"outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n.",
"default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"default_invitation_subject": "Please fill in this survey for Example Study",
"sender_email": "no-reply@castoredc.com",
"is_mobile": False,
"expire_after_hours": None,
"_embedded": {
"surveys": [
{
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
]
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47"
}
},
},
"survey_instances": [
{
"id": "6530D4AB-4705-4864-92AE-B0EC6200E8E5",
"progress": 100,
"progress_total_fields": 5,
"progress_total_fields_not_empty": 5,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/6530D4AB-4705-4864-92AE-B0EC6200E8E5"
}
},
}
],
"survey_reminders": [],
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackageinstance/115DF660-A00A-4927-9E5F-A07D030D4A09"
}
},
}
@pytest.fixture
def all_surveys(self, client):
"""Get all surveys"""
all_surveys = client.all_surveys()
return all_surveys
@pytest.fixture
def all_survey_packages(self, client):
"""Get all survey packages"""
all_survey_packages = client.all_survey_packages()
return all_survey_packages
@pytest.fixture
def all_survey_package_instances(self, client):
"""Get all survey package instances"""
all_survey_package_instances = client.all_survey_package_instances()
return all_survey_package_instances
# SURVEYS
def test_all_surveys(self, all_surveys):
"""Test the structure returned by all_surveys"""
for survey in all_surveys:
survey_keys = survey.keys()
assert len(survey_keys) == len(self.s_model_keys)
for key in survey_keys:
assert key in self.s_model_keys
assert type(survey[key]) in survey_model[key]
def test_single_survey_success(self, client, all_surveys):
"""Test the structure and data returned by single survey"""
survey = client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E")
assert survey == self.test_survey
def test_single_survey_fail(self, client, all_surveys):
"""Test calling on a non-existent survey"""
with pytest.raises(CastorException) as e:
client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C4FAKE")
assert str(e.value) == "404 Entity not found."
# SURVEY PACKAGES
def test_all_survey_packages(self, all_survey_packages):
"""Test structure returned by all_survey_packages"""
for package in all_survey_packages:
package_keys = package.keys()
assert len(package_keys) == len(self.p_model_keys)
for key in package_keys:
assert key in self.p_model_keys
assert type(package[key]) in package_model[key]
def test_single_survey_package_success(self, client, all_survey_packages):
"""Test structure and data returned by single_survey_package"""
package = client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38EA47")
assert package == self.test_survey_package
def test_single_survey_package_fail(self, client, all_survey_packages):
"""Test calling on a non-existent survey package"""
with pytest.raises(CastorException) as e:
client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38FAKE")
assert str(e.value) == "404 SurveyPackage Not Found"
# SURVEY PACKAGE INSTANCES
def test_all_survey_package_instances(self, all_survey_package_instances):
"""Test structure returned by all survey package instances"""
for package_instance in all_survey_package_instances:
instance_keys = package_instance.keys()
assert len(instance_keys) == len(self.i_model_keys)
for key in instance_keys:
assert key in self.i_model_keys
assert type(package_instance[key]) in survey_package_instance_model[key]
def test_all_survey_package_instance_record_success(
self, client, all_survey_package_instances
):
"""Test structure retuned by all_survey_package_instances after filtering on record"""
instances = client.all_survey_package_instances(record="000002")
for instance in instances:
assert instance["record_id"] == "000002"
instance_keys = instance.keys()
assert len(instance_keys) == len(self.i_model_keys)
for key in instance_keys:
assert key in self.i_model_keys
assert type(instance[key]) in survey_package_instance_model[key]
def test_all_survey_package_instance_record_fail(
self, client, all_survey_package_instances
):
"""Test filtering on non-existent record"""
with pytest.raises(CastorException) as e:
client.all_survey_package_instances(record="00FAKE")
assert str(e.value) == "404 Not found."
def test_single_survey_package_instance_success(
self, client, all_survey_package_instances
):
"""Test data and structure returned by selecting single survey."""
instance = client.single_survey_package_instance(
"115DF660-A00A-4927-9E5F-A07D030D4A09"
)
assert instance == self.test_survey_instance
def test_single_survey_package_instance_fail(
self, client, all_survey_package_instances
):
"""Test querying a non-existent survey."""
with pytest.raises(CastorException) as e:
client.single_survey_package_instance(
"115DF660-A00A-4927-9E5F-A07D030DFAKE"
)
assert str(e.value) == "404 Survey package invitation not found"
# POST
def test_create_survey_package_instance_success(self, client):
"""Tests creating a new survey package instance"""
old_amount = len(client.all_survey_package_instances(record="000001"))
body = create_survey_package_instance_body("000001", fake=False)
feedback = client.create_survey_package_instance(**body)
new_amount = len(client.all_survey_package_instances(record="000001"))
assert feedback["record_id"] == "000001"
assert new_amount == old_amount + 1
def test_create_survey_package_instance_fail(self, client):
"""Tests failing to create a new survey package instance by wrong survey_instance_id"""
body = create_survey_package_instance_body("000001", fake=True)
old_amount = len(client.all_survey_package_instances(record="000001"))
with pytest.raises(CastorException) as e:
client.create_survey_package_instance(**body)
assert str(e.value) == "422 Failed Validation"
new_amount = len(client.all_survey_package_instances(record="000001"))
assert new_amount == old_amount
def test_patch_survey_package_instance_success(
self, client, all_survey_package_instances
):
"""Tests patching (locking/unlocking) a survey_package_instance"""
package = client.single_survey_package_instance(
"23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
)
old_status = package["locked"]
target_status = not old_status
client.patch_survey_package_instance(
"23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C", target_status
)
package = client.single_survey_package_instance(
"23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
)
new_status = package["locked"]
assert new_status is not old_status
def test_patch_survey_package_instance_failure(
self, client, all_survey_package_instances
):
"""Tests failing to patch a survey_package_instance"""
package = client.single_survey_package_instance(
"23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
)
old_status = package["locked"]
target_status = not old_status
fake_id = "23B4FD48-BA41-4C9B-BAEF-D5C3DD5FFAKE"
with pytest.raises(CastorException) as e:
client.patch_survey_package_instance(fake_id, target_status)
assert str(e.value) == "404 Survey package invitation not found"
package = client.single_survey_package_instance(
"23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
)
new_status = package["locked"]
assert new_status is old_status
| 46.626424 | 417 | 0.584542 |
import pytest
from exceptions.exceptions import CastorException
from tests.test_api_endpoints.data_models import (
survey_model,
package_model,
survey_package_instance_model,
)
def create_survey_package_instance_body(record_id, fake):
if fake:
random_package = "FAKE"
else:
random_package = "71C01598-4682-4A4C-90E6-69C0BD38EA47"
return {
"survey_package_id": random_package,
"record_id": record_id,
"ccr_patient_id": None,
"email_address": "clearlyfakemail@itsascam.com",
"package_invitation_subject": None,
"package_invitation": None,
"auto_send": None,
"auto_lock_on_finish": None,
}
class TestSurveyEndpoints:
s_model_keys = survey_model.keys()
p_model_keys = package_model.keys()
i_model_keys = survey_package_instance_model.keys()
test_survey = {
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
test_survey_package = {
"id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"name": "My first survey package",
"description": "",
"sender_name": "Castor EDC",
"auto_send": False,
"allow_step_navigation": True,
"show_step_navigator": True,
"finish_url": "",
"auto_lock_on_finish": False,
"intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.",
"outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n.",
"default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"default_invitation_subject": "Please fill in this survey for Example Study",
"sender_email": "no-reply@castoredc.com",
"is_mobile": False,
"expire_after_hours": None,
"_embedded": {
"surveys": [
{
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
]
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47"
}
},
}
test_survey_instance = {
"id": "115DF660-A00A-4927-9E5F-A07D030D4A09",
"survey_package_instance_id": "115DF660-A00A-4927-9E5F-A07D030D4A09",
"record_id": "000001",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_name": "Test Institute",
"survey_package_name": "My first survey package",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_url_string": "DUQKNQNN",
"progress": 100,
"invitation_subject": "Please fill in this survey for Example Study",
"invitation_content": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"created_on": {
"date": "2019-10-14 09:42:27.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"available_from": {
"date": "2019-10-14 09:42:27.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"expire_on": None,
"sent_on": None,
"first_opened_on": None,
"finished_on": {
"date": "2020-08-14 16:27:12.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"locked": False,
"archived": False,
"auto_lock_on_finish": False,
"auto_send": False,
"_embedded": {
"record": {
"id": "000001",
"record_id": "000001",
"ccr_patient_id": "",
"last_opened_step": "FFF23B2C-AEE6-4304-9CC4-9C7C431D5387",
"progress": 28,
"status": "open",
"archived": False,
"archived_reason": None,
"created_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"created_on": {
"date": "2019-10-07 16:16:02.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"updated_by": "B23ABCC4-3A53-FB32-7B78-3960CC907F25",
"updated_on": {
"date": "2020-11-27 14:37:55.000000",
"timezone_type": 3,
"timezone": "Europe/Amsterdam",
},
"randomized_id": None,
"randomized_on": None,
"randomization_group": None,
"randomization_group_name": None,
"_embedded": {
"institute": {
"id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"name": "Test Institute",
"abbreviation": "TES",
"code": "TES",
"order": 0,
"deleted": False,
"country_id": 169,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5"
}
},
}
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/record/000001"
}
},
},
"institute": {
"id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"institute_id": "1CFF5802-0B07-471F-B97E-B5166332F2C5",
"name": "Test Institute",
"abbreviation": "TES",
"code": "TES",
"order": 0,
"deleted": False,
"country_id": 169,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/institute/1CFF5802-0B07-471F-B97E-B5166332F2C5"
}
},
},
"survey_package": {
"id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"survey_package_id": "71C01598-4682-4A4C-90E6-69C0BD38EA47",
"name": "My first survey package",
"description": "",
"sender_name": "Castor EDC",
"auto_send": False,
"allow_step_navigation": True,
"show_step_navigator": True,
"finish_url": "",
"auto_lock_on_finish": False,
"intro_text": "```\n\n\n```\n#### To be able to send surveys, you have to create a survey package that will contain the survey(s) you want to send.\n```\n\n\n```\nHere you can add intro text. This is similar to the intro text in a survey itself, but since a survey package can contain multiple surveys, this is a 'general' introduction that appears in the very beginning.",
"outro_text": "```\n\n\n```\n#### You can now create your own survey! \n```\n\n\n```\n#### Here is a giphy: \n```\n\n\n```\n.",
"default_invitation": 'Dear participant,\n\nYou are participating in the study "Example Study" and we would like to ask you to fill in a survey.\n\nPlease click the link below to complete our survey.\n\n{url}\n\n{logo}',
"default_invitation_subject": "Please fill in this survey for Example Study",
"sender_email": "no-reply@castoredc.com",
"is_mobile": False,
"expire_after_hours": None,
"_embedded": {
"surveys": [
{
"id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"survey_id": "D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E",
"name": "QOL Survey",
"description": "",
"intro_text": "##### This is the survey intro text. Here you can add some information for the participant that they will see before they start filling in the survey.\n```\n\n\n```\n##### Check the help text in the survey form editor to see how you can format this text or add images and links.\n```\n\n\n```\n### For example, you can use hashtags to make the text bigger or add headings.",
"outro_text": "",
"survey_steps": [],
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E"
}
},
}
]
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackage/71C01598-4682-4A4C-90E6-69C0BD38EA47"
}
},
},
"survey_instances": [
{
"id": "6530D4AB-4705-4864-92AE-B0EC6200E8E5",
"progress": 100,
"progress_total_fields": 5,
"progress_total_fields_not_empty": 5,
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/survey/6530D4AB-4705-4864-92AE-B0EC6200E8E5"
}
},
}
],
"survey_reminders": [],
},
"_links": {
"self": {
"href": "https://data.castoredc.com/api/study/D234215B-D956-482D-BF17-71F2BB12A2FD/surveypackageinstance/115DF660-A00A-4927-9E5F-A07D030D4A09"
}
},
}
    @pytest.fixture
    def all_surveys(self, client):
        """Fixture: every survey definition in the study, fetched once per test."""
        all_surveys = client.all_surveys()
        return all_surveys
    @pytest.fixture
    def all_survey_packages(self, client):
        """Fixture: every survey package defined in the study."""
        all_survey_packages = client.all_survey_packages()
        return all_survey_packages
    @pytest.fixture
    def all_survey_package_instances(self, client):
        """Fixture: every survey package instance (invitation) in the study."""
        all_survey_package_instances = client.all_survey_package_instances()
        return all_survey_package_instances
    def test_all_surveys(self, all_surveys):
        """Tests that every returned survey matches the survey data model."""
        for survey in all_surveys:
            survey_keys = survey.keys()
            # Same number of fields, every field known, every value of an allowed type.
            assert len(survey_keys) == len(self.s_model_keys)
            for key in survey_keys:
                assert key in self.s_model_keys
                assert type(survey[key]) in survey_model[key]
    def test_single_survey_success(self, client, all_surveys):
        """Tests retrieving a single known survey by id."""
        survey = client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C44B7E")
        assert survey == self.test_survey
    def test_single_survey_fail(self, client, all_surveys):
        """Tests that retrieving a non-existent survey raises a 404."""
        with pytest.raises(CastorException) as e:
            client.single_survey("D70C1273-B5D8-45CD-BFE8-A0BA75C4FAKE")
        assert str(e.value) == "404 Entity not found."
    def test_all_survey_packages(self, all_survey_packages):
        """Tests that every returned survey package matches the package data model."""
        for package in all_survey_packages:
            package_keys = package.keys()
            assert len(package_keys) == len(self.p_model_keys)
            for key in package_keys:
                assert key in self.p_model_keys
                assert type(package[key]) in package_model[key]
    def test_single_survey_package_success(self, client, all_survey_packages):
        """Tests retrieving a single known survey package by id."""
        package = client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38EA47")
        assert package == self.test_survey_package
    def test_single_survey_package_fail(self, client, all_survey_packages):
        """Tests that retrieving a non-existent survey package raises a 404."""
        with pytest.raises(CastorException) as e:
            client.single_survey_package("71C01598-4682-4A4C-90E6-69C0BD38FAKE")
        assert str(e.value) == "404 SurveyPackage Not Found"
    def test_all_survey_package_instances(self, all_survey_package_instances):
        """Tests that every package instance matches the instance data model."""
        for package_instance in all_survey_package_instances:
            instance_keys = package_instance.keys()
            assert len(instance_keys) == len(self.i_model_keys)
            for key in instance_keys:
                assert key in self.i_model_keys
                assert type(package_instance[key]) in survey_package_instance_model[key]
    def test_all_survey_package_instance_record_success(
        self, client, all_survey_package_instances
    ):
        """Tests filtering package instances by record id."""
        instances = client.all_survey_package_instances(record="000002")
        for instance in instances:
            # Every instance must belong to the requested record and fit the model.
            assert instance["record_id"] == "000002"
            instance_keys = instance.keys()
            assert len(instance_keys) == len(self.i_model_keys)
            for key in instance_keys:
                assert key in self.i_model_keys
                assert type(instance[key]) in survey_package_instance_model[key]
    def test_all_survey_package_instance_record_fail(
        self, client, all_survey_package_instances
    ):
        """Tests that filtering by a non-existent record raises a 404."""
        with pytest.raises(CastorException) as e:
            client.all_survey_package_instances(record="00FAKE")
        assert str(e.value) == "404 Not found."
    def test_single_survey_package_instance_success(
        self, client, all_survey_package_instances
    ):
        """Tests retrieving a single known survey package instance by id."""
        instance = client.single_survey_package_instance(
            "115DF660-A00A-4927-9E5F-A07D030D4A09"
        )
        assert instance == self.test_survey_instance
    def test_single_survey_package_instance_fail(
        self, client, all_survey_package_instances
    ):
        """Tests that retrieving a non-existent package instance raises a 404."""
        with pytest.raises(CastorException) as e:
            client.single_survey_package_instance(
                "115DF660-A00A-4927-9E5F-A07D030DFAKE"
            )
        assert str(e.value) == "404 Survey package invitation not found"
    def test_create_survey_package_instance_success(self, client):
        """Tests creating a survey package instance for a record."""
        # Count before/after to confirm exactly one instance was created.
        old_amount = len(client.all_survey_package_instances(record="000001"))
        body = create_survey_package_instance_body("000001", fake=False)
        feedback = client.create_survey_package_instance(**body)
        new_amount = len(client.all_survey_package_instances(record="000001"))
        assert feedback["record_id"] == "000001"
        assert new_amount == old_amount + 1
    def test_create_survey_package_instance_fail(self, client):
        """Tests that creating an instance with a fake package id fails validation."""
        body = create_survey_package_instance_body("000001", fake=True)
        old_amount = len(client.all_survey_package_instances(record="000001"))
        with pytest.raises(CastorException) as e:
            client.create_survey_package_instance(**body)
        assert str(e.value) == "422 Failed Validation"
        # The failed call must not have created anything.
        new_amount = len(client.all_survey_package_instances(record="000001"))
        assert new_amount == old_amount
    def test_patch_survey_package_instance_success(
        self, client, all_survey_package_instances
    ):
        """Tests patching (locking/unlocking) a survey_package_instance"""
        package = client.single_survey_package_instance(
            "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
        )
        old_status = package["locked"]
        # Flip the lock flag and verify the server persisted the change.
        target_status = not old_status
        client.patch_survey_package_instance(
            "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C", target_status
        )
        package = client.single_survey_package_instance(
            "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
        )
        new_status = package["locked"]
        assert new_status is not old_status
    def test_patch_survey_package_instance_failure(
        self, client, all_survey_package_instances
    ):
        """Tests failing to patch a survey_package_instance"""
        package = client.single_survey_package_instance(
            "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
        )
        old_status = package["locked"]
        target_status = not old_status
        fake_id = "23B4FD48-BA41-4C9B-BAEF-D5C3DD5FFAKE"
        with pytest.raises(CastorException) as e:
            client.patch_survey_package_instance(fake_id, target_status)
        assert str(e.value) == "404 Survey package invitation not found"
        # The real instance must be untouched by the failed patch.
        package = client.single_survey_package_instance(
            "23B4FD48-BA41-4C9B-BAEF-D5C3DD5F8E5C"
        )
        new_status = package["locked"]
        assert new_status is old_status
| true | true |
f73ab90323589397a94c67f89b415f354f638968 | 1,665 | py | Python | data.py | stanford-oval/word-language-model | 3be3f65a198b518b66e22a910f28f83324db3825 | [
"BSD-3-Clause"
] | null | null | null | data.py | stanford-oval/word-language-model | 3be3f65a198b518b66e22a910f28f83324db3825 | [
"BSD-3-Clause"
] | null | null | null | data.py | stanford-oval/word-language-model | 3be3f65a198b518b66e22a910f28f83324db3825 | [
"BSD-3-Clause"
] | 2 | 2020-08-14T19:32:46.000Z | 2022-02-23T03:14:16.000Z | import os
import torch
class Dictionary(object):
    """Bidirectional word <-> integer-index vocabulary built from a corpus."""
    def __init__(self):
        # Maps each known word to its integer id.
        self.word2idx = {}
        # idx2word[i] is the word whose id is i.
        self.idx2word = []
    def add_word(self, word):
        """Register *word* if unseen and return its integer id."""
        if word in self.word2idx:
            return self.word2idx[word]
        index = len(self.idx2word)
        self.idx2word.append(word)
        self.word2idx[word] = index
        return index
    def __len__(self):
        """Number of distinct words in the vocabulary."""
        return len(self.idx2word)
class Corpus(object):
"""Corpus Tokenizer"""
    def __init__(self, path):
        """Build a shared vocabulary and tokenize the three splits under *path*."""
        # Vocabulary shared by all splits, populated as each file is tokenized.
        self.dictionary = Dictionary()
        # 1-D LongTensors of word ids; tokenize() appends <eos> to each line.
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
# line to list of token + eos
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids | 32.019231 | 67 | 0.539339 | import os
import torch
class Dictionary(object):
    """Bidirectional word <-> integer-index vocabulary."""
    def __init__(self):
        self.word2idx = {}  # word -> integer id
        self.idx2word = []  # integer id (list position) -> word
    def add_word(self, word):
        """Register *word* if unseen; return its integer id."""
        if word not in self.word2idx:
            self.idx2word.append(word)
            self.word2idx[word] = len(self.idx2word) - 1
        return self.word2idx[word]
    def __len__(self):
        """Number of distinct words in the vocabulary."""
        return len(self.idx2word)
class Corpus(object):
    def __init__(self, path):
        """Build a shared vocabulary and tokenize train/valid/test under *path*."""
        self.dictionary = Dictionary()
        self.train = self.tokenize(os.path.join(path, 'train.txt'))
        self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
        self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
assert os.path.exists(path)
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids | true | true |
f73abbb189b35907aecd60ecb4295aea42eb648b | 4,029 | py | Python | python/regvar/webapps/alignments/application.py | JohnReid/RegVar | ba48402d4ec4f3422f70410a1f4bb582f0f07d4a | [
"MIT"
] | null | null | null | python/regvar/webapps/alignments/application.py | JohnReid/RegVar | ba48402d4ec4f3422f70410a1f4bb582f0f07d4a | [
"MIT"
] | null | null | null | python/regvar/webapps/alignments/application.py | JohnReid/RegVar | ba48402d4ec4f3422f70410a1f4bb582f0f07d4a | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import bx.align.maf
import cStringIO
import os
import io
import ete2
from flask import Flask, request, send_file
# Log format and level for the whole application.
LOGFORMAT = '%(asctime)-15s %(name)s %(levelname)s %(message)s'
logging.basicConfig(format=LOGFORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
# Environment variable that may name an extra settings file.
ENVSETTINGSVAR = 'ALIGNMENTS_SETTINGS'
app = Flask(__name__)
# Change this to ProductionConfig before deployment
app.config.from_object('regvar.webapps.alignments.DevelopmentConfig')
# Optional overrides loaded from the file named by ALIGNMENTS_SETTINGS.
if os.environ.get(ENVSETTINGSVAR, ''):
    app.config.from_envvar(ENVSETTINGSVAR)
def get_alignment_dir(genome, alignment):
    """Return the directory holding the given *alignment* for *genome*."""
    ucsc_root = app.config['UCSC_DIR']
    return os.path.join(ucsc_root, 'goldenPath', genome, alignment)
def get_maf(genome, alignment, chrom):
    """Return the path of the bzip2-compressed MAF file for *chrom*."""
    maf_name = '{0}.maf.bz2'.format(chrom)
    return os.path.join(get_alignment_dir(genome, alignment), 'maf', maf_name)
def get_treefile(genome, alignment, treename):
    """Return the path of the Newick (.nh) tree file named *treename*."""
    tree_name = '{0}.nh'.format(treename)
    return os.path.join(get_alignment_dir(genome, alignment), tree_name)
@app.route('/newick/<genome>/<alignment>/<treename>')
def newick(genome, alignment, treename):
    """Serve the raw Newick description of the requested tree.

    Uses a context manager so the file handle is closed promptly; the
    original relied on the garbage collector to close it.
    """
    with open(get_treefile(genome, alignment, treename)) as f:
        return f.read()
@app.route('/treeview/<genome>/<alignment>/<treename>')
def treeview(genome, alignment, treename):
    """Render the requested tree to an image and serve it.

    The image format is taken from the 'ImageType' request header
    (default 'png').  NOTE(review): the response mimetype is always
    'image/png' even when another image type is requested -- confirm
    whether that is intended.
    """
    import tempfile
    imagetype = request.headers.get('ImageType', 'png')
    # Construct the tree from the Newick file (handle closed promptly).
    with open(get_treefile(genome, alignment, treename)) as f:
        tree = ete2.Tree(f.read())
    # Choose rendering style: leaf names and branch lengths, no supports.
    ts = ete2.TreeStyle()
    ts.show_leaf_name = True
    ts.show_branch_length = True
    ts.show_branch_support = False
    # ts.scale = 120 # 120 pixels per branch length unit
    # mkstemp creates the file atomically, unlike the race-prone mktemp.
    fd, treefilename = tempfile.mkstemp(suffix='.' + imagetype,
                                        prefix='treeview')
    os.close(fd)
    try:
        # Render the tree into the temporary file.
        tree.render(treefilename, tree_style=ts)
        with open(treefilename, 'rb') as img:
            data = img.read()
    finally:
        # Delete the temp file; the original leaked one per request.
        os.remove(treefilename)
    # Serve the image from memory.
    return send_file(io.BytesIO(data), mimetype='image/png')
@app.route('/alignment/<genome>/<alignment>/<chrom>/<int:start>/<int:end>')
def alignment(genome, alignment, chrom, start, end):
    """Serve the MAF alignment blocks overlapping [start, end) on *chrom*.

    Request headers:
        Chop:     when "1", trim each block to the requested interval.
        Min-Cols: minimum number of alignment columns a chopped block
                  must retain to be written.
    """
    chop = bool(int(request.headers.get('Chop', 0)))
    mincols = int(request.headers.get('Min-Cols', 0))
    src = '{0}.{1}'.format(genome, chrom)
    # app.logger.info(request.headers.get('User-Agent'))
    # app.logger.info(chop)
    index = bx.align.maf.MultiIndexed(
        [get_maf(genome, alignment, chrom)],
        keep_open=True,
        parse_e_rows=True,
        use_cache=True)
    # Write MAF into string
    output = cStringIO.StringIO()
    out = bx.align.maf.Writer(output)
    # NOTE(review): strand is never reassigned, so the reverse-complement
    # branch below is currently dead code -- confirm whether per-request
    # strand selection was intended.
    strand = None
    # Find overlap with reference component
    blocks = index.get(src, start, end)
    # Write each intersecting block
    if chop:
        for block in blocks:
            ref = block.get_component_by_src(src)
            slice_start = max(start, ref.get_forward_strand_start())
            slice_end = min(end, ref.get_forward_strand_end())
            sliced = block.slice_by_component(ref, slice_start, slice_end)
            # If the block is shorter than the minimum allowed size, skip it
            if mincols and (sliced.text_size < mincols):
                continue
            # If the reference component is empty, don't write the block
            if sliced.get_component_by_src(src).size < 1:
                continue
            # Keep only components that are not empty
            sliced.components = [c for c in sliced.components if c.size > 0]
            # Reverse complement if needed
            if (strand is not None) and (ref.strand != strand):
                sliced = sliced.reverse_complement()
            # Write the block
            out.write(sliced)
    else:
        for block in blocks:
            out.write(block)
    result = output.getvalue()
    output.close()
    # Close output MAF
    index.close()
    # NOTE(review): out.close() runs after output.close(); if the writer
    # flushes anything on close it would hit a closed buffer -- confirm.
    out.close()
    return result
if __name__ == '__main__':
    # Run the Flask development server with host/port from the active config.
    app.run(host=app.config['HOST'], port=app.config['PORT'])
| 35.654867 | 77 | 0.660213 |
import logging
import bx.align.maf
import cStringIO
import os
import io
import ete2
from flask import Flask, request, send_file
LOGFORMAT = '%(asctime)-15s %(name)s %(levelname)s %(message)s'
logging.basicConfig(format=LOGFORMAT, level=logging.INFO)
logger = logging.getLogger(__name__)
ENVSETTINGSVAR = 'ALIGNMENTS_SETTINGS'
app = Flask(__name__)
app.config.from_object('regvar.webapps.alignments.DevelopmentConfig')
if os.environ.get(ENVSETTINGSVAR, ''):
app.config.from_envvar(ENVSETTINGSVAR)
def get_alignment_dir(genome, alignment):
    """Return the directory holding the given *alignment* for *genome*."""
    return os.path.join(
        app.config['UCSC_DIR'], 'goldenPath', genome, alignment)
def get_maf(genome, alignment, chrom):
    """Return the path of the bzip2-compressed MAF file for *chrom*."""
    return os.path.join(get_alignment_dir(genome, alignment),
                        'maf', '{0}.maf.bz2'.format(chrom))
def get_treefile(genome, alignment, treename):
    """Return the path of the Newick (.nh) tree file named *treename*."""
    return os.path.join(get_alignment_dir(genome, alignment),
                        '{0}.nh'.format(treename))
@app.route('/newick/<genome>/<alignment>/<treename>')
def newick(genome, alignment, treename):
    """Serve the raw Newick description of the requested tree.

    Uses a context manager so the file handle is closed promptly; the
    original relied on the garbage collector to close it.
    """
    with open(get_treefile(genome, alignment, treename)) as f:
        return f.read()
@app.route('/treeview/<genome>/<alignment>/<treename>')
def treeview(genome, alignment, treename):
    """Render the requested tree to an image and serve it.

    Fixes a truncated statement in this copy ('ktemp(...)') that left
    ``treefilename`` undefined (NameError at request time).  The image
    format is taken from the 'ImageType' request header (default 'png').
    NOTE(review): the response mimetype is always 'image/png' even when
    another image type is requested -- confirm whether that is intended.
    """
    import tempfile
    imagetype = request.headers.get('ImageType', 'png')
    # Construct the tree from the Newick file (handle closed promptly).
    with open(get_treefile(genome, alignment, treename)) as f:
        tree = ete2.Tree(f.read())
    # Rendering style: leaf names and branch lengths, no support values.
    ts = ete2.TreeStyle()
    ts.show_leaf_name = True
    ts.show_branch_length = True
    ts.show_branch_support = False
    # mkstemp creates the file atomically, unlike the race-prone mktemp.
    fd, treefilename = tempfile.mkstemp(suffix='.' + imagetype,
                                        prefix='treeview')
    os.close(fd)
    try:
        # Render the tree into the temporary file.
        tree.render(treefilename, tree_style=ts)
        with open(treefilename, 'rb') as img:
            data = img.read()
    finally:
        # Delete the temp file so requests do not leak files.
        os.remove(treefilename)
    return send_file(io.BytesIO(data), mimetype='image/png')
@app.route('/alignment/<genome>/<alignment>/<chrom>/<int:start>/<int:end>')
def alignment(genome, alignment, chrom, start, end):
    """Serve the MAF alignment blocks overlapping [start, end) on *chrom*.

    Request headers:
        Chop:     when "1", trim each block to the requested interval.
        Min-Cols: minimum number of alignment columns a chopped block
                  must retain to be written.
    """
    chop = bool(int(request.headers.get('Chop', 0)))
    mincols = int(request.headers.get('Min-Cols', 0))
    src = '{0}.{1}'.format(genome, chrom)
    # Open the indexed MAF for this chromosome.
    index = bx.align.maf.MultiIndexed(
        [get_maf(genome, alignment, chrom)],
        keep_open=True,
        parse_e_rows=True,
        use_cache=True)
    # Collect output MAF text in memory.
    output = cStringIO.StringIO()
    out = bx.align.maf.Writer(output)
    # NOTE(review): strand is never reassigned, so the reverse-complement
    # branch below is currently dead code -- confirm intent.
    strand = None
    # Blocks whose reference component overlaps the requested interval.
    blocks = index.get(src, start, end)
    if chop:
        for block in blocks:
            ref = block.get_component_by_src(src)
            slice_start = max(start, ref.get_forward_strand_start())
            slice_end = min(end, ref.get_forward_strand_end())
            sliced = block.slice_by_component(ref, slice_start, slice_end)
            # Skip blocks shorter than the minimum allowed size.
            if mincols and (sliced.text_size < mincols):
                continue
            # Skip blocks whose reference component ends up empty.
            if sliced.get_component_by_src(src).size < 1:
                continue
            # Keep only components that are not empty
            sliced.components = [c for c in sliced.components if c.size > 0]
            # Reverse complement if needed
            if (strand is not None) and (ref.strand != strand):
                sliced = sliced.reverse_complement()
            # Write the block
            out.write(sliced)
    else:
        for block in blocks:
            out.write(block)
    result = output.getvalue()
    output.close()
    # Close output MAF
    index.close()
    # NOTE(review): out.close() runs after output.close() -- confirm the
    # writer does not flush anything on close.
    out.close()
    return result
if __name__ == '__main__':
app.run(host=app.config['HOST'], port=app.config['PORT'])
| true | true |
f73abc1e615309be1749df31916bc425ccea0619 | 352 | py | Python | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 11 | 2015-10-08T07:19:05.000Z | 2020-05-27T12:10:31.000Z | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 7 | 2016-05-25T21:37:28.000Z | 2018-10-03T09:37:31.000Z | setup.py | Ykobe/fingerid | 9c7cbeb3f0350c64a210c262e47264246dde4997 | [
"Apache-2.0"
] | 4 | 2018-11-20T01:07:05.000Z | 2020-01-12T11:36:14.000Z |
from setuptools import setup, find_packages
# setuptools metadata for the ``fingerid`` package, passed to setup() below.
config = {
'description':'fingerid-package',
'author':'Huibin Shen',
'url':'project https://github.com/icdishb/fingerid',
'author_email':'huibin.shen@aalto.fi',
'version':'1.4',
'install_requires':['nose'],
'packages':find_packages(),  # auto-discover all packages in the source tree
'name':'fingerid',
}
setup(**config)
| 20.705882 | 56 | 0.644886 |
from setuptools import setup, find_packages
# setuptools metadata for the ``fingerid`` package, passed to setup() below.
config = {
'description':'fingerid-package',
'author':'Huibin Shen',
'url':'project https://github.com/icdishb/fingerid',
'author_email':'huibin.shen@aalto.fi',
'version':'1.4',
'install_requires':['nose'],
'packages':find_packages(),  # auto-discover all packages in the source tree
'name':'fingerid',
}
setup(**config)
| true | true |
f73abd597a9a5836a9e8b97a95c5dd2a0885178e | 598 | py | Python | TypesandVariableswithTextProcessing/GladiatorExpenses.py | mironmiron3/SoftuniPythonFundamentals | 4f2d968250ae5c58c2b60242da013fe319781c6b | [
"MIT"
] | null | null | null | TypesandVariableswithTextProcessing/GladiatorExpenses.py | mironmiron3/SoftuniPythonFundamentals | 4f2d968250ae5c58c2b60242da013fe319781c6b | [
"MIT"
] | null | null | null | TypesandVariableswithTextProcessing/GladiatorExpenses.py | mironmiron3/SoftuniPythonFundamentals | 4f2d968250ae5c58c2b60242da013fe319781c6b | [
"MIT"
] | null | null | null | lost_fights = int(input())
# Prices for replacement equipment, read from stdin (one per line).
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
# Running cost; renamed from ``sum`` so the builtin is not shadowed.
total_expenses = 0
shield_breaks = 0
# Every 2nd lost fight breaks the helmet and the shield; every 3rd breaks
# the sword; every 2nd shield break also trashes the armor.
for fight in range(1, lost_fights + 1):
    if fight % 2 == 0:
        total_expenses += helmet_price
    if fight % 3 == 0:
        total_expenses += sword_price
    if fight % 2 == 0:
        total_expenses += shield_price
        shield_breaks += 1
        if shield_breaks != 0 and shield_breaks % 2 == 0:
            total_expenses += armor_price
            if shield_breaks == 2:
                shield_breaks = 0
print(f'Gladiator expenses: {total_expenses:.2f} aureus')
| 23 | 54 | 0.555184 | lost_fights = int(input())
# Prices for replacement equipment, read from stdin (one per line).
helmet_price = float(input())
sword_price = float(input())
shield_price = float(input())
armor_price = float(input())
# NOTE(review): ``sum`` shadows the builtin of the same name.
sum = 0
shield_breaks = 0
# Every 2nd lost fight breaks the helmet and the shield; every 3rd breaks
# the sword; every 2nd shield break also trashes the armor.
for i in range (1, lost_fights + 1):
    if i % 2 == 0:
        sum += helmet_price
    if i % 3 == 0:
        sum += sword_price
    if i % 2 == 0:
        sum += shield_price
        shield_breaks += 1
        if shield_breaks != 0 and shield_breaks % 2 == 0:
            sum += armor_price
            if shield_breaks == 2:
                shield_breaks = 0
print(f'Gladiator expenses: {sum:.2f} aureus')
| true | true |
f73abf8634e9e62a45b595822f93f50e03af4672 | 12,085 | py | Python | interfaces/acados_template/acados_template/acados_sim.py | KexianShen/acados | 2981d29dc6ecdaabdb39cd6c0d784724704afe4a | [
"BSD-2-Clause"
] | null | null | null | interfaces/acados_template/acados_template/acados_sim.py | KexianShen/acados | 2981d29dc6ecdaabdb39cd6c0d784724704afe4a | [
"BSD-2-Clause"
] | null | null | null | interfaces/acados_template/acados_template/acados_sim.py | KexianShen/acados | 2981d29dc6ecdaabdb39cd6c0d784724704afe4a | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: future_fstrings -*-
#
# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,
# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,
# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,
# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl
#
# This file is part of acados.
#
# The 2-Clause BSD License
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.;
#
import numpy as np
import casadi as ca
import os
from .acados_model import AcadosModel
from .utils import get_acados_path, get_lib_ext
class AcadosSimDims:
    """
    Class containing the dimensions of the model to be simulated.
    """
    def __init__(self):
        # State and input dimensions are mandatory and start unset;
        # algebraic variables and parameters default to zero.
        self.__nx = None
        self.__nu = None
        self.__nz = 0
        self.__np = 0

    @property
    def nx(self):
        """:math:`n_x` - number of states. Type: int > 0"""
        return self.__nx

    @nx.setter
    def nx(self, nx):
        # Must be a strictly positive integer.
        if not isinstance(nx, int) or nx < 1:
            raise Exception('Invalid nx value, expected positive integer.')
        self.__nx = nx

    @property
    def nu(self):
        """:math:`n_u` - number of inputs. Type: int >= 0"""
        return self.__nu

    @nu.setter
    def nu(self, nu):
        # Must be a nonnegative integer.
        if not isinstance(nu, int) or nu < 0:
            raise Exception('Invalid nu value, expected nonnegative integer.')
        self.__nu = nu

    @property
    def nz(self):
        """:math:`n_z` - number of algebraic variables. Type: int >= 0"""
        return self.__nz

    @nz.setter
    def nz(self, nz):
        # Must be a nonnegative integer.
        if not isinstance(nz, int) or nz < 0:
            raise Exception('Invalid nz value, expected nonnegative integer.')
        self.__nz = nz

    @property
    def np(self):
        """:math:`n_p` - number of parameters. Type: int >= 0"""
        return self.__np

    @np.setter
    def np(self, np):
        # Must be a nonnegative integer.
        if not isinstance(np, int) or np < 0:
            raise Exception('Invalid np value, expected nonnegative integer.')
        self.__np = np

    def set(self, attr, value):
        """Set dimension *attr* (e.g. 'nx') to *value* via its property setter."""
        setattr(self, attr, value)
class AcadosSimOpts:
"""
class containing the solver options
"""
    def __init__(self):
        """Initialize all simulation-solver options to their defaults."""
        # Integration scheme: explicit Runge-Kutta by default.
        self.__integrator_type = 'ERK'
        # Collocation scheme used by implicit integrators.
        self.__collocation_type = 'GAUSS_LEGENDRE'
        # Simulation horizon; starts unset (presumably assigned via the
        # ``T`` setter before use -- TODO confirm with the solver codegen).
        self.__Tsim = None
        # ints
        self.__sim_method_num_stages = 1
        self.__sim_method_num_steps = 1
        self.__sim_method_newton_iter = 3
        # bools
        self.__sens_forw = True
        self.__sens_adj = False
        self.__sens_algebraic = False
        self.__sens_hess = False
        self.__output_z = False
        # int flag (0/1): reuse Jacobians across steps when set.
        self.__sim_method_jac_reuse = 0
@property
def integrator_type(self):
"""Integrator type. Default: 'ERK'."""
return self.__integrator_type
@property
def num_stages(self):
"""Number of stages in the integrator. Default: 1"""
return self.__sim_method_num_stages
@property
def num_steps(self):
"""Number of steps in the integrator. Default: 1"""
return self.__sim_method_num_steps
@property
def newton_iter(self):
"""Number of Newton iterations in simulation method. Default: 3"""
return self.__sim_method_newton_iter
@property
def sens_forw(self):
"""Boolean determining if forward sensitivities are computed. Default: True"""
return self.__sens_forw
@property
def sens_adj(self):
"""Boolean determining if adjoint sensitivities are computed. Default: False"""
return self.__sens_adj
@property
def sens_algebraic(self):
"""Boolean determining if sensitivities wrt algebraic variables are computed. Default: False"""
return self.__sens_algebraic
@property
def sens_hess(self):
"""Boolean determining if hessians are computed. Default: False"""
return self.__sens_hess
@property
def output_z(self):
"""Boolean determining if values for algebraic variables (corresponding to start of simulation interval) are computed. Default: False"""
return self.__output_z
@property
def sim_method_jac_reuse(self):
"""Integer determining if jacobians are reused (0 or 1). Default: 0"""
return self.__sim_method_jac_reuse
@property
def T(self):
"""Time horizon"""
return self.__Tsim
@property
def collocation_type(self):
"""Collocation type: relevant for implicit integrators
-- string in {GAUSS_RADAU_IIA, GAUSS_LEGENDRE}
Default: GAUSS_LEGENDRE
"""
return self.__collocation_type
    # --- validating setters: each rejects values of the wrong type/range
    # --- with a descriptive Exception ------------------------------------
    @integrator_type.setter
    def integrator_type(self, integrator_type):
        integrator_types = ('ERK', 'IRK', 'GNSF')
        if integrator_type in integrator_types:
            self.__integrator_type = integrator_type
        else:
            raise Exception('Invalid integrator_type value. Possible values are:\n\n' \
                + ',\n'.join(integrator_types) + '.\n\nYou have: ' + integrator_type + '.\n\n')
    @collocation_type.setter
    def collocation_type(self, collocation_type):
        collocation_types = ('GAUSS_RADAU_IIA', 'GAUSS_LEGENDRE')
        if collocation_type in collocation_types:
            self.__collocation_type = collocation_type
        else:
            raise Exception('Invalid collocation_type value. Possible values are:\n\n' \
                + ',\n'.join(collocation_types) + '.\n\nYou have: ' + collocation_type + '.\n\n')
    @T.setter
    def T(self, T):
        # note: T is deliberately unvalidated; any value is accepted here
        self.__Tsim = T
    @num_stages.setter
    def num_stages(self, num_stages):
        if isinstance(num_stages, int):
            self.__sim_method_num_stages = num_stages
        else:
            raise Exception('Invalid num_stages value. num_stages must be an integer.')
    @num_steps.setter
    def num_steps(self, num_steps):
        if isinstance(num_steps, int):
            self.__sim_method_num_steps = num_steps
        else:
            raise Exception('Invalid num_steps value. num_steps must be an integer.')
    @newton_iter.setter
    def newton_iter(self, newton_iter):
        if isinstance(newton_iter, int):
            self.__sim_method_newton_iter = newton_iter
        else:
            raise Exception('Invalid newton_iter value. newton_iter must be an integer.')
    @sens_forw.setter
    def sens_forw(self, sens_forw):
        if sens_forw in (True, False):
            self.__sens_forw = sens_forw
        else:
            raise Exception('Invalid sens_forw value. sens_forw must be a Boolean.')
    @sens_adj.setter
    def sens_adj(self, sens_adj):
        if sens_adj in (True, False):
            self.__sens_adj = sens_adj
        else:
            raise Exception('Invalid sens_adj value. sens_adj must be a Boolean.')
    @sens_hess.setter
    def sens_hess(self, sens_hess):
        if sens_hess in (True, False):
            self.__sens_hess = sens_hess
        else:
            raise Exception('Invalid sens_hess value. sens_hess must be a Boolean.')
    @sens_algebraic.setter
    def sens_algebraic(self, sens_algebraic):
        if sens_algebraic in (True, False):
            self.__sens_algebraic = sens_algebraic
        else:
            raise Exception('Invalid sens_algebraic value. sens_algebraic must be a Boolean.')
    @output_z.setter
    def output_z(self, output_z):
        if output_z in (True, False):
            self.__output_z = output_z
        else:
            raise Exception('Invalid output_z value. output_z must be a Boolean.')
    @sim_method_jac_reuse.setter
    def sim_method_jac_reuse(self, sim_method_jac_reuse):
        if sim_method_jac_reuse in (0, 1):
            self.__sim_method_jac_reuse = sim_method_jac_reuse
        else:
            raise Exception('Invalid sim_method_jac_reuse value. sim_method_jac_reuse must be 0 or 1.')
class AcadosSim:
    """
    The class has the following properties that can be modified to formulate a specific simulation problem, see below:
    :param acados_path: string with the path to acados. It is used to generate the include and lib paths.
    - :py:attr:`dims` of type :py:class:`acados_template.acados_ocp.AcadosSimDims` - are automatically detected from model
    - :py:attr:`model` of type :py:class:`acados_template.acados_model.AcadosModel`
    - :py:attr:`solver_options` of type :py:class:`acados_template.acados_sim.AcadosSimOpts`
    - :py:attr:`acados_include_path` (set automatically)
    - :py:attr:`shared_lib_ext` (set automatically)
    - :py:attr:`acados_lib_path` (set automatically)
    - :py:attr:`parameter_values` - used to initialize the parameters (can be changed)
    """
    def __init__(self, acados_path=''):
        if acados_path == '':
            acados_path = get_acados_path()
        self.dims = AcadosSimDims()
        """Dimension definitions, automatically detected from :py:attr:`model`. Type :py:class:`acados_template.acados_sim.AcadosSimDims`"""
        self.model = AcadosModel()
        """Model definitions, type :py:class:`acados_template.acados_model.AcadosModel`"""
        self.solver_options = AcadosSimOpts()
        """Solver Options, type :py:class:`acados_template.acados_sim.AcadosSimOpts`"""
        self.acados_include_path = os.path.join(acados_path, 'include').replace(os.sep, '/') # the replace part is important on Windows for CMake
        """Path to acados include directory (set automatically), type: `string`"""
        self.acados_lib_path = os.path.join(acados_path, 'lib').replace(os.sep, '/') # the replace part is important on Windows for CMake
        """Path to where acados library is located (set automatically), type: `string`"""
        self.code_export_directory = 'c_generated_code'
        """Path to where code will be exported. Default: `c_generated_code`."""
        self.shared_lib_ext = get_lib_ext()
        self.cython_include_dirs = ''
        self.__parameter_values = np.array([])
    @property
    def parameter_values(self):
        """:math:`p` - initial values for parameter - can be updated"""
        return self.__parameter_values
    @parameter_values.setter
    def parameter_values(self, parameter_values):
        if isinstance(parameter_values, np.ndarray):
            self.__parameter_values = parameter_values
        else:
            raise Exception('Invalid parameter_values value. ' +
                            f'Expected numpy array, got {type(parameter_values)}.')
    def set(self, attr, value):
        # tokenize string
        tokens = attr.split('_', 1)
        if len(tokens) > 1:
            setter_to_call = getattr(getattr(self, tokens[0]), 'set')
        else:
            setter_to_call = getattr(self, 'set')
        # NOTE(review): if `attr` contains no underscore, `tokens` has length 1
        # and `tokens[1]` below raises IndexError (and getattr(self, 'set')
        # would recurse anyway). Looks like `attr` is expected to be of the
        # form '<member>_<sub-attribute>' -- confirm against callers.
        setter_to_call(tokens[1], value)
        return
| 36.182635 | 145 | 0.658833 |
import numpy as np
import casadi as ca
import os
from .acados_model import AcadosModel
from .utils import get_acados_path, get_lib_ext
class AcadosSimDims:
    """Dimension container for an acados simulation problem.

    Exposes the state (nx), control (nu), algebraic-variable (nz) and
    parameter (np) counts through validated properties.
    """
    def __init__(self):
        # nx/nu start unset; nz/np default to zero.
        self.__nx = None
        self.__nu = None
        self.__nz = 0
        self.__np = 0
    @property
    def nx(self):
        """Number of differential states (positive integer)."""
        return self.__nx
    @property
    def nz(self):
        """Number of algebraic variables (nonnegative integer)."""
        return self.__nz
    @property
    def nu(self):
        """Number of control inputs (nonnegative integer)."""
        return self.__nu
    @property
    def np(self):
        """Number of parameters (nonnegative integer)."""
        return self.__np
    @nx.setter
    def nx(self, nx):
        # nx must be strictly positive
        if not (isinstance(nx, int) and nx > 0):
            raise Exception('Invalid nx value, expected positive integer.')
        self.__nx = nx
    @nz.setter
    def nz(self, nz):
        if not (isinstance(nz, int) and nz > -1):
            raise Exception('Invalid nz value, expected nonnegative integer.')
        self.__nz = nz
    @nu.setter
    def nu(self, nu):
        if not (isinstance(nu, int) and nu > -1):
            raise Exception('Invalid nu value, expected nonnegative integer.')
        self.__nu = nu
    @np.setter
    def np(self, np):
        if not (isinstance(np, int) and np > -1):
            raise Exception('Invalid np value, expected nonnegative integer.')
        self.__np = np
    def set(self, attr, value):
        """Generic setter: dispatches to the property named `attr`."""
        setattr(self, attr, value)
class AcadosSimOpts:
    """Options (integrator choice, sensitivity flags, horizon) for an acados
    integrator/simulator; all fields are exposed via validating properties."""
    def __init__(self):
        self.__integrator_type = 'ERK'
        self.__collocation_type = 'GAUSS_LEGENDRE'
        # simulation horizon (set via the `T` property)
        self.__Tsim = None
        # ints
        self.__sim_method_num_stages = 1
        self.__sim_method_num_steps = 1
        self.__sim_method_newton_iter = 3
        # bools
        self.__sens_forw = True
        self.__sens_adj = False
        self.__sens_algebraic = False
        self.__sens_hess = False
        self.__output_z = False
        self.__sim_method_jac_reuse = 0
    @property
    def integrator_type(self):
        """Integrator type. Default: 'ERK'."""
        return self.__integrator_type
    @property
    def num_stages(self):
        """Number of stages in the integrator. Default: 1"""
        return self.__sim_method_num_stages
    @property
    def num_steps(self):
        """Number of steps in the integrator. Default: 1"""
        return self.__sim_method_num_steps
    @property
    def newton_iter(self):
        """Number of Newton iterations in simulation method. Default: 3"""
        return self.__sim_method_newton_iter
    @property
    def sens_forw(self):
        """Boolean determining if forward sensitivities are computed. Default: True"""
        return self.__sens_forw
    @property
    def sens_adj(self):
        """Boolean determining if adjoint sensitivities are computed. Default: False"""
        return self.__sens_adj
    @property
    def sens_algebraic(self):
        """Boolean determining if sensitivities wrt algebraic variables are computed. Default: False"""
        return self.__sens_algebraic
    @property
    def sens_hess(self):
        """Boolean determining if hessians are computed. Default: False"""
        return self.__sens_hess
    @property
    def output_z(self):
        """Boolean determining if values for algebraic variables are computed. Default: False"""
        return self.__output_z
    @property
    def sim_method_jac_reuse(self):
        """Integer determining if jacobians are reused (0 or 1). Default: 0"""
        return self.__sim_method_jac_reuse
    @property
    def T(self):
        """Time horizon"""
        return self.__Tsim
    @property
    def collocation_type(self):
        """Collocation type for implicit integrators. Default: 'GAUSS_LEGENDRE'."""
        return self.__collocation_type
    @integrator_type.setter
    def integrator_type(self, integrator_type):
        integrator_types = ('ERK', 'IRK', 'GNSF')
        if integrator_type in integrator_types:
            self.__integrator_type = integrator_type
        else:
            raise Exception('Invalid integrator_type value. Possible values are:\n\n' \
                + ',\n'.join(integrator_types) + '.\n\nYou have: ' + integrator_type + '.\n\n')
    @collocation_type.setter
    def collocation_type(self, collocation_type):
        collocation_types = ('GAUSS_RADAU_IIA', 'GAUSS_LEGENDRE')
        if collocation_type in collocation_types:
            self.__collocation_type = collocation_type
        else:
            raise Exception('Invalid collocation_type value. Possible values are:\n\n' \
                + ',\n'.join(collocation_types) + '.\n\nYou have: ' + collocation_type + '.\n\n')
    @T.setter
    def T(self, T):
        # T is deliberately unvalidated
        self.__Tsim = T
    @num_stages.setter
    def num_stages(self, num_stages):
        if isinstance(num_stages, int):
            self.__sim_method_num_stages = num_stages
        else:
            raise Exception('Invalid num_stages value. num_stages must be an integer.')
    @num_steps.setter
    def num_steps(self, num_steps):
        if isinstance(num_steps, int):
            self.__sim_method_num_steps = num_steps
        else:
            raise Exception('Invalid num_steps value. num_steps must be an integer.')
    @newton_iter.setter
    def newton_iter(self, newton_iter):
        if isinstance(newton_iter, int):
            self.__sim_method_newton_iter = newton_iter
        else:
            raise Exception('Invalid newton_iter value. newton_iter must be an integer.')
    @sens_forw.setter
    def sens_forw(self, sens_forw):
        if sens_forw in (True, False):
            self.__sens_forw = sens_forw
        else:
            raise Exception('Invalid sens_forw value. sens_forw must be a Boolean.')
    @sens_adj.setter
    def sens_adj(self, sens_adj):
        if sens_adj in (True, False):
            self.__sens_adj = sens_adj
        else:
            raise Exception('Invalid sens_adj value. sens_adj must be a Boolean.')
    @sens_hess.setter
    def sens_hess(self, sens_hess):
        if sens_hess in (True, False):
            self.__sens_hess = sens_hess
        else:
            raise Exception('Invalid sens_hess value. sens_hess must be a Boolean.')
    @sens_algebraic.setter
    def sens_algebraic(self, sens_algebraic):
        if sens_algebraic in (True, False):
            self.__sens_algebraic = sens_algebraic
        else:
            raise Exception('Invalid sens_algebraic value. sens_algebraic must be a Boolean.')
    @output_z.setter
    def output_z(self, output_z):
        if output_z in (True, False):
            self.__output_z = output_z
        else:
            raise Exception('Invalid output_z value. output_z must be a Boolean.')
    @sim_method_jac_reuse.setter
    def sim_method_jac_reuse(self, sim_method_jac_reuse):
        if sim_method_jac_reuse in (0, 1):
            self.__sim_method_jac_reuse = sim_method_jac_reuse
        else:
            raise Exception('Invalid sim_method_jac_reuse value. sim_method_jac_reuse must be 0 or 1.')
class AcadosSim:
    """Container tying together dims, model and solver options for an acados
    simulation problem; include/lib paths are derived from `acados_path`."""
    def __init__(self, acados_path=''):
        if acados_path == '':
            acados_path = get_acados_path()
        self.dims = AcadosSimDims()
        self.model = AcadosModel()
        self.solver_options = AcadosSimOpts()
        # the .replace() matters on Windows so CMake receives forward slashes
        self.acados_include_path = os.path.join(acados_path, 'include').replace(os.sep, '/')
        self.acados_lib_path = os.path.join(acados_path, 'lib').replace(os.sep, '/')
        self.code_export_directory = 'c_generated_code'
        self.shared_lib_ext = get_lib_ext()
        self.cython_include_dirs = ''
        self.__parameter_values = np.array([])
    @property
    def parameter_values(self):
        """Initial parameter values `p`; must be a numpy array."""
        return self.__parameter_values
    @parameter_values.setter
    def parameter_values(self, parameter_values):
        if isinstance(parameter_values, np.ndarray):
            self.__parameter_values = parameter_values
        else:
            raise Exception('Invalid parameter_values value. ' +
                            f'Expected numpy array, got {type(parameter_values)}.')
    def set(self, attr, value):
        # split '<member>_<sub-attribute>' once on the first underscore
        tokens = attr.split('_', 1)
        if len(tokens) > 1:
            setter_to_call = getattr(getattr(self, tokens[0]), 'set')
        else:
            setter_to_call = getattr(self, 'set')
        # NOTE(review): with no underscore in `attr`, tokens[1] below raises
        # IndexError (and getattr(self, 'set') would recurse) -- confirm the
        # intended single-token behaviour.
        setter_to_call(tokens[1], value)
        return
| true | true |
f73abfc472e17ce4fa2f46adb85cd85b0ea94d15 | 1,811 | py | Python | test/test_team_api.py | angeiv/python-quay | 16072f87956d8f581ac9ebccc67f6563e977cf52 | [
"MIT"
] | null | null | null | test/test_team_api.py | angeiv/python-quay | 16072f87956d8f581ac9ebccc67f6563e977cf52 | [
"MIT"
] | null | null | null | test/test_team_api.py | angeiv/python-quay | 16072f87956d8f581ac9ebccc67f6563e977cf52 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Quay Frontend
This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>. # noqa: E501
OpenAPI spec version: v1
Contact: support@quay.io
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import quay
from api.team_api import TeamApi # noqa: E501
from quay.rest import ApiException
class TestTeamApi(unittest.TestCase):
    """TeamApi unit test stubs"""

    def setUp(self):
        # Bug fix: this was `api.team_api.TeamApi()`, but `api` is not a bound
        # name in this module (only `TeamApi` is imported at the top), so setUp
        # raised NameError before any test could run.
        self.api = TeamApi()

    def tearDown(self):
        pass

    def test_delete_organization_team(self):
        """Test case for delete_organization_team
        """
        pass

    def test_delete_organization_team_member(self):
        """Test case for delete_organization_team_member
        """
        pass

    def test_delete_team_member_email_invite(self):
        """Test case for delete_team_member_email_invite
        """
        pass

    def test_get_organization_team_members(self):
        """Test case for get_organization_team_members
        """
        pass

    def test_get_organization_team_permissions(self):
        """Test case for get_organization_team_permissions
        """
        pass

    def test_invite_team_member_email(self):
        """Test case for invite_team_member_email
        """
        pass

    def test_update_organization_team(self):
        """Test case for update_organization_team
        """
        pass

    def test_update_organization_team_member(self):
        """Test case for update_organization_team_member
        """
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| 22.085366 | 201 | 0.663722 |
from __future__ import absolute_import
import unittest
import quay
from api.team_api import TeamApi
from quay.rest import ApiException
class TestTeamApi(unittest.TestCase):
    """TeamApi unit test stubs."""

    def setUp(self):
        # Bug fix: this was `api.team_api.TeamApi()`, but `api` is not a bound
        # name here -- only `TeamApi` is imported -- so setUp raised NameError.
        self.api = TeamApi()

    def tearDown(self):
        pass

    def test_delete_organization_team(self):
        """Test case for delete_organization_team."""
        pass

    def test_delete_organization_team_member(self):
        """Test case for delete_organization_team_member."""
        pass

    def test_delete_team_member_email_invite(self):
        """Test case for delete_team_member_email_invite."""
        pass

    def test_get_organization_team_members(self):
        """Test case for get_organization_team_members."""
        pass

    def test_get_organization_team_permissions(self):
        """Test case for get_organization_team_permissions."""
        pass

    def test_invite_team_member_email(self):
        """Test case for invite_team_member_email."""
        pass

    def test_update_organization_team(self):
        """Test case for update_organization_team."""
        pass

    def test_update_organization_team_member(self):
        """Test case for update_organization_team_member."""
        pass
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| true | true |
f73ac1a9538d78cb00c553d98f45d1bf5f55f4e6 | 1,217 | py | Python | code/utils/numerical/ssyrk.py | dmytrov/gaussianprocess | 7044bd2d66f44e10656fee17e94fdee0c24c70bb | [
"MIT"
] | null | null | null | code/utils/numerical/ssyrk.py | dmytrov/gaussianprocess | 7044bd2d66f44e10656fee17e94fdee0c24c70bb | [
"MIT"
] | null | null | null | code/utils/numerical/ssyrk.py | dmytrov/gaussianprocess | 7044bd2d66f44e10656fee17e94fdee0c24c70bb | [
"MIT"
] | null | null | null | import os
import sys
import time
import numpy
from numpy import zeros
from numpy.random import randn
from scipy.linalg import blas
def run_ssyrk(N, l):
A = randn(N, N).astype('float32', order='F')
C = zeros((N, N), dtype='float32', order='F')
start = time.time()
for i in range(0, l):
blas.ssyrk(1.0, A, c=C, overwrite_c=True)
end = time.time()
timediff = (end - start)
mflops = (N * N * N) * l / timediff
mflops *= 1e-6
size = "%dx%d" % (N, N)
print("%14s :\t%20f MFlops\t%20f sec" % (size, mflops, timediff))
if __name__ == "__main__":
    # Defaults: sweep sizes N..NMAX in steps of NINC, LOOPS calls per size.
    N = 128
    NMAX = 2048
    NINC = 128
    LOOPS = 1
    # Positional CLI args (after the script name): start size, max size,
    # size increment, loop count. `z` counts argv position, so argv[0]
    # (the script name) is skipped.
    z = 0
    for arg in sys.argv:
        if z == 1:
            N = int(arg)
        elif z == 2:
            NMAX = int(arg)
        elif z == 3:
            NINC = int(arg)
        elif z == 4:
            LOOPS = int(arg)
        z = z + 1
    # Environment variable overrides the CLI loop count when non-empty.
    if 'OPENBLAS_LOOPS' in os.environ:
        p = os.environ['OPENBLAS_LOOPS']
        if p:
            LOOPS = int(p)
    print("From: %d To: %d Step=%d Loops=%d" % (N, NMAX, NINC, LOOPS))
    print("\tSIZE\t\t\tFlops\t\t\t\t\tTime")
    for i in range(N, NMAX + NINC, NINC):
        run_ssyrk(i, LOOPS)
| 20.982759 | 70 | 0.515201 | import os
import sys
import time
import numpy
from numpy import zeros
from numpy.random import randn
from scipy.linalg import blas
def run_ssyrk(N, l):
    """Benchmarks BLAS ssyrk on an NxN float32 matrix, repeated l times,
    and prints size, MFlops and elapsed seconds."""
    # Fortran-ordered operands, as expected by BLAS.
    A = randn(N, N).astype('float32', order='F')
    C = zeros((N, N), dtype='float32', order='F')
    start = time.time()
    for i in range(0, l):
        blas.ssyrk(1.0, A, c=C, overwrite_c=True)
    end = time.time()
    timediff = (end - start)
    # Nominal N^3 flops per call, reported in MFlops.
    mflops = (N * N * N) * l / timediff
    mflops *= 1e-6
    size = "%dx%d" % (N, N)
    print("%14s :\t%20f MFlops\t%20f sec" % (size, mflops, timediff))
if __name__ == "__main__":
    # Defaults: sweep sizes N..NMAX in steps of NINC, LOOPS calls per size.
    N = 128
    NMAX = 2048
    NINC = 128
    LOOPS = 1
    # Positional CLI args: start size, max size, increment, loop count;
    # `z` tracks argv position so argv[0] (script name) is skipped.
    z = 0
    for arg in sys.argv:
        if z == 1:
            N = int(arg)
        elif z == 2:
            NMAX = int(arg)
        elif z == 3:
            NINC = int(arg)
        elif z == 4:
            LOOPS = int(arg)
        z = z + 1
    # Environment variable overrides the loop count when set and non-empty.
    if 'OPENBLAS_LOOPS' in os.environ:
        p = os.environ['OPENBLAS_LOOPS']
        if p:
            LOOPS = int(p)
    print("From: %d To: %d Step=%d Loops=%d" % (N, NMAX, NINC, LOOPS))
    print("\tSIZE\t\t\tFlops\t\t\t\t\tTime")
    for i in range(N, NMAX + NINC, NINC):
        run_ssyrk(i, LOOPS)
| true | true |
f73ac1b6436bdbc2c539fd4445e25a2425a99511 | 1,270 | py | Python | cages/.shared/config_interface_http_1.py | targeted/pythomnic3k | c59f8c11302c0a568f45ec626ec6a0065527aa79 | [
"BSD-3-Clause"
] | null | null | null | cages/.shared/config_interface_http_1.py | targeted/pythomnic3k | c59f8c11302c0a568f45ec626ec6a0065527aa79 | [
"BSD-3-Clause"
] | 7 | 2019-06-06T15:47:56.000Z | 2019-06-15T18:09:30.000Z | cages/.shared/config_interface_http_1.py | targeted/pythomnic3k | c59f8c11302c0a568f45ec626ec6a0065527aa79 | [
"BSD-3-Clause"
] | null | null | null | # configuration file for interface "http_1"
# this file exists as a reference for configuring HTTP interfaces
#
# copy this file to your own cage, possibly renaming into
# config_interface_YOUR_INTERFACE_NAME.py, then modify the copy
config = dict \
(
protocol = "http", # meta
listener_address = ("0.0.0.0", 80), # tcp
max_connections = 100, # tcp
ssl_key_cert_file = None, # ssl, optional filename
ssl_ca_cert_file = None, # ssl, optional filename
ssl_ciphers = None, # ssl, optional str
ssl_protocol = None, # ssl, optional "SSLv23", "TLSv1", "TLSv1_1", "TLSv1_2" or "TLS"
response_encoding = "windows-1251", # http
original_ip_header_fields = (), # http
allowed_methods = ("GET", "POST"), # http
keep_alive_support = True, # http
keep_alive_idle_timeout = 120.0, # http
keep_alive_max_requests = 10, # http
gzip_content_types = (), # http, tuple of regexes like "text/.+"
)
# DO NOT TOUCH BELOW THIS LINE
__all__ = [ "get", "copy" ]
get = lambda key, default = None: pmnc.config.get_(config, {}, key, default)
copy = lambda: pmnc.config.copy_(config, {})
# EOF
| 38.484848 | 106 | 0.603937 |
config = dict \
(
protocol = "http",                                 # meta
listener_address = ("0.0.0.0", 80),                # tcp
max_connections = 100,                             # tcp
ssl_key_cert_file = None,                          # ssl, optional filename
ssl_ca_cert_file = None,                           # ssl, optional filename
ssl_ciphers = None,                                # ssl, optional str
ssl_protocol = None,                               # ssl, optional protocol name
response_encoding = "windows-1251",                # http
original_ip_header_fields = (),                    # http
allowed_methods = ("GET", "POST"),                 # http
keep_alive_support = True,                         # http
keep_alive_idle_timeout = 120.0,                   # http
keep_alive_max_requests = 10,                      # http
gzip_content_types = (),                           # http, tuple of regexes like "text/.+"
)
# get/copy delegate to the pmnc runtime configuration facility (pmnc is
# injected by the framework at load time).
__all__ = [ "get", "copy" ]
get = lambda key, default = None: pmnc.config.get_(config, {}, key, default)
copy = lambda: pmnc.config.copy_(config, {})
| true | true |
f73ac2d1a91f5a501e93c6d1a2da04f2a930a4fe | 91,108 | py | Python | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 288 | 2018-10-10T14:35:11.000Z | 2022-03-26T14:49:13.000Z | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 49 | 2018-10-19T08:49:10.000Z | 2021-11-23T02:52:04.000Z | ProgettoLube/WebInspector/venv/Lib/site-packages/tensorflow_estimator/python/estimator/canned/v1/linear_testing_utils_v1.py | Lube-Project/ProgettoLube | cbf33971e2c2e865783ec1a2302625539186a338 | [
"MIT"
] | 228 | 2018-10-10T14:41:08.000Z | 2022-03-28T10:54:05.000Z | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utils for testing linear estimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import run_config
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
try:
  # pylint: disable=g-import-not-at-top
  import pandas as pd
  HAS_PANDAS = True
except IOError:
  # Pandas writes a temporary file during import. If it fails, don't use pandas.
  HAS_PANDAS = False
except ImportError:
  HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
# This is so that we can easily switch between feature_column and
# feature_column_v2 for testing.
# (Aliases the private v1 constructors onto the public names so test code can
# use the same attribute names on either module.)
feature_column.numeric_column = feature_column._numeric_column
feature_column.categorical_column_with_hash_bucket = feature_column._categorical_column_with_hash_bucket  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_list = feature_column._categorical_column_with_vocabulary_list  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_file = feature_column._categorical_column_with_vocabulary_file  # pylint: disable=line-too-long
feature_column.embedding_column = feature_column._embedding_column
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting `actual` =~ `expected` within relative `rtol`."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    # Element-wise relative error |expected - actual| / |expected|.
    relative_error = tf.math.abs(expected - actual, 'diff') / tf.math.abs(
        expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    # Report all operands if the element-wise check fails.
    return tf.compat.v1.debugging.assert_less(
        relative_error,
        rtol,
        data=('Condition expected =~ actual did not hold element-wise:'
              'expected = ', expected, 'actual = ', actual, 'rdiff = ',
              relative_error, 'rtol = ', rtol,),
        name=scope)
def save_variables_to_ckpt(model_dir):
  """Runs all global-variable initializers and saves a checkpoint in `model_dir`."""
  ckpt_path = os.path.join(model_dir, 'model.ckpt')
  with tf.compat.v1.Session() as sess:
    # Initialize every global variable before saving.
    sess.run([tf.compat.v1.initializers.global_variables()])
    tf.compat.v1.train.Saver().save(sess, ckpt_path)
def queue_parsed_features(feature_map):
  """Feeds parsed feature tensors through a FIFO queue.

  Enqueues the tensors on a background `QueueRunner`, decoupling parsing from
  the consumer of the returned tensors.

  Args:
    feature_map: `dict` mapping feature names to tensors.

  Returns:
    A `dict` with the same keys whose values are the dequeued tensors.
  """
  # Build the key list once and index into the dict, instead of the old
  # six.iteritems() loop appending to two parallel lists -- six is not needed
  # under Python 3 and this keeps keys/tensors aligned by construction.
  keys = list(feature_map)
  tensors_to_enqueue = [feature_map[key] for key in keys]
  queue_dtypes = [x.dtype for x in tensors_to_enqueue]
  input_queue = tf.queue.FIFOQueue(capacity=100, dtypes=queue_dtypes)
  # The queue runner re-enqueues the tensors from a background thread once a
  # session starts the runners.
  tf.compat.v1.train.queue_runner.add_queue_runner(
      tf.compat.v1.train.queue_runner.QueueRunner(
          input_queue, [input_queue.enqueue(tensors_to_enqueue)]))
  dequeued_tensors = input_queue.dequeue()
  return dict(zip(keys, dequeued_tensors))
def sorted_key_dict(unsorted_dict):
  """Returns a copy of `unsorted_dict` whose keys iterate in sorted order."""
  result = {}
  for key in sorted(unsorted_dict):
    result[key] = unsorted_dict[key]
  return result
def sigmoid(x):
  """Element-wise logistic function `1 / (1 + exp(-x))`."""
  z = np.exp(-1.0 * x)
  return 1 / (1 + z)
class CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):
  """A `SessionRunHook` to check a partitioned variable."""

  def __init__(self, test_case, var_name, var_dim, partitions):
    # The hook asserts through `test_case` so failures surface in the test.
    self._test_case = test_case
    self._var_name = var_name
    self._var_dim = var_dim
    self._partitions = partitions

  def begin(self):
    # Re-enter the current variable scope with reuse enabled so get_variable
    # fetches the variable the estimator already created (graph mode).
    with tf.compat.v1.variable_scope(
        tf.compat.v1.get_variable_scope()) as scope:
      scope.reuse_variables()
      partitioned_weight = tf.compat.v1.get_variable(
          self._var_name, shape=(self._var_dim, 1))
      self._test_case.assertTrue(
          isinstance(partitioned_weight, variables_lib.PartitionedVariable))
      # Each partition must hold an equal slice of the first dimension.
      for part in partitioned_weight:
        self._test_case.assertEqual(self._var_dim // self._partitions,
                                    part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
  """Tests that linear regressors honor custom and default partitioners."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def testPartitioner(self):
    x_dim = 64
    partitions = 4

    def _partitioner(shape, dtype):
      del dtype  # unused; required by Fn signature.
      # Only partition the embedding tensor.
      return [partitions, 1] if shape[0] == x_dim else [1]

    regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
            'language', hash_bucket_size=x_dim),),
        partitioner=_partitioner,
        model_dir=self._model_dir)

    def _input_fn():
      return {
          'language':
              tf.sparse.SparseTensor(
                  values=['english', 'spanish'],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }, [[10.]]

    # The hook asserts during training that the weight was split into
    # `partitions` equal slices.
    hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                   partitions)
    regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])

  def testDefaultPartitionerWithMultiplePsReplicas(self):
    partitions = 2
    # This results in weights larger than the default partition size of 64M,
    # so partitioned weights are created (each weight uses 4 bytes).
    x_dim = 32 << 20

    class FakeRunConfig(run_config.RunConfig):

      @property
      def num_ps_replicas(self):
        return partitions

    # Mock the device setter as ps is not available on test machines.
    with tf.compat.v1.test.mock.patch.object(
        estimator,
        '_get_replica_device_setter',
        return_value=lambda _: '/cpu:0'):
      linear_regressor = self._linear_regressor_fn(
          feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
              'language', hash_bucket_size=x_dim),),
          config=FakeRunConfig(),
          model_dir=self._model_dir)

      def _input_fn():
        return {
            'language':
                tf.sparse.SparseTensor(
                    values=['english', 'spanish'],
                    indices=[[0, 0], [0, 1]],
                    dense_shape=[1, 2])
        }, [[10.]]

      hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                     partitions)
      linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
def __init__(self, linear_regressor_fn, fc_lib=feature_column):
self._linear_regressor_fn = linear_regressor_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
tf.compat.v1.summary.FileWriterCache.clear()
shutil.rmtree(self._model_dir)
def test_evaluation_for_simple_data(self):
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,),)
}, ((10.,),)), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 9.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_batch(self):
"""Tests evaluation for batch_size==2."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(
input_fn=lambda: ({
'age': ((1,), (1,))
}, ((10.,), (10.,))), steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the sum over batch = 9 + 9 = 18
# Average loss is the average over batch = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 18.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_weights(self):
"""Tests evaluation with weights."""
with tf.Graph().as_default():
tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
tf.Variable([2.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
def _input_fn():
features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
labels = ((10.,), (10.,))
return features, labels
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='weights',
model_dir=self._model_dir)
eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
# Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
# Loss per example is 3**2 = 9.
# Training loss is the weighted sum over batch = 9 + 2*9 = 27
# average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
self.assertDictEqual(
{
metric_keys.MetricKeys.LOSS: 27.,
metric_keys.MetricKeys.LOSS_MEAN: 9.,
metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
metric_keys.MetricKeys.LABEL_MEAN: 10.,
tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
}, eval_metrics)
def test_evaluation_for_multi_dimensions(self):
x_dim = 3
label_dim = 2
with tf.Graph().as_default():
tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
tf.Variable([7.0, 8.0], name=BIAS_NAME)
tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
linear_regressor = self._linear_regressor_fn(
feature_columns=(self._fc_lib.numeric_column('age', shape=(x_dim,)),),
label_dimension=label_dim,
model_dir=self._model_dir)
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([[2., 4., 5.]]),
},
y=np.array([[46., 58.]]),
batch_size=1,
num_epochs=None,
shuffle=False)
eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is
# [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
# [3.0, 4.0]
# [5.0, 6.0]
# which is [46, 58]
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
def test_evaluation_for_multiple_feature_columns(self):
with tf.Graph().as_default():
tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
tf.Variable([5.0], name=BIAS_NAME)
tf.Variable(
100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
batch_size = 2
feature_columns = [
self._fc_lib.numeric_column('age'),
self._fc_lib.numeric_column('height')
]
input_fn = numpy_io.numpy_input_fn(
x={
'age': np.array([20, 40]),
'height': np.array([4, 8])
},
y=np.array([[213.], [421.]]),
batch_size=batch_size,
num_epochs=None,
shuffle=False)
est = self._linear_regressor_fn(
feature_columns=feature_columns, model_dir=self._model_dir)
eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
self.assertItemsEqual(
(metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
metric_keys.MetricKeys.PREDICTION_MEAN,
metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
eval_metrics.keys())
# Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
# [213.0, 421.0], while label is [213., 421.]. Loss = 0.
self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
  def test_evaluation_for_multiple_feature_columns_mix(self):
    """Same as above, but mixes v1 and v2 (tf.feature_column) columns."""
    # Write a checkpoint with known weights/bias so eval loss is exactly 0.
    with tf.Graph().as_default():
      tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
      tf.Variable([5.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    batch_size = 2
    # Deliberately one v1 column and one v2 column.
    feature_columns = [
        feature_column.numeric_column('age'),
        tf.feature_column.numeric_column('height')
    ]
    def _input_fn():
      # Same data as the numpy_input_fn variant, built via tf.data.
      features_ds = tf.compat.v1.data.Dataset.from_tensor_slices({
          'age': np.array([20, 40]),
          'height': np.array([4, 8])
      })
      labels_ds = tf.compat.v1.data.Dataset.from_tensor_slices(
          np.array([[213.], [421.]]))
      return (tf.compat.v1.data.Dataset.zip(
          (features_ds, labels_ds)).batch(batch_size).repeat(None))
    est = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    eval_metrics = est.evaluate(input_fn=_input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())
    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
  """Tests LinearRegressor.predict() against hand-written checkpoints.

  The estimator constructor and feature-column library are injected so the
  same suite exercises both v1 and v2 variants.
  """

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test so checkpoints never leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def test_1d(self):
    """Tests predict when all variables are one-dimensional."""
    # Write a checkpoint with known weight/bias, then predict against it.
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x'),),
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x * weight + bias = 2. * 10. + .2 = 20.2
    self.assertAllClose([[20.2]], predicted_scores)

  def testMultiDim(self):
    """Tests predict when all variables are multi-dimensional."""
    batch_size = 2
    label_dimension = 3
    x_dim = 4
    feature_columns = (self._fc_lib.numeric_column('x', shape=(x_dim,)),)
    with tf.Graph().as_default():
      tf.Variable(  # shape=[x_dim, label_dimension]
          [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
          name='linear/linear_model/x/weights')
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = x * weight + bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
                        predicted_scores)

  def testTwoFeatureColumns(self):
    """Tests predict with two feature columns."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x0'),
                         self._fc_lib.numeric_column('x1')),
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={
            'x0': np.array([[2.]]),
            'x1': np.array([[3.]])
        },
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)

  def testTwoFeatureColumnsMix(self):
    """Tests predict with two feature columns, mixing v1 and v2 columns."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column.numeric_column('x0'),
                         tf.feature_column.numeric_column('x1')),
        model_dir=self._model_dir)
    def _predict_input_fn():
      # tf.data input pipeline instead of numpy_input_fn.
      return tf.compat.v1.data.Dataset.from_tensor_slices({
          'x0': np.array([[2.]]),
          'x1': np.array([[3.]])
      }).batch(1)
    predictions = linear_regressor.predict(input_fn=_predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)

  def testSparseCombiner(self):
    """Tests predict under each sparse_combiner ('sum', 'mean', 'sqrtn')."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    def _input_fn():
      # Two examples, each with two active categorical values.
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })
    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)
    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[12.0], [13.0]], predicted_scores)
    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[8.5], [9.0]], predicted_scores)
    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for LinearRegressor."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test so checkpoints never leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export with the given input_fns."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)
    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return
    # Pandas DataFrame naturally supports 1 dim data only.
    label_dimension = 1
    input_dimension = label_dimension
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    prediction_length = 4
    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)

  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    # Serialize each row as a tf.Example with 'x' (features) and 'y' (labels).
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=datum)),
                  'y':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(
                              value=datum[:label_dimension])),
              }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
    }

    def _train_input_fn():
      # Unlimited epochs for training.
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      # Single epoch for evaluation.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      # Single epoch, labels discarded for prediction.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
  """Tests LinearRegressor training: optimizer wiring and checkpoint state."""

  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test so checkpoints never leak across tests.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)

  def _mock_optimizer(self, expected_loss=None):
    """Returns a mock optimizer whose minimize() verifies vars and loss.

    Args:
      expected_loss: If set, an assert op is added that checks the training
        loss is close to this value; otherwise only the trainable variable
        names are checked.

    Returns:
      A NonCallableMock wrapping a real v1 Optimizer, whose `minimize` records
      calls and increments the global step.
    """
    expected_var_names = [
        '%s/part_0:0' % AGE_WEIGHT_NAME,
        '%s/part_0:0' % BIAS_NAME
    ]

    def _minimize(loss, global_step=None, var_list=None):
      trainable_vars = var_list or tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(expected_var_names,
                            [var.name for var in trainable_vars])
      # Verify loss. We can't check the value directly, so we add an assert op.
      # (assertEqual, not the deprecated assertEquals alias removed in
      # Python 3.12's unittest.)
      self.assertEqual(0, loss.shape.ndims)
      if expected_loss is None:
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      with tf.control_dependencies((assert_loss,)):
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()

    mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
        spec=tf.compat.v1.train.Optimizer,
        wraps=tf.compat.v1.train.Optimizer(
            use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
    # NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
    # So, return mock_optimizer itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer

  def _assert_checkpoint(self,
                         expected_global_step,
                         expected_age_weight=None,
                         expected_bias=None):
    """Checks global step and, optionally, weight/bias in the checkpoint."""
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertEqual(expected_age_weight,
                       tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
    self.assertEqual([1], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))

  def testFromScratchWithDefaultOptimizer(self):
    """Trains from scratch with the default optimizer; checks the step."""
    # Create LinearRegressor.
    label = 5.
    age = 17
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(num_steps)

  def testTrainWithOneDimLabel(self):
    """Trains with rank-1 labels (no trailing label dimension)."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)

  def testTrainWithOneDimWeight(self):
    """Trains with a rank-1 example-weight column."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)

  def testFromScratch(self):
    """Trains from scratch with a mock optimizer asserting the exact loss."""
    # Create LinearRegressor.
    label = 5.
    age = 17
    # loss = (logits - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = self._mock_optimizer(expected_loss=25.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=num_steps,
        expected_age_weight=0.,
        expected_bias=0.)

  def testFromCheckpoint(self):
    """Resumes from a hand-built checkpoint; weights must stay unchanged."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # logits = age * age_weight + bias = 17 * 10. + 5. = 175
    # loss = (logits - label)^2 = (175 - 5)^2 = 28900
    mock_optimizer = self._mock_optimizer(expected_loss=28900.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,),)
        }, ((5.,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)

  def testFromCheckpointMultiBatch(self):
    """Like testFromCheckpoint, but with a two-example batch (summed loss)."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # logits = age * age_weight + bias
    # logits[0] = 17 * 10. + 5. = 175
    # logits[1] = 15 * 10. + 5. = 155
    # loss = sum(logits - label)^2 = (175 - 5)^2 + (155 - 3)^2 = 52004
    mock_optimizer = self._mock_optimizer(expected_loss=52004.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,), (15,))
        }, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
def __init__(self, linear_classifier_fn, fc_lib=feature_column):
self._linear_classifier_fn = linear_classifier_fn
self._fc_lib = fc_lib
def setUp(self):
self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
# Verify loss. We can't check the value directly, so we add an assert op.
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return tf.compat.v1.assign_add(global_step, 1).op
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
return tf.compat.v1.assign_add(global_step, 1).op
mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
spec=tf.compat.v1.train.Optimizer,
wraps=tf.compat.v1.train.Optimizer(
use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
def _assert_checkpoint(self,
n_classes,
expected_global_step,
expected_age_weight=None,
expected_bias=None):
logits_dimension = n_classes if n_classes > 2 else 1
shapes = {
name: shape
for (name, shape) in tf.train.list_variables(self._model_dir)
}
self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
self.assertEqual(
expected_global_step,
tf.train.load_variable(self._model_dir,
tf.compat.v1.GraphKeys.GLOBAL_STEP))
self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME])
if expected_age_weight is not None:
self.assertAllEqual(
expected_age_weight,
tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
self.assertEqual([logits_dimension], shapes[BIAS_NAME])
if expected_bias is not None:
self.assertAllEqual(expected_bias,
tf.train.load_variable(self._model_dir, BIAS_NAME))
def _testFromScratchWithDefaultOptimizer(self, n_classes):
label = 0
age = 17
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
# Train for a few steps, and validate final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
def _testTrainWithTwoDimsLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_2,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
def _testTrainWithOneDimLabel(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={'age': data_rank_1},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
def _testTrainWithTwoDimsWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
data_rank_2 = np.array([[0], [1]])
self.assertEqual((2,), data_rank_1.shape)
self.assertEqual((2, 1), data_rank_2.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_2
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
def _testTrainWithOneDimWeight(self, n_classes):
batch_size = 20
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
weight_column='w',
n_classes=n_classes,
model_dir=self._model_dir)
data_rank_1 = np.array([0, 1])
self.assertEqual((2,), data_rank_1.shape)
train_input_fn = numpy_io.numpy_input_fn(
x={
'age': data_rank_1,
'w': data_rank_1
},
y=data_rank_1,
batch_size=batch_size,
num_epochs=None,
shuffle=True)
est.train(train_input_fn, steps=200)
self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
def _testFromScratch(self, n_classes):
label = 1
age = 17
# For binary classifier:
# loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( sigmoid(logits) ) = 0.69315
# For multi class classifier:
# loss = cross_entropy(logits, label) where logits are all 0s (weights are
# all zero initially) and label = 1 so,
# loss = 1 * -log ( 1.0 / n_classes )
# For this particular test case, as logits are same, the formular
# 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
mock_optimizer = self._mock_optimizer(
expected_loss=(-1 * math.log(1.0 / n_classes)))
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=num_steps,
expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
def _testFromCheckpoint(self, n_classes):
# Create initial checkpoint.
label = 1
age = 17
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = 17 * age_weight + bias and label = 1
# so, loss = 1 * -log ( soft_max(logits)[1] )
if n_classes == 2:
expected_loss = 1.3133
else:
logits = age_weight * age + bias
logits_exp = np.exp(logits)
softmax = logits_exp / logits_exp.sum()
expected_loss = -1 * math.log(softmax[0, label])
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
def _testFromCheckpointFloatLabels(self, n_classes):
"""Tests float labels for binary classification."""
# Create initial checkpoint.
if n_classes > 2:
return
label = 0.8
age = 17
age_weight = [[2.0]]
bias = [-35.0]
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# logits = age * age_weight + bias = 17 * 2. - 35. = -1.
# loss = sigmoid_cross_entropy(logits, label)
# => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(
input_fn=lambda: ({
'age': ((age,),)
}, ((label,),)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
def _testFromCheckpointMultiBatch(self, n_classes):
# Create initial checkpoint.
label = [1, 0]
age = [17.0, 18.5]
# For binary case, the expected weight has shape (1,1). For multi class
# case, the shape is (1, n_classes). In order to test the weights, set
# weights as 2.0 * range(n_classes).
age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
2.0 * np.array(list(range(n_classes)), dtype=np.float32),
(1, n_classes)))
bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
initial_global_step = 100
with tf.Graph().as_default():
tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
tf.Variable(bias, name=BIAS_NAME)
tf.Variable(
initial_global_step,
name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
dtype=tf.dtypes.int64)
save_variables_to_ckpt(self._model_dir)
# For binary classifier:
# logits = age * age_weight + bias
# logits[0] = 17 * 2. - 35. = -1.
# logits[1] = 18.5 * 2. - 35. = 2.
# loss = sigmoid_cross_entropy(logits, label)
# so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
# loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
# expected_loss = loss[0] + loss[1]
# For multi class classifier:
# loss = cross_entropy(logits, label)
# where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
# so, loss = 1 * -log ( soft_max(logits)[label] )
# expected_loss = loss[0] + loss[1]
if n_classes == 2:
expected_loss = 1.3133 + 2.1269
else:
logits = age_weight * np.reshape(age, (2, 1)) + bias
logits_exp = np.exp(logits)
softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
expected_loss = expected_loss_0 + expected_loss_1
mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
est = linear.LinearClassifier(
feature_columns=(self._fc_lib.numeric_column('age'),),
n_classes=n_classes,
optimizer=mock_optimizer,
model_dir=self._model_dir)
self.assertEqual(0, mock_optimizer.minimize.call_count)
# Train for a few steps, and validate optimizer and final checkpoint.
num_steps = 10
est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)
self.assertEqual(1, mock_optimizer.minimize.call_count)
self._assert_checkpoint(
n_classes,
expected_global_step=initial_global_step + num_steps,
expected_age_weight=age_weight,
expected_bias=bias)
  def testBinaryClassesFromCheckpointMultiBatch(self):
    """Runs the multi-batch from-checkpoint training test with a binary head."""
    self._testFromCheckpointMultiBatch(n_classes=2)
  def testMultiClassesFromCheckpointMultiBatch(self):
    """Runs the multi-batch from-checkpoint training test with 4 classes."""
    self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
  """Mixin testing `evaluate` of a linear classifier.

  Known weights and bias are written directly into a checkpoint, so every
  expected metric can be computed by hand from the closed-form logits
  (logits = age * age_weight + bias).
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the classifier under test (v1 or v2 implementation).
    self._linear_classifier_fn = linear_classifier_fn
    # feature_column or feature_column_v2 module used to build input columns.
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_evaluation_for_simple_data(self, n_classes):
    """Evaluates a single example and checks every returned metric."""
    label = 1
    age = 1.
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes

    # Seed the model directory with a checkpoint holding the fixed variables.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=1)

    if n_classes == 2:
      # Binary classes: loss = sum(cross_entropy(41)) = 41.
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: 41.,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: 41.,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
          metric_keys.MetricKeys.LABEL_MEAN: 1.,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 1.,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * age + bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[0, label])

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=2)

  def test_multi_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=4)

  def _test_evaluation_batch(self, n_classes):
    """Tests evaluation for batch_size==2."""
    label = [1, 0]
    age = [17., 18.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      expected_loss = 1.3133 * 2
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
          metric_keys.MetricKeys.LABEL_MEAN: 0.5,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.25,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=2)

  def test_multi_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=4)

  def _test_evaluation_weights(self, n_classes):
    """Tests evaluation with weights."""
    label = [1, 0]
    age = [17., 18.]
    weights = [1., 2.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    # 'w' is consumed as the example-weight column.
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        weight_column='w',
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age),
            'w': (weights)
        }, (label)), steps=1)

    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      # weights = [1., 2.]
      expected_loss = 1.3133 * (1. + 2.)
      loss_mean = expected_loss / (1.0 + 2.0)
      label_mean = np.average(label, weights=weights)
      logits = [-1, 1]
      logistics = sigmoid(np.array(logits))
      predictions_mean = np.average(logistics, weights=weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
          metric_keys.MetricKeys.LABEL_MEAN: label_mean,
          metric_keys.MetricKeys.ACCURACY_BASELINE:
              (max(label_mean, 1 - label_mean)),
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.1668,
      }
    else:
      # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      loss_mean = np.average([expected_loss_0, expected_loss_1],
                             weights=weights)
      expected_loss = loss_mean * np.sum(weights)

      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }

    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)

  def test_binary_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=2)

  def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
  """Mixin testing `predict` of a linear classifier.

  Known weights and bias are written directly into a checkpoint, so the
  expected prediction dict (logits, probabilities, class ids, ...) can be
  computed by hand.
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the classifier under test (v1 or v2 implementation).
    self._linear_classifier_fn = linear_classifier_fn
    # feature_column or feature_column_v2 module used to build input columns.
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    """Tests predict when all variables are one-dimensional.

    Args:
      n_classes: Number of label classes (2 selects the binary head).
      label_vocabulary: Optional list of string labels for the classifier.
      label_output_fn: Maps a class id to the expected bytes value of the
        'classes' / 'all_classes' prediction keys.
    """
    age = 1.

    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [10.0] if n_classes == 2 else [10.0] * n_classes

    # Seed the model directory with a checkpoint holding the fixed variables.
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        label_vocabulary=label_vocabulary,
        n_classes=n_classes,
        model_dir=self._model_dir)

    predict_input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([[age]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = list(est.predict(input_fn=predict_input_fn))

    if n_classes == 2:
      # Bug fix: np.asscalar was deprecated in NumPy 1.16 and removed in
      # 1.23; ndarray.item() is the documented replacement and returns the
      # same Python scalar.
      scalar_logits = np.reshape(
          np.array(age_weight) * age + bias, (1,)).item()
      two_classes_logits = [0, scalar_logits]
      two_classes_logits_exp = np.exp(two_classes_logits)
      softmax = two_classes_logits_exp / two_classes_logits_exp.sum()

      expected_predictions = {
          'class_ids': [0],
          'all_class_ids': [0, 1],
          'classes': [label_output_fn(0)],
          'all_classes': [label_output_fn(0),
                          label_output_fn(1)],
          'logistic': [sigmoid(np.array(scalar_logits))],
          'logits': [scalar_logits],
          'probabilities': softmax,
      }
    else:
      onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
      class_ids = onedim_logits.argmax()
      all_class_ids = list(range(len(onedim_logits)))
      logits_exp = np.exp(onedim_logits)
      softmax = logits_exp / logits_exp.sum()
      expected_predictions = {
          'class_ids': [class_ids],
          'all_class_ids': all_class_ids,
          'classes': [label_output_fn(class_ids)],
          'all_classes': [label_output_fn(i) for i in all_class_ids],
          'logits': onedim_logits,
          'probabilities': softmax,
      }

    self.assertEqual(1, len(predictions))
    # assertAllClose cannot handle byte type.
    self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
    expected_predictions.pop('classes')
    predictions[0].pop('classes')
    self.assertAllEqual(expected_predictions['all_classes'],
                        predictions[0]['all_classes'])
    expected_predictions.pop('all_classes')
    predictions[0].pop('all_classes')
    self.assertAllClose(
        sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))

  def testBinaryClassesWithoutLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testBinaryClassesWithLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testMultiClassesWithoutLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())

  def testMultiClassesWithLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())

  def testSparseCombiner(self):
    """Checks predictions for each supported sparse_combiner mode."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)

    def _input_fn():
      # Two examples; each activates two of the three vocabulary entries.
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })

    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)

    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[12.0], [13.0]], predicted_scores)

    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[8.5], [9.0]], predicted_scores)

    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearClassifierIntegrationTest(object):
  """Mixin running the full train/evaluate/predict/export flow.

  Exercises the classifier end-to-end with three input pipelines:
  numpy_input_fn, pandas_input_fn, and parse_example-based input_fns.
  """

  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the classifier under test (v1 or v2 implementation).
    self._linear_classifier_fn = linear_classifier_fn
    # feature_column or feature_column_v2 module used to build input columns.
    self._fc_lib = fc_lib

  def setUp(self):
    # Fresh model directory per test; removed in tearDown.
    self._model_dir = tempfile.mkdtemp()

  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)

  def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
                          predict_input_fn, input_dimension, prediction_length):
    """Trains, evaluates, predicts, and exports with the given input_fns."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_classifier_fn(
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)

    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)

    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))

    # PREDICT
    predictions = np.array(
        [x['classes'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, 1), predictions.shape)

    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))

  def _test_numpy_input_fn(self, n_classes):
    """Tests complete flow with numpy_input_fn."""
    input_dimension = 4
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size)

    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=2)

  def test_multi_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=4)

  def _test_pandas_input_fn(self, n_classes):
    """Tests complete flow with pandas_input_fn."""
    if not HAS_PANDAS:
      return

    # Pandas DataFrame naturally supports 1 dim data only.
    input_dimension = 1
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    target = np.array([1, 0, 1, 0], dtype=np.int32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(target)
    prediction_length = 4

    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=2)

  def test_multi_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=4)

  def _test_input_fn_from_parse_example(self, n_classes):
    """Tests complete flow with input_fn constructed from parse_example."""
    input_dimension = 2
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size, dtype=np.int64)

    # Serialize each (x, y) pair as a tf.Example proto.
    serialized_examples = []
    for x, y in zip(data, target):
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=x)),
                  'y':
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(value=[y])),
              }))
      serialized_examples.append(example.SerializeToString())

    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
    }

    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _eval_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels

    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None

    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)

  def test_binary_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=2)

  def test_multi_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
  """Mixin testing the linear logit builder and its sparsity computation."""

  def __init__(self, fc_lib=feature_column):
    # feature_column or feature_column_v2 module used to build input columns.
    self._fc_lib = fc_lib

  def test_basic_logit_correctness(self):
    """linear_logit_fn simply wraps feature_column_lib.linear_model."""
    age = self._fc_lib.numeric_column('age')
    with tf.Graph().as_default():
      logit_fn = linear.linear_logit_fn_builder(units=2, feature_columns=[age])
      logits = logit_fn(features={'age': [[23.], [31.]]})
      bias_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
          'linear_model/bias_weights')[0]
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Zero-initialized variables produce all-zero logits.
        self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
        sess.run(bias_var.assign([10., 5.]))
        self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
        sess.run(age_var.assign([[2.0, 3.0]]))
        # [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
        # [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
        self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())

  def test_compute_fraction_of_zero(self):
    """Tests the calculation of sparsity."""
    # Only applies to the v1 feature_column code path.
    if self._fc_lib != feature_column:
      return
    age = tf.feature_column.numeric_column('age')
    occupation = feature_column.categorical_column_with_hash_bucket(
        'occupation', hash_bucket_size=5)
    with tf.Graph().as_default():
      cols_to_vars = {}
      tf.compat.v1.feature_column.linear_model(
          features={
              'age': [[23.], [31.]],
              'occupation': [['doctor'], ['engineer']]
          },
          feature_columns=[age, occupation],
          units=3,
          cols_to_vars=cols_to_vars)
      # The bias is excluded from the sparsity computation.
      cols_to_vars.pop('bias')
      fraction_zero = linear._compute_fraction_of_zero(
          list(cols_to_vars.values()))
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Upon initialization, all variables will be zero.
        self.assertAllClose(1, fraction_zero.eval())

        sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
        # x 3-dim output) are zero.
        self.assertAllClose(16. / 18., fraction_zero.eval())

  def test_compute_fraction_of_zero_v2(self):
    """Tests the calculation of sparsity."""
    # Only applies to the v2 feature_column code path.
    if self._fc_lib != feature_column_v2:
      return

    age = tf.feature_column.numeric_column('age')
    occupation = tf.feature_column.categorical_column_with_hash_bucket(
        'occupation', hash_bucket_size=5)
    with tf.Graph().as_default():
      model = feature_column_v2.LinearModel(
          feature_columns=[age, occupation], units=3, name='linear_model')
      features = {
          'age': [[23.], [31.]],
          'occupation': [['doctor'], ['engineer']]
      }
      model(features)
      variables = model.variables
      # The bias is excluded from the sparsity computation.
      variables.remove(model.bias)
      fraction_zero = linear._compute_fraction_of_zero(variables)
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Upon initialization, all variables will be zero.
        self.assertAllClose(1, fraction_zero.eval())

        sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
        # x 3-dim output) are zero.
        self.assertAllClose(16. / 18., fraction_zero.eval())
class BaseLinearWarmStartingTest(object):
  """Mixin testing warm-starting of linear classifiers and regressors.

  Each test trains a first estimator for one step, then builds a second one
  warm-started from the first with a zero-learning-rate SGD optimizer so the
  warm-started variable values can be compared unchanged.
  """

  def __init__(self,
               _linear_classifier_fn,
               _linear_regressor_fn,
               fc_lib=feature_column):
    # Constructors of the estimators under test (v1 or v2 implementations).
    self._linear_classifier_fn = _linear_classifier_fn
    self._linear_regressor_fn = _linear_regressor_fn
    # feature_column or feature_column_v2 module used to build input columns.
    self._fc_lib = fc_lib

  def setUp(self):
    # Create a directory to save our old checkpoint and vocabularies to.
    self._ckpt_and_vocab_dir = tempfile.mkdtemp()

    # Make a dummy input_fn.
    def _input_fn():
      features = {
          'age': [[23.], [31.]],
          'age_in_years': [[23.], [31.]],
          'occupation': [['doctor'], ['consultant']]
      }
      return features, [0, 1]

    self._input_fn = _input_fn

  def tearDown(self):
    # Clean up checkpoint / vocab dir.
    tf.compat.v1.summary.FileWriterCache.clear()
    shutil.rmtree(self._ckpt_and_vocab_dir)

  def test_classifier_basic_warm_starting(self):
    """Tests correctness of LinearClassifier default warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_classifier.model_dir)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Every variable should have been copied verbatim from the checkpoint.
    for variable_name in warm_started_linear_classifier.get_variable_names():
      self.assertAllClose(
          linear_classifier.get_variable_value(variable_name),
          warm_started_linear_classifier.get_variable_value(variable_name))

  def test_regressor_basic_warm_starting(self):
    """Tests correctness of LinearRegressor default warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearRegressor and train to save a checkpoint.
    linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        optimizer='SGD')
    linear_regressor.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearRegressor, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_regressor.model_dir)
    warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)

    # Every variable should have been copied verbatim from the checkpoint.
    for variable_name in warm_started_linear_regressor.get_variable_names():
      self.assertAllClose(
          linear_regressor.get_variable_value(variable_name),
          warm_started_linear_regressor.get_variable_value(variable_name))

  def test_warm_starting_selective_variables(self):
    """Tests selecting variables to warm-start."""
    age = self._fc_lib.numeric_column('age')

    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        # The provided regular expression will only warm-start the age variable
        # and not the bias.
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            vars_to_warm_start='.*(age).*'))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    self.assertAllClose(
        linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    # Bias should still be zero from initialization.
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))

  def test_warm_starting_with_vocab_remapping_and_partitioning(self):
    """Tests warm-starting with vocab remapping and partitioning."""
    vocab_list = ['doctor', 'lawyer', 'consultant']
    vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
    with open(vocab_file, 'w') as f:
      f.write('\n'.join(vocab_list))
    occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=vocab_file,
        vocabulary_size=len(vocab_list))

    # Create a LinearClassifier and train to save a checkpoint.
    partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2)
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD',
        partitioner=partitioner)
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).  Use a new FeatureColumn with a
    # different vocabulary for occupation.
    new_vocab_list = ['doctor', 'consultant', 'engineer']
    new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
                                  'new_occupation_vocab')
    with open(new_vocab_file, 'w') as f:
      f.write('\n'.join(new_vocab_list))
    new_occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=new_vocab_file,
        vocabulary_size=len(new_vocab_list))
    # We can create our VocabInfo object from the new and old occupation
    # FeatureColumn's.
    occupation_vocab_info = estimator.VocabInfo(
        new_vocab=new_occupation.vocabulary_file,
        new_vocab_size=new_occupation.vocabulary_size,
        num_oov_buckets=new_occupation.num_oov_buckets,
        old_vocab=occupation.vocabulary_file,
        old_vocab_size=occupation.vocabulary_size,
        # Can't use constant_initializer with load_and_remap.  In practice,
        # use a truncated normal initializer.
        backup_initializer=tf.compat.v1.initializers.random_uniform(
            minval=0.39, maxval=0.39))
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_vocab_info={
                OCCUPATION_WEIGHT_NAME: occupation_vocab_info
            },
            # Explicitly providing None here will only warm-start variables
            # referenced in var_name_to_vocab_info (the bias will not be
            # warm-started).
            vars_to_warm_start=None),
        partitioner=partitioner)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # 'doctor' was ID-0 and still ID-0.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[0, :])
    # 'consultant' was ID-2 and now ID-1.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[1, :])
    # 'engineer' is a new entry and should be initialized with the
    # backup_initializer in VocabInfo.
    self.assertAllClose([0.39] * 4,
                        warm_started_linear_classifier.get_variable_value(
                            OCCUPATION_WEIGHT_NAME)[2, :])
    # Bias should still be zero (from initialization logic).
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))

  def test_warm_starting_with_naming_change(self):
    """Tests warm-starting with a Tensor name remapping."""
    age_in_years = self._fc_lib.numeric_column('age_in_years')

    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age_in_years],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    # Create a second LinearClassifier, warm-started from the first.  Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[self._fc_lib.numeric_column('age')],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        # The 'age' variable correspond to the 'age_in_years' variable in the
        # previous model.
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_prev_var_name={
                AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
            }))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)

    self.assertAllClose(
        linear_classifier.get_variable_value(
            AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    # The bias is also warm-started (with no name remapping).
    self.assertAllClose(
        linear_classifier.get_variable_value(BIAS_NAME),
        warm_started_linear_classifier.get_variable_value(BIAS_NAME))
| 37.78847 | 145 | 0.663015 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import os
import shutil
import tempfile
import numpy as np
import six
import tensorflow as tf
from tensorflow.core.example import example_pb2
from tensorflow.core.example import feature_pb2
from tensorflow.python.feature_column import feature_column
from tensorflow.python.feature_column import feature_column_v2
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables as variables_lib
from tensorflow_estimator.python.estimator import estimator
from tensorflow_estimator.python.estimator import run_config
from tensorflow_estimator.python.estimator.canned import linear
from tensorflow_estimator.python.estimator.canned import metric_keys
from tensorflow_estimator.python.estimator.export import export
from tensorflow_estimator.python.estimator.inputs import numpy_io
from tensorflow_estimator.python.estimator.inputs import pandas_io
# pandas is an optional dependency for these tests; both a missing package
# (ImportError) and a broken install (IOError) simply disable pandas tests.
try:
  import pandas as pd
  HAS_PANDAS = True
except (IOError, ImportError):
  HAS_PANDAS = False
# pylint rules which are disabled by default for test files.
# pylint: disable=invalid-name,protected-access,missing-docstring
# Names of variables created by model. Tests below hand-write checkpoints
# under these names and read them back with tf.train.load_variable.
AGE_WEIGHT_NAME = 'linear/linear_model/age/weights'
HEIGHT_WEIGHT_NAME = 'linear/linear_model/height/weights'
OCCUPATION_WEIGHT_NAME = 'linear/linear_model/occupation/weights'
BIAS_NAME = 'linear/linear_model/bias_weights'
LANGUAGE_WEIGHT_NAME = 'linear/linear_model/language/weights'
# This is so that we can easily switch between feature_column and
# feature_column_v2 for testing.
# NOTE(review): this monkey-patches private (_-prefixed) members of the
# feature_column module onto public names; it relies on those private APIs
# staying available.
feature_column.numeric_column = feature_column._numeric_column
feature_column.categorical_column_with_hash_bucket = feature_column._categorical_column_with_hash_bucket  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_list = feature_column._categorical_column_with_vocabulary_list  # pylint: disable=line-too-long
feature_column.categorical_column_with_vocabulary_file = feature_column._categorical_column_with_vocabulary_file  # pylint: disable=line-too-long
feature_column.embedding_column = feature_column._embedding_column
def assert_close(expected, actual, rtol=1e-04, name='assert_close'):
  """Returns an op asserting |expected - actual| / |expected| < rtol elementwise."""
  with ops.name_scope(name, 'assert_close', (expected, actual, rtol)) as scope:
    expected = ops.convert_to_tensor(expected, name='expected')
    actual = ops.convert_to_tensor(actual, name='actual')
    rdiff = tf.math.abs(expected - actual, 'diff') / tf.math.abs(expected)
    rtol = ops.convert_to_tensor(rtol, name='rtol')
    diagnostics = ('Condition expected =~ actual did not hold element-wise:'
                   'expected = ', expected, 'actual = ', actual, 'rdiff = ',
                   rdiff, 'rtol = ', rtol,)
    # The relative difference must stay strictly below the tolerance.
    return tf.compat.v1.debugging.assert_less(
        rdiff, rtol, data=diagnostics, name=scope)
def save_variables_to_ckpt(model_dir):
  """Initializes all global variables and saves them to <model_dir>/model.ckpt."""
  ckpt_path = os.path.join(model_dir, 'model.ckpt')
  with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.initializers.global_variables())
    tf.compat.v1.train.Saver().save(sess, ckpt_path)
def queue_parsed_features(feature_map):
  """Routes parsed feature tensors through a FIFOQueue.

  Registers a QueueRunner that enqueues the given tensors and returns a dict
  mapping the same keys to the corresponding dequeued tensors.
  """
  keys = list(feature_map)
  tensors_to_enqueue = [feature_map[key] for key in keys]
  fifo_queue = tf.queue.FIFOQueue(
      capacity=100, dtypes=[t.dtype for t in tensors_to_enqueue])
  tf.compat.v1.train.queue_runner.add_queue_runner(
      tf.compat.v1.train.queue_runner.QueueRunner(
          fifo_queue, [fifo_queue.enqueue(tensors_to_enqueue)]))
  return dict(zip(keys, fifo_queue.dequeue()))
def sorted_key_dict(unsorted_dict):
  """Returns a dict with the same items, inserted in ascending key order."""
  # Keys are unique, so sorting the (key, value) pairs never compares values.
  return dict(sorted(unsorted_dict.items()))
def sigmoid(x):
  """Logistic function 1 / (1 + e^{-x}), vectorized over numpy inputs."""
  return np.reciprocal(1 + np.exp(-1.0 * x))
class CheckPartitionerVarHook(tf.compat.v1.train.SessionRunHook):
  """Hook asserting a variable was created as an evenly partitioned variable.

  At graph-finalization time (`begin`), re-fetches `var_name` under variable
  reuse and asserts it is a `PartitionedVariable` whose parts each hold
  `var_dim // partitions` rows.
  """
  def __init__(self, test_case, var_name, var_dim, partitions):
    # test_case: the TestCase whose assert* methods report failures.
    # var_name: full name of the variable to check.
    # var_dim: expected first-dimension size of the (unpartitioned) variable.
    # partitions: expected number of partitions.
    self._test_case = test_case
    self._var_name = var_name
    self._var_dim = var_dim
    self._partitions = partitions
  def begin(self):
    """Checks the partitioning of the target variable when the graph is built."""
    with tf.compat.v1.variable_scope(
        tf.compat.v1.get_variable_scope()) as scope:
      # Reuse so get_variable fetches the existing variable rather than
      # creating a new one.
      scope.reuse_variables()
      partitioned_weight = tf.compat.v1.get_variable(
          self._var_name, shape=(self._var_dim, 1))
      self._test_case.assertTrue(
          isinstance(partitioned_weight, variables_lib.PartitionedVariable))
      # Each partition should hold an equal share of the first dimension.
      for part in partitioned_weight:
        self._test_case.assertEqual(self._var_dim // self._partitions,
                                    part.get_shape()[0])
class BaseLinearRegressorPartitionerTest(object):
  """Variable-partitioning tests for a linear regressor.

  Mixin-style base: the assert* helpers used below are not defined here, so
  concrete classes are expected to also inherit from a TestCase and inject
  the regressor constructor (v1 or v2) plus a feature-column library.
  """
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # linear_regressor_fn: callable that builds the estimator under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory for each test."""
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    """Clears cached summary writers, then removes the model directory."""
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def testPartitioner(self):
    """A user-supplied partitioner splits the weight into equal parts."""
    x_dim = 64
    partitions = 4
    def _partitioner(shape, dtype):
      del dtype  # unused; required by Fn signature.
      # Only partition the embedding tensor.
      return [partitions, 1] if shape[0] == x_dim else [1]
    regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
            'language', hash_bucket_size=x_dim),),
        partitioner=_partitioner,
        model_dir=self._model_dir)
    def _input_fn():
      return {
          'language':
              tf.sparse.SparseTensor(
                  values=['english', 'spanish'],
                  indices=[[0, 0], [0, 1]],
                  dense_shape=[1, 2])
      }, [[10.]]
    # The hook asserts at graph-build time that the language weight was
    # created as a PartitionedVariable with `partitions` pieces.
    hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                   partitions)
    regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
  def testDefaultPartitionerWithMultiplePsReplicas(self):
    """With >1 PS replicas, the default partitioner splits large weights."""
    partitions = 2
    # This results in weights larger than the default partition size of 64M,
    # so partitioned weights are created (each weight uses 4 bytes).
    x_dim = 32 << 20
    class FakeRunConfig(run_config.RunConfig):
      # Pretends the cluster has `partitions` parameter servers.
      @property
      def num_ps_replicas(self):
        return partitions
    # Mock the device setter as ps is not available on test machines.
    with tf.compat.v1.test.mock.patch.object(
        estimator,
        '_get_replica_device_setter',
        return_value=lambda _: '/cpu:0'):
      linear_regressor = self._linear_regressor_fn(
          feature_columns=(self._fc_lib.categorical_column_with_hash_bucket(
              'language', hash_bucket_size=x_dim),),
          config=FakeRunConfig(),
          model_dir=self._model_dir)
      def _input_fn():
        return {
            'language':
                tf.sparse.SparseTensor(
                    values=['english', 'spanish'],
                    indices=[[0, 0], [0, 1]],
                    dense_shape=[1, 2])
        }, [[10.]]
      hook = CheckPartitionerVarHook(self, LANGUAGE_WEIGHT_NAME, x_dim,
                                     partitions)
      linear_regressor.train(input_fn=_input_fn, steps=1, hooks=[hook])
# TODO(b/36813849): Add tests with dynamic shape inputs using placeholders.
class BaseLinearRegressorEvaluationTest(object):
  """Evaluation-metric tests for a linear regressor.

  Mixin-style base (assert* helpers come from an accompanying TestCase).
  Each test writes hand-picked weights into a checkpoint, evaluates the
  estimator from that checkpoint, and verifies the reported metrics.
  """
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # linear_regressor_fn: callable constructing the estimator under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory."""
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    """Clears cached summary writers, then removes the model directory."""
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def test_evaluation_for_simple_data(self):
    """Single example: all metrics follow from the hand-set weights."""
    with tf.Graph().as_default():
      tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([2.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({
            'age': ((1,),)
        }, ((10.,),)), steps=1)
    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10. Loss is 3**2 = 9.
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 9.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)
  def test_evaluation_batch(self):
    """Batch of two identical examples: LOSS sums, LOSS_MEAN averages."""
    with tf.Graph().as_default():
      tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([2.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(
        input_fn=lambda: ({
            'age': ((1,), (1,))
        }, ((10.,), (10.,))), steps=1)
    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the sum over batch = 9 + 9 = 18
    # Average loss is the average over batch = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 18.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)
  def test_evaluation_weights(self):
    """Per-example weights scale the summed loss but not the mean."""
    with tf.Graph().as_default():
      tf.Variable([[11.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([2.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    def _input_fn():
      features = {'age': ((1,), (1,)), 'weights': ((1.,), (2.,))}
      labels = ((10.,), (10.,))
      return features, labels
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        weight_column='weights',
        model_dir=self._model_dir)
    eval_metrics = linear_regressor.evaluate(input_fn=_input_fn, steps=1)
    # Logit is (1. * 11.0 + 2.0) = 13, while label is 10.
    # Loss per example is 3**2 = 9.
    # Training loss is the weighted sum over batch = 9 + 2*9 = 27
    # average loss is the weighted average = 9 + 2*9 / (1 + 2) = 9
    self.assertDictEqual(
        {
            metric_keys.MetricKeys.LOSS: 27.,
            metric_keys.MetricKeys.LOSS_MEAN: 9.,
            metric_keys.MetricKeys.PREDICTION_MEAN: 13.,
            metric_keys.MetricKeys.LABEL_MEAN: 10.,
            tf.compat.v1.GraphKeys.GLOBAL_STEP: 100
        }, eval_metrics)
  def test_evaluation_for_multi_dimensions(self):
    """Multi-dim input/label: weights chosen so the loss is exactly zero."""
    x_dim = 3
    label_dim = 2
    with tf.Graph().as_default():
      tf.Variable([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([7.0, 8.0], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age', shape=(x_dim,)),),
        label_dimension=label_dim,
        model_dir=self._model_dir)
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([[2., 4., 5.]]),
        },
        y=np.array([[46., 58.]]),
        batch_size=1,
        num_epochs=None,
        shuffle=False)
    eval_metrics = linear_regressor.evaluate(input_fn=input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())
    # Logit is
    #   [2., 4., 5.] * [1.0, 2.0] + [7.0, 8.0] = [39, 50] + [7.0, 8.0]
    #                  [3.0, 4.0]
    #                  [5.0, 6.0]
    # which is [46, 58]
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
  def test_evaluation_for_multiple_feature_columns(self):
    """Two numeric columns via numpy_input_fn; exact fit => zero loss."""
    with tf.Graph().as_default():
      tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
      tf.Variable([5.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    batch_size = 2
    feature_columns = [
        self._fc_lib.numeric_column('age'),
        self._fc_lib.numeric_column('height')
    ]
    input_fn = numpy_io.numpy_input_fn(
        x={
            'age': np.array([20, 40]),
            'height': np.array([4, 8])
        },
        y=np.array([[213.], [421.]]),
        batch_size=batch_size,
        num_epochs=None,
        shuffle=False)
    est = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    eval_metrics = est.evaluate(input_fn=input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())
    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
  def test_evaluation_for_multiple_feature_columns_mix(self):
    """Mixes v1 and v2 feature columns, fed from a Dataset input_fn."""
    with tf.Graph().as_default():
      tf.Variable([[10.0]], name=AGE_WEIGHT_NAME)
      tf.Variable([[2.0]], name=HEIGHT_WEIGHT_NAME)
      tf.Variable([5.0], name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    batch_size = 2
    feature_columns = [
        feature_column.numeric_column('age'),
        tf.feature_column.numeric_column('height')
    ]
    def _input_fn():
      features_ds = tf.compat.v1.data.Dataset.from_tensor_slices({
          'age': np.array([20, 40]),
          'height': np.array([4, 8])
      })
      labels_ds = tf.compat.v1.data.Dataset.from_tensor_slices(
          np.array([[213.], [421.]]))
      return (tf.compat.v1.data.Dataset.zip(
          (features_ds, labels_ds)).batch(batch_size).repeat(None))
    est = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    eval_metrics = est.evaluate(input_fn=_input_fn, steps=1)
    self.assertItemsEqual(
        (metric_keys.MetricKeys.LOSS, metric_keys.MetricKeys.LOSS_MEAN,
         metric_keys.MetricKeys.PREDICTION_MEAN,
         metric_keys.MetricKeys.LABEL_MEAN, tf.compat.v1.GraphKeys.GLOBAL_STEP),
        eval_metrics.keys())
    # Logit is [(20. * 10.0 + 4 * 2.0 + 5.0), (40. * 10.0 + 8 * 2.0 + 5.0)] =
    # [213.0, 421.0], while label is [213., 421.]. Loss = 0.
    self.assertAlmostEqual(0, eval_metrics[metric_keys.MetricKeys.LOSS])
class BaseLinearRegressorPredictTest(object):
  """Prediction tests for a linear regressor.

  Mixin-style base (assert* helpers come from an accompanying TestCase).
  Each test hand-writes a checkpoint with known weights and checks the
  values `predict()` yields.
  """
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # linear_regressor_fn: callable constructing the estimator under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory."""
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    """Clears cached summary writers, then removes the model directory."""
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def test_1d(self):
    """Scalar feature: prediction is x * weight + bias."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x'),),
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': np.array([[2.]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x * weight + bias = 2. * 10. + .2 = 20.2
    self.assertAllClose([[20.2]], predicted_scores)
  def testMultiDim(self):
    """Multi-dim feature and label: prediction is a matmul plus bias."""
    batch_size = 2
    label_dimension = 3
    x_dim = 4
    feature_columns = (self._fc_lib.numeric_column('x', shape=(x_dim,)),)
    with tf.Graph().as_default():
      tf.Variable(  # shape=[x_dim, label_dimension]
          [[1., 2., 3.], [2., 3., 4.], [3., 4., 5.], [4., 5., 6.]],
          name='linear/linear_model/x/weights')
      tf.Variable(  # shape=[label_dimension]
          [.2, .4, .6], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        # x shape=[batch_size, x_dim]
        x={'x': np.array([[1., 2., 3., 4.], [5., 6., 7., 8.]])},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # score = x * weight + bias, shape=[batch_size, label_dimension]
    self.assertAllClose([[30.2, 40.4, 50.6], [70.2, 96.4, 122.6]],
                        predicted_scores)
  def testTwoFeatureColumns(self):
    """Two numeric columns contribute independent weighted terms."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('x0'),
                         self._fc_lib.numeric_column('x1')),
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={
            'x0': np.array([[2.]]),
            'x1': np.array([[3.]])
        },
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = linear_regressor.predict(input_fn=predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)
  def testTwoFeatureColumnsMix(self):
    """Same as testTwoFeatureColumns but mixing v1 and v2 columns."""
    with tf.Graph().as_default():
      tf.Variable([[10.]], name='linear/linear_model/x0/weights')
      tf.Variable([[20.]], name='linear/linear_model/x1/weights')
      tf.Variable([.2], name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(feature_column.numeric_column('x0'),
                         tf.feature_column.numeric_column('x1')),
        model_dir=self._model_dir)
    def _predict_input_fn():
      return tf.compat.v1.data.Dataset.from_tensor_slices({
          'x0': np.array([[2.]]),
          'x1': np.array([[3.]])
      }).batch(1)
    predictions = linear_regressor.predict(input_fn=_predict_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    # x0 * weight0 + x1 * weight1 + bias = 2. * 10. + 3. * 20 + .2 = 80.2
    self.assertAllClose([[80.2]], predicted_scores)
  def testSparseCombiner(self):
    """Predictions under each sparse_combiner: 'sum', 'mean', 'sqrtn'."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    def _input_fn():
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })
    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)
    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[12.0], [13.0]], predicted_scores)
    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[8.5], [9.0]], predicted_scores)
    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_regressor = self._linear_regressor_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_regressor.predict(input_fn=_input_fn)
    predicted_scores = list([x['predictions'] for x in predictions])
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearRegressorIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for a linear regressor.

  Mixin-style base (assert* helpers come from an accompanying TestCase).
  Each test exercises the full estimator lifecycle with a different
  input-pipeline flavor (numpy, pandas, parsed tf.Examples).
  """
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # linear_regressor_fn: callable constructing the estimator under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory."""
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    """Clears cached summary writers, then removes the model directory."""
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def _test_complete_flow(self, train_input_fn, eval_input_fn, predict_input_fn,
                          input_dimension, label_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export for the given pipeline."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)
    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
    # PREDICT
    predictions = np.array(
        [x['predictions'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, label_dimension), predictions.shape)
    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
  def test_numpy_input_fn(self):
    """Tests complete flow with numpy_input_fn."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=data,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
  def test_pandas_input_fn(self):
    """Tests complete flow with pandas_input_fn (skipped without pandas)."""
    if not HAS_PANDAS:
      return
    # Pandas DataFrame naturally supports 1 dim data only.
    label_dimension = 1
    input_dimension = label_dimension
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(data)
    prediction_length = 4
    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)
    self._test_complete_flow(
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
  def test_input_fn_from_parse_example(self):
    """Tests complete flow with input_fn constructed from parse_example."""
    label_dimension = 2
    input_dimension = label_dimension
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * label_dimension, dtype=np.float32)
    data = data.reshape(batch_size, label_dimension)
    serialized_examples = []
    for datum in data:
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=datum)),
                  'y':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(
                              value=datum[:label_dimension])),
              }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([label_dimension], tf.dtypes.float32),
    }
    def _train_input_fn():
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _eval_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _predict_input_fn():
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None
    self._test_complete_flow(
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        label_dimension=label_dimension,
        prediction_length=prediction_length)
class BaseLinearRegressorTrainingTest(object):
  """Training tests for a linear regressor.

  Mixin-style base (assert* helpers come from an accompanying TestCase).
  Uses a mock Optimizer whose minimize() only increments the global step,
  so the checkpointed weights can be asserted exactly.
  """
  def __init__(self, linear_regressor_fn, fc_lib=feature_column):
    # linear_regressor_fn: callable constructing the estimator under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_regressor_fn = linear_regressor_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory."""
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    """Clears cached summary writers, then removes the model directory."""
    if self._model_dir:
      tf.compat.v1.summary.FileWriterCache.clear()
      shutil.rmtree(self._model_dir)
  def _mock_optimizer(self, expected_loss=None):
    """Returns a mock v1 Optimizer that verifies loss and trains nothing.

    Its minimize() checks the trainable-variable set and (optionally) the
    loss value via an assert op, then only bumps the global step.
    """
    expected_var_names = [
        '%s/part_0:0' % AGE_WEIGHT_NAME,
        '%s/part_0:0' % BIAS_NAME
    ]
    def _minimize(loss, global_step=None, var_list=None):
      # Stand-in for Optimizer.minimize: assert, then increment the step.
      trainable_vars = var_list or tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
      self.assertItemsEqual(expected_var_names,
                            [var.name for var in trainable_vars])
      # Verify loss. We can't check the value directly, so we add an assert op.
      # NOTE(review): assertEquals is a deprecated alias of assertEqual.
      self.assertEquals(0, loss.shape.ndims)
      if expected_loss is None:
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()
      assert_loss = assert_close(
          tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
          loss,
          name='assert_loss')
      with tf.control_dependencies((assert_loss,)):
        if global_step is not None:
          return tf.compat.v1.assign_add(global_step, 1).op
        return tf.no_op()
    mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
        spec=tf.compat.v1.train.Optimizer,
        wraps=tf.compat.v1.train.Optimizer(
            use_locking=False, name='my_optimizer'))
    mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
    # Estimator.params performs a deepcopy, which wreaks havoc with mocks,
    # so return the mock itself for deepcopy.
    mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
    return mock_optimizer
  def _assert_checkpoint(self,
                         expected_global_step,
                         expected_age_weight=None,
                         expected_bias=None):
    """Asserts checkpoint shapes/step and, optionally, exact weight values."""
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([1, 1], shapes[AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertEqual(expected_age_weight,
                       tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
    self.assertEqual([1], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertEqual(expected_bias,
                       tf.train.load_variable(self._model_dir, BIAS_NAME))
  def testFromScratchWithDefaultOptimizer(self):
    """Default optimizer trains for num_steps and checkpoints the step."""
    label = 5.
    age = 17
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir)
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(num_steps)
  def testTrainWithOneDimLabel(self):
    """Rank-1 labels are accepted for a 1-dim regression."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)
  def testTrainWithOneDimWeight(self):
    """Rank-1 example weights are accepted via weight_column."""
    label_dimension = 1
    batch_size = 20
    feature_columns = [self._fc_lib.numeric_column('age', shape=(1,))]
    est = self._linear_regressor_fn(
        feature_columns=feature_columns,
        label_dimension=label_dimension,
        weight_column='w',
        model_dir=self._model_dir)
    data_rank_1 = np.linspace(0., 2., batch_size, dtype=np.float32)
    self.assertEqual((batch_size,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(200)
  def testFromScratch(self):
    """Mock optimizer: fresh weights stay 0, loss is label**2 = 25."""
    label = 5.
    age = 17
    # loss = (logit - label)^2 = (0 - 5.)^2 = 25.
    mock_optimizer = self._mock_optimizer(expected_loss=25.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=num_steps,
        expected_age_weight=0.,
        expected_bias=0.)
  def testFromCheckpoint(self):
    """Mock optimizer: warm-started weights are preserved; loss checked."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # logit = age * age_weight + bias = 17 * 10. + 5. = 175
    # loss = (logit - label)^2 = (175 - 5)^2 = 28900
    mock_optimizer = self._mock_optimizer(expected_loss=28900.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,),)
        }, ((5.,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
  def testFromCheckpointMultiBatch(self):
    """Mock optimizer: loss sums over a two-example batch."""
    # Create initial checkpoint.
    age_weight = 10.0
    bias = 5.0
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable([[age_weight]], name=AGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # logits = [17, 15] * 10. + 5. = [175, 155]
    # loss = (175 - 5)^2 + (155 - 3)^2 = 28900 + 23104 = 52004
    mock_optimizer = self._mock_optimizer(expected_loss=52004.)
    linear_regressor = self._linear_regressor_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        model_dir=self._model_dir,
        optimizer=mock_optimizer)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    num_steps = 10
    linear_regressor.train(
        input_fn=lambda: ({
            'age': ((17,), (15,))
        }, ((5.,), (3.,))),
        steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    self._assert_checkpoint(
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
class BaseLinearClassifierTrainingTest(object):
  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # linear_classifier_fn: callable constructing the classifier under test.
    # fc_lib: feature-column module (feature_column or feature_column_v2).
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib
  def setUp(self):
    """Creates a fresh temporary model directory."""
    self._model_dir = tempfile.mkdtemp()
def tearDown(self):
if self._model_dir:
shutil.rmtree(self._model_dir)
def _mock_optimizer(self, expected_loss=None):
expected_var_names = [
'%s/part_0:0' % AGE_WEIGHT_NAME,
'%s/part_0:0' % BIAS_NAME
]
def _minimize(loss, global_step):
trainable_vars = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES)
self.assertItemsEqual(expected_var_names,
[var.name for var in trainable_vars])
self.assertEquals(0, loss.shape.ndims)
if expected_loss is None:
return tf.compat.v1.assign_add(global_step, 1).op
assert_loss = assert_close(
tf.cast(expected_loss, name='expected', dtype=tf.dtypes.float32),
loss,
name='assert_loss')
with tf.control_dependencies((assert_loss,)):
return tf.compat.v1.assign_add(global_step, 1).op
mock_optimizer = tf.compat.v1.test.mock.NonCallableMock(
spec=tf.compat.v1.train.Optimizer,
wraps=tf.compat.v1.train.Optimizer(
use_locking=False, name='my_optimizer'))
mock_optimizer.minimize = tf.compat.v1.test.mock.MagicMock(wraps=_minimize)
# NOTE: Estimator.params performs a deepcopy, which wreaks havoc with mocks.
# So, return mock_optimizer itself for deepcopy.
mock_optimizer.__deepcopy__ = lambda _: mock_optimizer
return mock_optimizer
  def _assert_checkpoint(self,
                         n_classes,
                         expected_global_step,
                         expected_age_weight=None,
                         expected_bias=None):
    """Asserts checkpoint variable shapes/step and optional exact values.

    Binary classification uses a single logit; multi-class uses n_classes
    logits, which determines the expected weight/bias shapes.
    """
    logits_dimension = n_classes if n_classes > 2 else 1
    shapes = {
        name: shape
        for (name, shape) in tf.train.list_variables(self._model_dir)
    }
    self.assertEqual([], shapes[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertEqual(
        expected_global_step,
        tf.train.load_variable(self._model_dir,
                               tf.compat.v1.GraphKeys.GLOBAL_STEP))
    self.assertEqual([1, logits_dimension], shapes[AGE_WEIGHT_NAME])
    if expected_age_weight is not None:
      self.assertAllEqual(
          expected_age_weight,
          tf.train.load_variable(self._model_dir, AGE_WEIGHT_NAME))
    self.assertEqual([logits_dimension], shapes[BIAS_NAME])
    if expected_bias is not None:
      self.assertAllEqual(expected_bias,
                          tf.train.load_variable(self._model_dir, BIAS_NAME))
  def _testFromScratchWithDefaultOptimizer(self, n_classes):
    """Trains a fresh classifier with the default optimizer and checks the
    resulting checkpoint's global step."""
    label = 0
    age = 17
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    # Train for a few steps, and validate final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self._assert_checkpoint(n_classes, num_steps)
def testBinaryClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=2)
def testMultiClassesFromScratchWithDefaultOptimizer(self):
self._testFromScratchWithDefaultOptimizer(n_classes=4)
  def _testTrainWithTwoDimsLabel(self, n_classes):
    """Trains with rank-2 labels (shape [batch, 1]) and rank-1 features."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_2,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=2)
def testMultiClassesTrainWithTwoDimsLabel(self):
self._testTrainWithTwoDimsLabel(n_classes=4)
  def _testTrainWithOneDimLabel(self, n_classes):
    """Trains with rank-1 labels (shape [batch]) and rank-1 features."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'age': data_rank_1},
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=2)
def testMultiClassesTrainWithOneDimLabel(self):
self._testTrainWithOneDimLabel(n_classes=4)
  def _testTrainWithTwoDimsWeight(self, n_classes):
    """Trains with a rank-2 example-weight column (shape [batch, 1])."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        weight_column='w',
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    data_rank_2 = np.array([[0], [1]])
    self.assertEqual((2,), data_rank_1.shape)
    self.assertEqual((2, 1), data_rank_2.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_2
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=2)
def testMultiClassesTrainWithTwoDimsWeight(self):
self._testTrainWithTwoDimsWeight(n_classes=4)
  def _testTrainWithOneDimWeight(self, n_classes):
    """Trains with a rank-1 example-weight column (shape [batch])."""
    batch_size = 20
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        weight_column='w',
        n_classes=n_classes,
        model_dir=self._model_dir)
    data_rank_1 = np.array([0, 1])
    self.assertEqual((2,), data_rank_1.shape)
    train_input_fn = numpy_io.numpy_input_fn(
        x={
            'age': data_rank_1,
            'w': data_rank_1
        },
        y=data_rank_1,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    est.train(train_input_fn, steps=200)
    self._assert_checkpoint(n_classes, 200)
def testBinaryClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=2)
def testMultiClassesTrainWithOneDimWeight(self):
self._testTrainWithOneDimWeight(n_classes=4)
  def _testFromScratch(self, n_classes):
    """Trains from zero-initialized weights with a mock optimizer that
    asserts the expected initial loss, then validates the checkpoint."""
    label = 1
    age = 17
    # For binary classifier:
    #   loss = sigmoid_cross_entropy(logits, label) where logits=0 (weights are
    #   all zero initially) and label = 1 so,
    #   loss = 1 * -log ( sigmoid(logits) ) = 0.69315
    # For multi class classifier:
    #   loss = cross_entropy(logits, label) where logits are all 0s (weights are
    #   all zero initially) and label = 1 so,
    #   loss = 1 * -log ( 1.0 / n_classes )
    # For this particular test case, as logits are same, the formula
    # 1 * -log ( 1.0 / n_classes ) covers both binary and multi class cases.
    mock_optimizer = self._mock_optimizer(
        expected_loss=(-1 * math.log(1.0 / n_classes)))
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # Weights/bias must still be zero: the mock optimizer only bumps the step.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=num_steps,
        expected_age_weight=[[0.]] if n_classes == 2 else [[0.] * n_classes],
        expected_bias=[0.] if n_classes == 2 else [.0] * n_classes)
def testBinaryClassesFromScratch(self):
self._testFromScratch(n_classes=2)
def testMultiClassesFromScratch(self):
self._testFromScratch(n_classes=4)
  def _testFromCheckpoint(self, n_classes):
    """Warm-starts training from a hand-written checkpoint and asserts the
    expected loss via a mock optimizer."""
    # Create initial checkpoint.
    label = 1
    age = 17
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # For binary classifier:
    #   logits = age * age_weight + bias = 17 * 2. - 35. = -1.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss = 1 * -log ( sigmoid(-1) ) = 1.3133
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = 17 * age_weight + bias and label = 1
    #   so, loss = 1 * -log ( soft_max(logits)[1] )
    if n_classes == 2:
      expected_loss = 1.3133
    else:
      logits = age_weight * age + bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[0, label])
    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # The mock optimizer leaves weights untouched; only the step advances.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
def testBinaryClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=2)
def testMultiClassesFromCheckpoint(self):
self._testFromCheckpoint(n_classes=4)
  def _testFromCheckpointFloatLabels(self, n_classes):
    """Trains with a fractional (soft) label; binary-only — multi-class
    cross entropy requires integer class ids, so n_classes > 2 is a no-op."""
    # Create initial checkpoint.
    if n_classes > 2:
      return
    label = 0.8
    age = 17
    age_weight = [[2.0]]
    bias = [-35.0]
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # logits = age * age_weight + bias = 17 * 2. - 35. = -1.
    # loss = sigmoid_cross_entropy(logits, label)
    # => loss = -0.8 * log(sigmoid(-1)) -0.2 * log(sigmoid(+1)) = 1.1132617
    mock_optimizer = self._mock_optimizer(expected_loss=1.1132617)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
def testBinaryClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=2)
def testMultiClassesFromCheckpointFloatLabels(self):
self._testFromCheckpointFloatLabels(n_classes=4)
  def _testFromCheckpointMultiBatch(self, n_classes):
    """Trains from a checkpoint on a two-example batch and asserts the
    summed per-example loss via a mock optimizer."""
    # Create initial checkpoint.
    label = [1, 0]
    age = [17.0, 18.5]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    # For binary classifier:
    #   logits = age * age_weight + bias
    #   logits[0] = 17 * 2. - 35. = -1.
    #   logits[1] = 18.5 * 2. - 35. = 2.
    #   loss = sigmoid_cross_entropy(logits, label)
    #   so, loss[0] = 1 * -log ( sigmoid(-1) ) = 1.3133
    #   loss[1] = (1 - 0) * -log ( 1- sigmoid(2) ) = 2.1269
    #   expected_loss = loss[0] + loss[1]
    # For multi class classifier:
    #   loss = cross_entropy(logits, label)
    #   where logits = [17, 18.5] * age_weight + bias and label = [1, 0]
    #   so, loss = 1 * -log ( soft_max(logits)[label] )
    #   expected_loss = loss[0] + loss[1]
    if n_classes == 2:
      expected_loss = 1.3133 + 2.1269
    else:
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1
    mock_optimizer = self._mock_optimizer(expected_loss=expected_loss)
    est = linear.LinearClassifier(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        optimizer=mock_optimizer,
        model_dir=self._model_dir)
    self.assertEqual(0, mock_optimizer.minimize.call_count)
    # Train for a few steps, and validate optimizer and final checkpoint.
    num_steps = 10
    est.train(input_fn=lambda: ({'age': (age)}, (label)), steps=num_steps)
    self.assertEqual(1, mock_optimizer.minimize.call_count)
    # The mock optimizer leaves weights untouched; only the step advances.
    self._assert_checkpoint(
        n_classes,
        expected_global_step=initial_global_step + num_steps,
        expected_age_weight=age_weight,
        expected_bias=bias)
def testBinaryClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=2)
def testMultiClassesFromCheckpointMultiBatch(self):
self._testFromCheckpointMultiBatch(n_classes=4)
class BaseLinearClassifierEvaluationTest(object):
  """Shared `evaluate` tests for linear classifiers.

  Subclasses mix this in with a concrete `linear_classifier_fn` (and
  feature-column library) to test both v1 and v2 implementations.
  """
  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the estimator under test and the feature-column module.
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib
  def setUp(self):
    # Fresh model directory for each test.
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)
  def _test_evaluation_for_simple_data(self, n_classes):
    """Evaluates a single example against hand-computed metrics."""
    label = 1
    age = 1.
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-30.0] if n_classes == 2 else [-30.0] * n_classes
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          100, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': ((age,),)
        }, ((label,),)), steps=1)
    if n_classes == 2:
      # Binary classes: loss = sum(cross_entropy(41)) = 41.
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: 41.,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: 41.,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.,
          metric_keys.MetricKeys.LABEL_MEAN: 1.,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 1,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 1.,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * age + bias
      logits_exp = np.exp(logits)
      softmax = logits_exp / logits_exp.sum()
      expected_loss = -1 * math.log(softmax[0, label])
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }
    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)
  def test_binary_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=2)
  def test_multi_classes_evaluation_for_simple_data(self):
    self._test_evaluation_for_simple_data(n_classes=4)
  def _test_evaluation_batch(self, n_classes):
    """Evaluates a two-example batch against hand-computed metrics."""
    label = [1, 0]
    age = [17., 18.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age)
        }, (label)), steps=1)
    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      expected_loss = 1.3133 * 2
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: 0.5,
          metric_keys.MetricKeys.LABEL_MEAN: 0.5,
          metric_keys.MetricKeys.ACCURACY_BASELINE: 0.5,
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.25,
      }
    else:
      # Multi classes: loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      expected_loss = expected_loss_0 + expected_loss_1
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: expected_loss / 2,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }
    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)
  def test_binary_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=2)
  def test_multi_classes_evaluation_batch(self):
    self._test_evaluation_batch(n_classes=4)
  def _test_evaluation_weights(self, n_classes):
    """Evaluates a weighted two-example batch; metrics are weight-averaged."""
    label = [1, 0]
    age = [17., 18.]
    weights = [1., 2.]
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as 2.0 * range(n_classes).
    age_weight = [[2.0]] if n_classes == 2 else (np.reshape(
        2.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [-35.0] if n_classes == 2 else [-35.0] * n_classes
    initial_global_step = 100
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(
          initial_global_step,
          name=tf.compat.v1.GraphKeys.GLOBAL_STEP,
          dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        n_classes=n_classes,
        weight_column='w',
        model_dir=self._model_dir)
    eval_metrics = est.evaluate(
        input_fn=lambda: ({
            'age': (age),
            'w': (weights)
        }, (label)), steps=1)
    if n_classes == 2:
      # Logits are (-1., 1.) labels are (1, 0).
      # Loss is
      #   loss for row 1: 1 * -log(sigmoid(-1)) = 1.3133
      #   loss for row 2: (1 - 0) * -log(1 - sigmoid(1)) = 1.3133
      #   weights = [1., 2.]
      expected_loss = 1.3133 * (1. + 2.)
      loss_mean = expected_loss / (1.0 + 2.0)
      label_mean = np.average(label, weights=weights)
      logits = [-1, 1]
      logistics = sigmoid(np.array(logits))
      predictions_mean = np.average(logistics, weights=weights)
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          metric_keys.MetricKeys.ACCURACY: 0.,
          metric_keys.MetricKeys.PRECISION: 0.,
          metric_keys.MetricKeys.RECALL: 0.,
          metric_keys.MetricKeys.PREDICTION_MEAN: predictions_mean,
          metric_keys.MetricKeys.LABEL_MEAN: label_mean,
          metric_keys.MetricKeys.ACCURACY_BASELINE:
              (max(label_mean, 1 - label_mean)),
          metric_keys.MetricKeys.AUC: 0.,
          metric_keys.MetricKeys.AUC_PR: 0.1668,
      }
    else:
      # Multi classes: unweighted_loss = 1 * -log ( soft_max(logits)[label] )
      logits = age_weight * np.reshape(age, (2, 1)) + bias
      logits_exp = np.exp(logits)
      softmax_row_0 = logits_exp[0] / logits_exp[0].sum()
      softmax_row_1 = logits_exp[1] / logits_exp[1].sum()
      expected_loss_0 = -1 * math.log(softmax_row_0[label[0]])
      expected_loss_1 = -1 * math.log(softmax_row_1[label[1]])
      loss_mean = np.average([expected_loss_0, expected_loss_1],
                             weights=weights)
      expected_loss = loss_mean * np.sum(weights)
      expected_metrics = {
          metric_keys.MetricKeys.LOSS: expected_loss,
          metric_keys.MetricKeys.LOSS_MEAN: loss_mean,
          tf.compat.v1.GraphKeys.GLOBAL_STEP: 100,
          metric_keys.MetricKeys.ACCURACY: 0.,
      }
    self.assertAllClose(
        sorted_key_dict(expected_metrics),
        sorted_key_dict(eval_metrics),
        rtol=1e-3)
  def test_binary_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=2)
  def test_multi_classes_evaluation_weights(self):
    self._test_evaluation_weights(n_classes=4)
class BaseLinearClassifierPredictTest(object):
  """Shared `predict` tests for linear classifiers.

  Subclasses mix this in with a concrete `linear_classifier_fn` (and
  feature-column library) to test both v1 and v2 implementations.
  """
  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the estimator under test and the feature-column module.
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib
  def setUp(self):
    # Fresh model directory for each test.
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)
  def _testPredictions(self, n_classes, label_vocabulary, label_output_fn):
    """Checks all prediction keys for one example against numpy math.

    Args:
      n_classes: Number of classes (2 = binary, >2 = multi-class).
      label_vocabulary: Optional string labels for classes, or None.
      label_output_fn: Maps a class id to the expected 'classes' bytes.
    """
    age = 1.
    # For binary case, the expected weight has shape (1,1). For multi class
    # case, the shape is (1, n_classes). In order to test the weights, set
    # weights as -11.0 * range(n_classes).
    age_weight = [[-11.0]] if n_classes == 2 else (np.reshape(
        -11.0 * np.array(list(range(n_classes)), dtype=np.float32),
        (1, n_classes)))
    bias = [10.0] if n_classes == 2 else [10.0] * n_classes
    with tf.Graph().as_default():
      tf.Variable(age_weight, name=AGE_WEIGHT_NAME)
      tf.Variable(bias, name=BIAS_NAME)
      tf.Variable(100, name='global_step', dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    est = self._linear_classifier_fn(
        feature_columns=(self._fc_lib.numeric_column('age'),),
        label_vocabulary=label_vocabulary,
        n_classes=n_classes,
        model_dir=self._model_dir)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'age': np.array([[age]])},
        y=None,
        batch_size=1,
        num_epochs=1,
        shuffle=False)
    predictions = list(est.predict(input_fn=predict_input_fn))
    if n_classes == 2:
      # np.asscalar was removed in NumPy 1.23; ndarray.item() is the
      # supported replacement.
      scalar_logits = np.reshape(np.array(age_weight) * age + bias,
                                 (1,)).item()
      # Binary head reports probabilities over the implicit [0, logit] pair.
      two_classes_logits = [0, scalar_logits]
      two_classes_logits_exp = np.exp(two_classes_logits)
      softmax = two_classes_logits_exp / two_classes_logits_exp.sum()
      expected_predictions = {
          'class_ids': [0],
          'all_class_ids': [0, 1],
          'classes': [label_output_fn(0)],
          'all_classes': [label_output_fn(0),
                          label_output_fn(1)],
          'logistic': [sigmoid(np.array(scalar_logits))],
          'logits': [scalar_logits],
          'probabilities': softmax,
      }
    else:
      onedim_logits = np.reshape(np.array(age_weight) * age + bias, (-1,))
      class_ids = onedim_logits.argmax()
      all_class_ids = list(range(len(onedim_logits)))
      logits_exp = np.exp(onedim_logits)
      softmax = logits_exp / logits_exp.sum()
      expected_predictions = {
          'class_ids': [class_ids],
          'all_class_ids': all_class_ids,
          'classes': [label_output_fn(class_ids)],
          'all_classes': [label_output_fn(i) for i in all_class_ids],
          'logits': onedim_logits,
          'probabilities': softmax,
      }
    self.assertEqual(1, len(predictions))
    # assertAllClose cannot handle byte type.
    self.assertEqual(expected_predictions['classes'], predictions[0]['classes'])
    expected_predictions.pop('classes')
    predictions[0].pop('classes')
    self.assertAllEqual(expected_predictions['all_classes'],
                        predictions[0]['all_classes'])
    expected_predictions.pop('all_classes')
    predictions[0].pop('all_classes')
    self.assertAllClose(
        sorted_key_dict(expected_predictions), sorted_key_dict(predictions[0]))
  def testBinaryClassesWithoutLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())
  def testBinaryClassesWithLabelVocabulary(self):
    n_classes = 2
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
  def testMultiClassesWithoutLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=None,
        label_output_fn=lambda x: ('%s' % x).encode())
  def testMultiClassesWithLabelVocabulary(self):
    n_classes = 4
    self._testPredictions(
        n_classes,
        label_vocabulary=['class_vocab_{}'.format(i) for i in range(n_classes)],
        label_output_fn=lambda x: ('class_vocab_%s' % x).encode())
  def testSparseCombiner(self):
    """Checks logits for each sparse_combiner mode ('sum', 'mean', 'sqrtn')."""
    w_a = 2.0
    w_b = 3.0
    w_c = 5.0
    bias = 5.0
    with tf.Graph().as_default():
      tf.Variable([[w_a], [w_b], [w_c]], name=LANGUAGE_WEIGHT_NAME)
      tf.Variable([bias], name=BIAS_NAME)
      tf.Variable(
          1, name=tf.compat.v1.GraphKeys.GLOBAL_STEP, dtype=tf.dtypes.int64)
      save_variables_to_ckpt(self._model_dir)
    def _input_fn():
      # Two examples, each with two sparse 'language' values.
      return tf.compat.v1.data.Dataset.from_tensors({
          'language':
              tf.sparse.SparseTensor(
                  values=['a', 'c', 'b', 'c'],
                  indices=[[0, 0], [0, 1], [1, 0], [1, 1]],
                  dense_shape=[2, 2]),
      })
    feature_columns = (self._fc_lib.categorical_column_with_vocabulary_list(
        'language', vocabulary_list=['a', 'b', 'c']),)
    # Check prediction for each sparse_combiner.
    # With sparse_combiner = 'sum', we have
    # logits_1 = w_a + w_c + bias
    #          = 2.0 + 5.0 + 5.0 = 12.0
    # logits_2 = w_b + w_c + bias
    #          = 3.0 + 5.0 + 5.0 = 13.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns, model_dir=self._model_dir)
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[12.0], [13.0]], predicted_scores)
    # With sparse_combiner = 'mean', we have
    # logits_1 = 1/2 * (w_a + w_c) + bias
    #          = 1/2 * (2.0 + 5.0) + 5.0 = 8.5
    # logits_2 = 1/2 * (w_b + w_c) + bias
    #          = 1/2 * (3.0 + 5.0) + 5.0 = 9.0
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='mean')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[8.5], [9.0]], predicted_scores)
    # With sparse_combiner = 'sqrtn', we have
    # logits_1 = sqrt(2)/2 * (w_a + w_c) + bias
    #          = sqrt(2)/2 * (2.0 + 5.0) + 5.0 = 9.94974
    # logits_2 = sqrt(2)/2 * (w_b + w_c) + bias
    #          = sqrt(2)/2 * (3.0 + 5.0) + 5.0 = 10.65685
    linear_classifier = self._linear_classifier_fn(
        feature_columns=feature_columns,
        model_dir=self._model_dir,
        sparse_combiner='sqrtn')
    predictions = linear_classifier.predict(input_fn=_input_fn)
    predicted_scores = list([x['logits'] for x in predictions])
    self.assertAllClose([[9.94974], [10.65685]], predicted_scores)
class BaseLinearClassifierIntegrationTest(object):
  """End-to-end train/evaluate/predict/export tests for linear classifiers.

  Exercises the full estimator lifecycle with several input-fn flavors
  (numpy, pandas, parsed tf.Example).
  """
  def __init__(self, linear_classifier_fn, fc_lib=feature_column):
    # Constructor of the estimator under test and the feature-column module.
    self._linear_classifier_fn = linear_classifier_fn
    self._fc_lib = fc_lib
  def setUp(self):
    # Fresh model directory for each test.
    self._model_dir = tempfile.mkdtemp()
  def tearDown(self):
    if self._model_dir:
      shutil.rmtree(self._model_dir)
  def _test_complete_flow(self, n_classes, train_input_fn, eval_input_fn,
                          predict_input_fn, input_dimension, prediction_length):
    """Runs train -> evaluate -> predict -> export with the given input fns."""
    feature_columns = [
        self._fc_lib.numeric_column('x', shape=(input_dimension,))
    ]
    est = self._linear_classifier_fn(
        feature_columns=feature_columns,
        n_classes=n_classes,
        model_dir=self._model_dir)
    # TRAIN
    # learn y = x
    est.train(train_input_fn, steps=200)
    # EVALUATE
    scores = est.evaluate(eval_input_fn)
    self.assertEqual(200, scores[tf.compat.v1.GraphKeys.GLOBAL_STEP])
    self.assertIn(metric_keys.MetricKeys.LOSS, six.iterkeys(scores))
    # PREDICT
    predictions = np.array(
        [x['classes'] for x in est.predict(predict_input_fn)])
    self.assertAllEqual((prediction_length, 1), predictions.shape)
    # EXPORT
    feature_spec = tf.compat.v1.feature_column.make_parse_example_spec(
        feature_columns)
    serving_input_receiver_fn = export.build_parsing_serving_input_receiver_fn(
        feature_spec)
    export_dir = est.export_saved_model(tempfile.mkdtemp(),
                                        serving_input_receiver_fn)
    self.assertTrue(tf.compat.v1.gfile.Exists(export_dir))
  def _test_numpy_input_fn(self, n_classes):
    """Complete flow fed through `numpy_io.numpy_input_fn`."""
    input_dimension = 4
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size)
    train_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=None,
        shuffle=True)
    eval_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=target,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    predict_input_fn = numpy_io.numpy_input_fn(
        x={'x': data},
        y=None,
        batch_size=batch_size,
        num_epochs=1,
        shuffle=False)
    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)
  def test_binary_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=2)
  def test_multi_classes_numpy_input_fn(self):
    self._test_numpy_input_fn(n_classes=4)
  def _test_pandas_input_fn(self, n_classes):
    """Complete flow fed through `pandas_io.pandas_input_fn`; skipped when
    pandas is unavailable."""
    if not HAS_PANDAS:
      return
    # Pandas DataFrame naturally supports 1 dim data only.
    input_dimension = 1
    batch_size = 10
    data = np.array([1., 2., 3., 4.], dtype=np.float32)
    target = np.array([1, 0, 1, 0], dtype=np.int32)
    x = pd.DataFrame({'x': data})
    y = pd.Series(target)
    prediction_length = 4
    train_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, num_epochs=None, shuffle=True)
    eval_input_fn = pandas_io.pandas_input_fn(
        x=x, y=y, batch_size=batch_size, shuffle=False)
    predict_input_fn = pandas_io.pandas_input_fn(
        x=x, batch_size=batch_size, shuffle=False)
    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=train_input_fn,
        eval_input_fn=eval_input_fn,
        predict_input_fn=predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)
  def test_binary_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=2)
  def test_multi_classes_pandas_input_fn(self):
    self._test_pandas_input_fn(n_classes=4)
  def _test_input_fn_from_parse_example(self, n_classes):
    """Complete flow fed through serialized tf.Example protos parsed with
    `tf.io.parse_example`."""
    input_dimension = 2
    batch_size = 10
    prediction_length = batch_size
    data = np.linspace(0., 2., batch_size * input_dimension, dtype=np.float32)
    data = data.reshape(batch_size, input_dimension)
    target = np.array([1] * batch_size, dtype=np.int64)
    serialized_examples = []
    for x, y in zip(data, target):
      example = example_pb2.Example(
          features=feature_pb2.Features(
              feature={
                  'x':
                      feature_pb2.Feature(
                          float_list=feature_pb2.FloatList(value=x)),
                  'y':
                      feature_pb2.Feature(
                          int64_list=feature_pb2.Int64List(value=[y])),
              }))
      serialized_examples.append(example.SerializeToString())
    feature_spec = {
        'x': tf.io.FixedLenFeature([input_dimension], tf.dtypes.float32),
        'y': tf.io.FixedLenFeature([1], tf.dtypes.int64),
    }
    def _train_input_fn():
      # Unbounded epochs for training.
      feature_map = tf.compat.v1.io.parse_example(serialized_examples,
                                                  feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _eval_input_fn():
      # Single epoch for evaluation.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      labels = features.pop('y')
      return features, labels
    def _predict_input_fn():
      # Single epoch, labels dropped for prediction.
      feature_map = tf.compat.v1.io.parse_example(
          tf.compat.v1.train.limit_epochs(serialized_examples, num_epochs=1),
          feature_spec)
      features = queue_parsed_features(feature_map)
      features.pop('y')
      return features, None
    self._test_complete_flow(
        n_classes=n_classes,
        train_input_fn=_train_input_fn,
        eval_input_fn=_eval_input_fn,
        predict_input_fn=_predict_input_fn,
        input_dimension=input_dimension,
        prediction_length=prediction_length)
  def test_binary_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=2)
  def test_multi_classes_input_fn_from_parse_example(self):
    self._test_input_fn_from_parse_example(n_classes=4)
class BaseLinearLogitFnTest(object):
def __init__(self, fc_lib=feature_column):
self._fc_lib = fc_lib
def test_basic_logit_correctness(self):
age = self._fc_lib.numeric_column('age')
with tf.Graph().as_default():
logit_fn = linear.linear_logit_fn_builder(units=2, feature_columns=[age])
logits = logit_fn(features={'age': [[23.], [31.]]})
bias_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
'linear_model/bias_weights')[0]
age_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
self.assertAllClose([[0., 0.], [0., 0.]], logits.eval())
sess.run(bias_var.assign([10., 5.]))
self.assertAllClose([[10., 5.], [10., 5.]], logits.eval())
sess.run(age_var.assign([[2.0, 3.0]]))
# [2 * 23 + 10, 3 * 23 + 5] = [56, 74].
# [2 * 31 + 10, 3 * 31 + 5] = [72, 98]
self.assertAllClose([[56., 74.], [72., 98.]], logits.eval())
def test_compute_fraction_of_zero(self):
if self._fc_lib != feature_column:
return
age = tf.feature_column.numeric_column('age')
occupation = feature_column.categorical_column_with_hash_bucket(
'occupation', hash_bucket_size=5)
with tf.Graph().as_default():
cols_to_vars = {}
tf.compat.v1.feature_column.linear_model(
features={
'age': [[23.], [31.]],
'occupation': [['doctor'], ['engineer']]
},
feature_columns=[age, occupation],
units=3,
cols_to_vars=cols_to_vars)
cols_to_vars.pop('bias')
fraction_zero = linear._compute_fraction_of_zero(
list(cols_to_vars.values()))
age_var = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
with tf.compat.v1.Session() as sess:
sess.run([tf.compat.v1.initializers.global_variables()])
# Upon initialization, all variables will be zero.
self.assertAllClose(1, fraction_zero.eval())
sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
# 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
# x 3-dim output) are zero.
self.assertAllClose(16. / 18., fraction_zero.eval())
  def test_compute_fraction_of_zero_v2(self):
    """Checks _compute_fraction_of_zero over the v2 feature-column library."""
    # Only relevant when this suite is parameterized with the v2 library.
    if self._fc_lib != feature_column_v2:
      return
    age = tf.feature_column.numeric_column('age')
    occupation = tf.feature_column.categorical_column_with_hash_bucket(
        'occupation', hash_bucket_size=5)
    with tf.Graph().as_default():
      model = feature_column_v2.LinearModel(
          feature_columns=[age, occupation], units=3, name='linear_model')
      features = {
          'age': [[23.], [31.]],
          'occupation': [['doctor'], ['engineer']]
      }
      # Calling the model creates its variables.
      model(features)
      variables = model.variables
      # Exclude the bias from the fraction-of-zero computation.
      variables.remove(model.bias)
      fraction_zero = linear._compute_fraction_of_zero(variables)
      age_var = tf.compat.v1.get_collection(
          tf.compat.v1.GraphKeys.GLOBAL_VARIABLES, 'linear_model/age')[0]
      with tf.compat.v1.Session() as sess:
        sess.run([tf.compat.v1.initializers.global_variables()])
        # Upon initialization, all variables will be zero.
        self.assertAllClose(1, fraction_zero.eval())
        sess.run(age_var.assign([[2.0, 0.0, -1.0]]))
        # 1 of the 3 age weights are zero, and all of the 15 (5 hash buckets
        # x 3-dim output) are zero.
        self.assertAllClose(16. / 18., fraction_zero.eval())
class BaseLinearWarmStartingTest(object):
  """Shared warm-starting tests for linear classifier/regressor estimators.

  Subclasses supply the concrete estimator constructors and the
  feature-column library (v1 or v2) through ``__init__``.
  """
  def __init__(self,
               _linear_classifier_fn,
               _linear_regressor_fn,
               fc_lib=feature_column):
    # Constructors for the estimator variants under test.
    self._linear_classifier_fn = _linear_classifier_fn
    self._linear_regressor_fn = _linear_regressor_fn
    # Feature-column library (v1 `feature_column` or `feature_column_v2`).
    self._fc_lib = fc_lib
  def setUp(self):
    # Create a directory to save our old checkpoint and vocabularies to.
    self._ckpt_and_vocab_dir = tempfile.mkdtemp()
    # Make a dummy input_fn.
    def _input_fn():
      features = {
          'age': [[23.], [31.]],
          'age_in_years': [[23.], [31.]],
          'occupation': [['doctor'], ['consultant']]
      }
      return features, [0, 1]
    self._input_fn = _input_fn
  def tearDown(self):
    # Clean up checkpoint / vocab dir.
    tf.compat.v1.summary.FileWriterCache.clear()
    shutil.rmtree(self._ckpt_and_vocab_dir)
  def test_classifier_basic_warm_starting(self):
    """Warm-starting from a checkpoint reproduces all classifier variables."""
    age = self._fc_lib.numeric_column('age')
    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_classifier.model_dir)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    for variable_name in warm_started_linear_classifier.get_variable_names():
      self.assertAllClose(
          linear_classifier.get_variable_value(variable_name),
          warm_started_linear_classifier.get_variable_value(variable_name))
  def test_regressor_basic_warm_starting(self):
    """Warm-starting from a checkpoint reproduces all regressor variables."""
    age = self._fc_lib.numeric_column('age')
    # Create a LinearRegressor and train to save a checkpoint.
    linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        optimizer='SGD')
    linear_regressor.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearRegressor, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change).
    warm_started_linear_regressor = self._linear_regressor_fn(
        feature_columns=[age],
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=linear_regressor.model_dir)
    warm_started_linear_regressor.train(input_fn=self._input_fn, max_steps=1)
    for variable_name in warm_started_linear_regressor.get_variable_names():
      self.assertAllClose(
          linear_regressor.get_variable_value(variable_name),
          warm_started_linear_regressor.get_variable_value(variable_name))
  def test_warm_starting_selective_variables(self):
    """A vars_to_warm_start regex warm-starts only the matching variables."""
    age = self._fc_lib.numeric_column('age')
    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change). Only the age variable matches the
    # vars_to_warm_start regex below.
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[age],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            vars_to_warm_start='.*(age).*'))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    self.assertAllClose(
        linear_classifier.get_variable_value(AGE_WEIGHT_NAME),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    # The bias was not warm-started and keeps its zero initialization.
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
  def test_warm_starting_with_vocab_remapping_and_partitioning(self):
    """Warm-starting remaps rows across a vocabulary change (partitioned vars)."""
    # Write the old occupation vocabulary to disk.
    vocab_list = ['doctor', 'lawyer', 'consultant']
    vocab_file = os.path.join(self._ckpt_and_vocab_dir, 'occupation_vocab')
    with open(vocab_file, 'w') as f:
      f.write('\n'.join(vocab_list))
    occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=vocab_file,
        vocabulary_size=len(vocab_list))
    # Create a partitioned LinearClassifier and train to save a checkpoint.
    partitioner = tf.compat.v1.fixed_size_partitioner(num_shards=2)
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD',
        partitioner=partitioner)
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change). Use a new FeatureColumn with a
    # different vocabulary for occupation.
    new_vocab_list = ['doctor', 'consultant', 'engineer']
    new_vocab_file = os.path.join(self._ckpt_and_vocab_dir,
                                  'new_occupation_vocab')
    with open(new_vocab_file, 'w') as f:
      f.write('\n'.join(new_vocab_list))
    new_occupation = self._fc_lib.categorical_column_with_vocabulary_file(
        'occupation',
        vocabulary_file=new_vocab_file,
        vocabulary_size=len(new_vocab_list))
    # We can create our VocabInfo object from the new and old occupation
    # FeatureColumn's.
    occupation_vocab_info = estimator.VocabInfo(
        new_vocab=new_occupation.vocabulary_file,
        new_vocab_size=new_occupation.vocabulary_size,
        num_oov_buckets=new_occupation.num_oov_buckets,
        old_vocab=occupation.vocabulary_file,
        old_vocab_size=occupation.vocabulary_size,
        # Degenerate uniform initializer (minval == maxval == 0.39) gives new
        # vocab entries a deterministic value that is asserted on below.
        backup_initializer=tf.compat.v1.initializers.random_uniform(
            minval=0.39, maxval=0.39))
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[occupation],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_vocab_info={
                OCCUPATION_WEIGHT_NAME: occupation_vocab_info
            },
            # Explicitly providing None here will only warm-start variables
            # referenced in var_name_to_vocab_info (the bias will not be
            # warm-started).
            vars_to_warm_start=None),
        partitioner=partitioner)
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # 'doctor' was ID-0 and still ID-0.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[0, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[0, :])
    # 'consultant' was ID-2 and now ID-1.
    self.assertAllClose(
        linear_classifier.get_variable_value(OCCUPATION_WEIGHT_NAME)[2, :],
        warm_started_linear_classifier.get_variable_value(
            OCCUPATION_WEIGHT_NAME)[1, :])
    # 'engineer' is a new entry and should be initialized with the
    # backup_initializer in VocabInfo.
    self.assertAllClose([0.39] * 4,
                        warm_started_linear_classifier.get_variable_value(
                            OCCUPATION_WEIGHT_NAME)[2, :])
    # Bias should still be zero (from initialization logic).
    self.assertAllClose(
        [0.0] * 4, warm_started_linear_classifier.get_variable_value(BIAS_NAME))
  def test_warm_starting_with_naming_change(self):
    """var_name_to_prev_var_name maps a renamed variable onto the old checkpoint."""
    age_in_years = self._fc_lib.numeric_column('age_in_years')
    # Create a LinearClassifier and train to save a checkpoint.
    linear_classifier = self._linear_classifier_fn(
        feature_columns=[age_in_years],
        model_dir=self._ckpt_and_vocab_dir,
        n_classes=4,
        optimizer='SGD')
    linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    # Create a second LinearClassifier, warm-started from the first. Use a
    # learning_rate = 0.0 optimizer to check values (use SGD so we don't have
    # accumulator values that change). The new model uses feature 'age', warm
    # started from the old 'age_in_years' variable via the rename map.
    warm_started_linear_classifier = self._linear_classifier_fn(
        feature_columns=[self._fc_lib.numeric_column('age')],
        n_classes=4,
        optimizer=tf.compat.v1.train.GradientDescentOptimizer(
            learning_rate=0.0),
        warm_start_from=estimator.WarmStartSettings(
            ckpt_to_initialize_from=linear_classifier.model_dir,
            var_name_to_prev_var_name={
                AGE_WEIGHT_NAME: AGE_WEIGHT_NAME.replace('age', 'age_in_years')
            }))
    warm_started_linear_classifier.train(input_fn=self._input_fn, max_steps=1)
    self.assertAllClose(
        linear_classifier.get_variable_value(
            AGE_WEIGHT_NAME.replace('age', 'age_in_years')),
        warm_started_linear_classifier.get_variable_value(AGE_WEIGHT_NAME))
    self.assertAllClose(
        linear_classifier.get_variable_value(BIAS_NAME),
        warm_started_linear_classifier.get_variable_value(BIAS_NAME))
| true | true |
f73ac2f2d17a0c152f0b6bf64849e9993517977a | 5,334 | py | Python | hailo_model_zoo/utils/data.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 29 | 2021-07-19T13:53:18.000Z | 2022-01-26T11:20:55.000Z | hailo_model_zoo/utils/data.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 1 | 2022-03-18T03:27:24.000Z | 2022-03-20T14:58:41.000Z | hailo_model_zoo/utils/data.py | nadaved1/hailo_model_zoo | 42b716f337dde4ec602022a34d6a07a1bbd45539 | [
"MIT"
] | 10 | 2021-07-20T03:19:55.000Z | 2022-02-25T13:57:30.000Z | from builtins import object
import os
import cv2
import numpy as np
import tensorflow as tf
from hailo_model_zoo.core.datasets import dataset_factory
from hailo_model_zoo.utils.video_utils import VideoCapture
def _open_image_file(img_path):
    """Read and decode an image file, returning (image, info-dict) tensors."""
    raw_bytes = tf.io.read_file(img_path)
    decoded = tf.cast(tf.image.decode_jpeg(raw_bytes, channels=3), tf.uint8)
    # The last path component is used as the image name.
    file_name = tf.compat.v1.string_split([img_path], os.path.sep).values[-1]
    info = {
        'img_orig': decoded,
        'image_name': file_name,
    }
    return decoded, info
def _read_npz(item):
img_name = item.decode()
data = np.load(img_name, allow_pickle=True)
base_name = os.path.basename(img_name).replace('.npz', '')
data = {key: data[key].item() for key in data}
image_info = data[base_name]['image_info']
rpn_boxes = image_info['rpn_proposals']
num_rpn_boxes = image_info['num_rpn_proposals']
return data[base_name]['logits'], rpn_boxes, num_rpn_boxes, image_info['image_name'], \
image_info['image_id']
def _open_featuremap(img_path):
    """Wrap _read_npz in a tf.compat.v1.py_func and split its outputs."""
    outputs = tf.compat.v1.py_func(
        _read_npz, [img_path],
        [tf.float32, tf.float32, tf.int64, tf.string, tf.int32])
    featuremap, rpn_boxes, num_rpn_boxes, image_name, image_id = outputs
    return featuremap, {"rpn_proposals": rpn_boxes,
                        "num_rpn_boxes": num_rpn_boxes,
                        "image_name": image_name,
                        "image_id": image_id}
def _parse_video_frame(image, name):
    """Cast a video frame to uint8 and package the metadata dict used downstream."""
    frame = tf.cast(image, tf.uint8)
    metadata = {
        'label_index': tf.cast(0, tf.float32),
        'img_orig': frame,
        'image_name': name,
        'is_same': tf.cast(0, tf.float32),
        # Grayscale copy of the frame, stored under 'mask'.
        'mask': tf.image.rgb_to_grayscale(frame),
    }
    return frame, metadata
def _video_generator(video_path):
    """Return a no-arg generator factory yielding (RGB frame, frame name) pairs.

    The returned callable is suitable for tf.data.Dataset.from_generator,
    which invokes it to (re)create the generator.
    """
    def _frames():
        stem, _ = os.path.splitext(os.path.basename(video_path))
        with VideoCapture(video_path) as capture:
            # Zero-pad the frame index to the width of the total frame count
            # so that generated names sort lexicographically.
            total_frames = int(capture.get(cv2.CAP_PROP_FRAME_COUNT))
            pad_width = len(str(total_frames))
            name_template = '{{}}_frame_{{:0{}d}}.png'.format(pad_width)
            frame_index = 0
            while True:
                grabbed, frame = capture.read()
                if not grabbed:
                    break
                frame_name = name_template.format(stem, frame_index)
                # OpenCV decodes BGR; downstream expects RGB.
                yield cv2.cvtColor(frame, cv2.COLOR_BGR2RGB), frame_name
                frame_index += 1
    return _frames
class DataFeed(object):
    """Base class for input pipelines.

    Subclasses build a ``tf.data.Dataset`` and assign it to ``self._dataset``;
    this class holds the shared preprocessing callback and batch size and
    exposes a TF v1 initializable iterator over that dataset.
    """
    def __init__(self, preprocessing_callback, batch_size=8):
        # Optional per-element preprocessing function applied by subclasses.
        self._preproc_callback = preprocessing_callback
        self._batch_size = batch_size
    @property
    def iterator(self):
        """Initializable iterator over the subclass-built ``self._dataset``."""
        return tf.compat.v1.data.make_initializable_iterator(self._dataset)
class TFRecordFeed(DataFeed):
    """Feed that reads a TFRecord file, parsed by a dataset-specific function."""
    def __init__(self, preprocessing_callback, batch_size, tfrecord_file, dataset_name):
        super().__init__(preprocessing_callback, batch_size=batch_size)
        # The parse function is selected by dataset name.
        parse_func = dataset_factory.get_dataset_parse_func(dataset_name)
        dataset = tf.data.TFRecordDataset([str(tfrecord_file)]).map(parse_func)
        if self._preproc_callback:
            dataset = dataset.map(self._preproc_callback)
        self._dataset = dataset.batch(self._batch_size)
def _dataset_from_folder(folder_path):
    """Build a dataset of sorted file paths (jpg/jpeg/png/npz) under *folder_path*."""
    supported = {'.jpg', '.jpeg', '.png', '.npz'}
    matches = sorted(
        os.path.join(root, file_name)
        for root, _, file_names in os.walk(folder_path, topdown=False)
        for file_name in file_names
        if os.path.splitext(file_name)[-1].lower() in supported
    )
    paths_tensor = tf.convert_to_tensor(matches, dtype=tf.string)
    return tf.data.Dataset.from_tensor_slices(paths_tensor)
class ImageFeed(DataFeed):
    """Feed that reads image files from a folder tree.

    NOTE(review): the folder scan also matches .png/.npz, but
    ``_open_image_file`` decodes with ``decode_jpeg`` — confirm non-JPEG
    inputs behave as intended.
    """
    def __init__(self, preprocessing_callback, batch_size, folder_path):
        super().__init__(preprocessing_callback, batch_size)
        dataset = _dataset_from_folder(folder_path).map(_open_image_file)
        if self._preproc_callback:
            dataset = dataset.map(self._preproc_callback)
        self._dataset = dataset.batch(self._batch_size)
class RegionProposalFeed(DataFeed):
    """Feed that loads precomputed feature maps / RPN proposals from .npz files."""
    def __init__(self, preprocessing_callback, batch_size, folder_path):
        super().__init__(preprocessing_callback, batch_size)
        dataset = _dataset_from_folder(folder_path).map(_open_featuremap)
        if self._preproc_callback:
            dataset = dataset.map(self._preproc_callback)
        # Split the leading dimension of each element before re-batching.
        dataset = dataset.apply(tf.data.experimental.unbatch())
        self._dataset = dataset.batch(self._batch_size)
class VideoFeed(DataFeed):
    """Feed that decodes frames from a single video file."""
    def __init__(self, preprocessing_callback, batch_size, file_path):
        super().__init__(preprocessing_callback, batch_size=batch_size)
        # Generator yields (frame, frame_name) pairs.
        dataset = tf.data.Dataset.from_generator(_video_generator(file_path), (tf.float32, tf.string))
        dataset = dataset.map(_parse_video_frame)
        if self._preproc_callback:
            dataset = dataset.map(self._preproc_callback)
        self._dataset = dataset.batch(self._batch_size)
| 38.934307 | 107 | 0.663105 | from builtins import object
import os
import cv2
import numpy as np
import tensorflow as tf
from hailo_model_zoo.core.datasets import dataset_factory
from hailo_model_zoo.utils.video_utils import VideoCapture
def _open_image_file(img_path):
image = tf.io.read_file(img_path)
image = tf.cast(tf.image.decode_jpeg(image, channels=3), tf.uint8)
image_name = tf.compat.v1.string_split([img_path], os.path.sep).values[-1]
return image, {
'img_orig': image,
'image_name': image_name,
}
def _read_npz(item):
img_name = item.decode()
data = np.load(img_name, allow_pickle=True)
base_name = os.path.basename(img_name).replace('.npz', '')
data = {key: data[key].item() for key in data}
image_info = data[base_name]['image_info']
rpn_boxes = image_info['rpn_proposals']
num_rpn_boxes = image_info['num_rpn_proposals']
return data[base_name]['logits'], rpn_boxes, num_rpn_boxes, image_info['image_name'], \
image_info['image_id']
def _open_featuremap(img_path):
featuremap, rpn_boxes, num_rpn_boxes, \
image_name, image_id = tf.compat.v1.py_func(_read_npz, [img_path], [tf.float32, tf.float32,
tf.int64, tf.string, tf.int32])
return featuremap, {"rpn_proposals": rpn_boxes,
"num_rpn_boxes": num_rpn_boxes,
"image_name": image_name,
"image_id": image_id}
def _parse_video_frame(image, name):
image = tf.cast(image, tf.uint8)
return image, {'label_index': tf.cast(0, tf.float32),
'img_orig': image,
'image_name': name,
'is_same': tf.cast(0, tf.float32),
'mask': tf.image.rgb_to_grayscale(image)}
def _video_generator(video_path):
def _video_generator_implementation():
filename = os.path.basename(video_path)
base, _ = os.path.splitext(filename)
with VideoCapture(video_path) as cap:
total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
required_digits = len(str(total_frames))
number_format = '{{:0{}d}}'.format(required_digits)
name_format = '{}_frame_' + number_format + '.png'
frame_count = 0
success = True
while success:
success, image = cap.read()
if success:
image_name = name_format.format(base, frame_count)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
yield image, image_name
frame_count += 1
return _video_generator_implementation
class DataFeed(object):
def __init__(self, preprocessing_callback, batch_size=8):
self._preproc_callback = preprocessing_callback
self._batch_size = batch_size
@property
def iterator(self):
return tf.compat.v1.data.make_initializable_iterator(self._dataset)
class TFRecordFeed(DataFeed):
def __init__(self, preprocessing_callback, batch_size, tfrecord_file, dataset_name):
super().__init__(preprocessing_callback, batch_size=batch_size)
parse_func = dataset_factory.get_dataset_parse_func(dataset_name)
dataset = tf.data.TFRecordDataset([str(tfrecord_file)]).map(parse_func)
if self._preproc_callback:
dataset = dataset.map(self._preproc_callback)
self._dataset = dataset.batch(self._batch_size)
def _dataset_from_folder(folder_path):
all_files = []
for root, dirs, files in os.walk(folder_path, topdown=False):
for name in files:
if os.path.splitext(name)[-1].lower() in ['.jpg', '.jpeg', '.png', '.npz']:
all_files.append(os.path.join(root, name))
all_files.sort()
all_files = tf.convert_to_tensor(all_files, dtype=tf.string)
dataset = tf.data.Dataset.from_tensor_slices(all_files)
return dataset
class ImageFeed(DataFeed):
def __init__(self, preprocessing_callback, batch_size, folder_path):
super().__init__(preprocessing_callback, batch_size)
dataset = _dataset_from_folder(folder_path).map(_open_image_file)
if self._preproc_callback:
dataset = dataset.map(self._preproc_callback)
self._dataset = dataset.batch(self._batch_size)
class RegionProposalFeed(DataFeed):
def __init__(self, preprocessing_callback, batch_size, folder_path):
super().__init__(preprocessing_callback, batch_size)
dataset = _dataset_from_folder(folder_path).map(_open_featuremap)
if self._preproc_callback:
dataset = dataset.map(self._preproc_callback)
dataset = dataset.apply(tf.data.experimental.unbatch())
self._dataset = dataset.batch(self._batch_size)
class VideoFeed(DataFeed):
def __init__(self, preprocessing_callback, batch_size, file_path):
super().__init__(preprocessing_callback, batch_size=batch_size)
dataset = tf.data.Dataset.from_generator(_video_generator(file_path), (tf.float32, tf.string))
dataset = dataset.map(_parse_video_frame)
if self._preproc_callback:
dataset = dataset.map(self._preproc_callback)
self._dataset = dataset.batch(self._batch_size)
| true | true |
f73ac3dc6d5826cb29c53182ff910a6bdfb6c007 | 739 | py | Python | profiles_api/permissions.py | ndenisj/profiles-rest-api | a0c0b6d99b6dfee3121ce734615803e3ca8ca407 | [
"MIT"
] | 5 | 2019-09-26T15:51:22.000Z | 2019-09-26T15:51:41.000Z | profiles_api/permissions.py | ndenisj/profiles-rest-api | a0c0b6d99b6dfee3121ce734615803e3ca8ca407 | [
"MIT"
] | 5 | 2020-06-05T23:23:14.000Z | 2022-02-10T10:26:28.000Z | profiles_api/permissions.py | ndenisj/profiles-rest-api | a0c0b6d99b6dfee3121ce734615803e3ca8ca407 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
    """Permission that restricts profile edits to the profile's owner."""

    def has_object_permission(self, request, view, obj):
        """Grant read-only access to everyone; writes only to the owner."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
    """Permission that restricts status updates to the status author."""

    def has_object_permission(self, request, view, obj):
        """Grant read-only access to everyone; writes only to the author."""
        is_read_only = request.method in permissions.SAFE_METHODS
        return is_read_only or obj.user_profile.id == request.user.id
| 32.130435 | 65 | 0.692828 | from rest_framework import permissions
class UpdateOwnProfile(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.id == request.user.id
class UpdateOwnStatus(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.method in permissions.SAFE_METHODS:
return True
return obj.user_profile.id == request.user.id
| true | true |
f73ac4a510a486bbbf47eb04d9e45ed6f38c5453 | 6,599 | py | Python | tictactoe.py | erincase/tictactoe | b97790f58bbf8299e5c1241b7e9d3594ea3df893 | [
"MIT"
] | null | null | null | tictactoe.py | erincase/tictactoe | b97790f58bbf8299e5c1241b7e9d3594ea3df893 | [
"MIT"
] | null | null | null | tictactoe.py | erincase/tictactoe | b97790f58bbf8299e5c1241b7e9d3594ea3df893 | [
"MIT"
] | null | null | null | from copy import deepcopy
from typing import NamedTuple, Optional
import constants as c
def main():
    """
    Runs a game of tic-tac-toe. Players "X" and "O" alternate choosing squares
    of the board; after each turn the board is checked for a winner. When all
    squares are filled with no winner, the game ends in a tie.
    """
    # Each square in the board is assigned a label (1a-3c).
    board = deepcopy(c.INITIAL_BOARD_VALUES)
    print_welcome_message(board)

    player = None
    while True:
        # Alternate between "X" and "O".
        player = get_next_player(player)
        square = get_next_move(player, board)
        # Record the move, show the board, and check for a winner / full board.
        board[square] = player
        print_board(board)
        outcome = get_winner(board)
        if outcome is not None:
            break

    print(get_final_message(outcome))
def print_welcome_message(board_values: dict[str, str]):
    """
    Prints a nice welcome message and displays the empty board.
    :param board_values: Map of current, empty board values. Empty squares should map to their
                         square label.
    """
    print(c.INTRO_MESSAGE)
    # Show the labeled, empty board so players know which square is which.
    print_board(board_values)
def print_board(board_values: dict[str, str]):
    """
    Prints a diagram of the current tic-tac-toe board.
    :param board_values: Map of current board values. Empty squares should map to their square
                         label. Filled squares should map to either "X" or "O".
    """
    # c.BOARD is a format template with one placeholder per square label.
    print(c.BOARD.format(**board_values))
def get_final_message(winner: str) -> str:
    """
    Builds the end-of-game message: a winner announcement, or the tie message
    when the board filled up without a winner.
    :param winner: "X", "O", or "Tie"
    """
    if winner != c.TIE:
        return c.WINNER_MESSAGE.format(player=winner)
    return c.TIE_MESSAGE
def get_next_player(current_player: Optional[str]) -> str:
    """
    Alternates turns: "O" follows "X", "X" follows "O", and "X" opens the game
    when no one has played yet.
    :param current_player: "X", "O", or None
    :return: The next player (either "X" or "O")
    """
    return c.O if current_player == c.X else c.X
def get_next_move(current_player: str, board_values: dict[str, str]) -> str:
    """
    Asks the current player to pick an empty square and returns their choice. If the player inputs
    an invalid choice, will print an invalid response message and ask again until valid input is
    received. Invalid choices include:
        - a value that doesn't map to a square label (1a-3c)
        - a square that has already been filled in by either player
    :param current_player: The player to request input for (either "X" or "O")
    :param board_values: Map from square label (1a-3c) to square's values (1a-3c for empty squares,
                         "X" or "O" for filled squares)
    :return: The current player's chosen square
    """
    # Loop until the player supplies a valid, empty square. (The previous
    # `valid_input` flag was initialized but never updated — `while True`
    # plus an explicit return makes the exit condition clear.)
    while True:
        raw_input = input(c.NEXT_TURN_MESSAGE.format(player=current_player))
        # Whitespace stripping / lowercasing happens inside the validator.
        validation_result = get_validation_result(raw_input, board_values)
        if validation_result.is_valid:
            return validation_result.cleaned_input
        print(validation_result.error_message)
class ValidationResult(NamedTuple):
    """ Represents results of validations performed on a player's raw input during the game """
    # True when the input names an existing, still-empty square.
    is_valid: bool
    # The raw input stripped of surrounding whitespace and lowercased.
    cleaned_input: str
    # Human-readable reason the input was rejected; None when valid.
    error_message: Optional[str]
def get_validation_result(raw_input: str, board_values: dict[str, str]) -> ValidationResult:
    """
    Removes excess whitespace around the input, converts to lowercase, and checks that the input is
    valid. Invalid choices include:
        - a value that doesn't map to a square label (1a-3c)
        - a square that has already been filled in by either player
    :param raw_input: Raw input from player
    :param board_values: Map from square label (1a-3c) to square's values (1a-3c for empty squares,
                         "X" or "O" for filled squares)
    :return: ValidationResult with the validity flag, the cleaned input, and an
             error message explaining the failure (None when the input is valid).
    """
    choice = raw_input.strip().lower()
    # Decide on at most one rejection reason; None means the input passed.
    if choice not in board_values:
        error = c.INVALID_SQUARE_MESSAGE.format(input=choice)
    elif board_values[choice] in (c.X, c.O):
        error = c.UNAVAILABLE_SQUARE_MESSAGE.format(square=choice)
    else:
        error = None
    return ValidationResult(is_valid=error is None,
                            cleaned_input=choice,
                            error_message=error)
def get_winner(board_values: dict[str, str]) -> Optional[str]:
    """
    Scans every winning combination (rows, columns, diagonals) for a line
    filled entirely by one player.
    :param board_values: Map from square label (1a-3c) to square's values (1a-3c for empty squares,
                         "X" or "O" for filled squares)
    :return: "X" or "O" when a line is won, the tie marker when the board is
             full with no winner, otherwise None.
    """
    for combo in c.WINNING_COMBOS:
        marks = {board_values[square] for square in combo}
        # A line is won exactly when it holds a single repeated value; empty
        # squares keep their distinct labels, so they never collapse to one.
        if len(marks) == 1:
            return next(iter(marks))
    # A full board contains only "X" and "O" values — with no winner, it's a tie.
    if set(board_values.values()) == {c.X, c.O}:
        return c.TIE
    return None
# Start a game only when this file is executed directly (not when imported).
if __name__ == "__main__":
    main()
| 37.282486 | 100 | 0.661161 | from copy import deepcopy
from typing import NamedTuple, Optional
import constants as c
def main():
board_values = deepcopy(c.INITIAL_BOARD_VALUES)
print_welcome_message(board_values)
winner = None
current_player = None
while winner is None:
current_player = get_next_player(current_player)
chosen_square = get_next_move(current_player, board_values)
board_values[chosen_square] = current_player
print_board(board_values)
winner = get_winner(board_values)
print(get_final_message(winner))
def print_welcome_message(board_values: dict[str, str]):
print(c.INTRO_MESSAGE)
print_board(board_values)
def print_board(board_values: dict[str, str]):
print(c.BOARD.format(**board_values))
def get_final_message(winner: str) -> str:
if winner == c.TIE:
return c.TIE_MESSAGE
else:
return c.WINNER_MESSAGE.format(player=winner)
def get_next_player(current_player: Optional[str]) -> str:
if current_player == c.X:
return c.O
else:
return c.X
def get_next_move(current_player: str, board_values: dict[str, str]) -> str:
valid_input = False
while not valid_input:
raw_input = input(c.NEXT_TURN_MESSAGE.format(player=current_player))
validation_result = get_validation_result(raw_input, board_values)
if not validation_result.is_valid:
print(validation_result.error_message)
continue
return validation_result.cleaned_input
class ValidationResult(NamedTuple):
is_valid: bool
cleaned_input: str
error_message: Optional[str]
def get_validation_result(raw_input: str, board_values: dict[str, str]) -> ValidationResult:
cleaned_input = raw_input.strip().lower()
if cleaned_input not in board_values.keys():
return ValidationResult(is_valid=False,
cleaned_input=cleaned_input,
error_message=c.INVALID_SQUARE_MESSAGE.format(input=cleaned_input))
if board_values[cleaned_input] in [c.X, c.O]:
return ValidationResult(is_valid=False,
cleaned_input=cleaned_input,
error_message=c.UNAVAILABLE_SQUARE_MESSAGE.format(
square=cleaned_input))
return ValidationResult(is_valid=True,
cleaned_input=cleaned_input,
error_message=None)
def get_winner(board_values: dict[str, str]) -> Optional[str]:
for combo in c.WINNING_COMBOS:
entries = {board_values[k] for k in combo}
if len(entries) == 1:
return entries.pop()
# if all the squares are filled but no winners were detected, it's a tie
if set(board_values.values()) == {c.X, c.O}:
return c.TIE
return None
if __name__ == "__main__":
main()
| true | true |
f73ac593c4c8b4927ec63131454d97584ada7dc4 | 250 | py | Python | python--exercicios/ex034.py | Eliezer2000/python | 12abb54c6536acb2f36b8f34bf51ec765857eb75 | [
"MIT"
] | null | null | null | python--exercicios/ex034.py | Eliezer2000/python | 12abb54c6536acb2f36b8f34bf51ec765857eb75 | [
"MIT"
] | null | null | null | python--exercicios/ex034.py | Eliezer2000/python | 12abb54c6536acb2f36b8f34bf51ec765857eb75 | [
"MIT"
] | null | null | null | salário = float(input('Digite o valor do seu salário: R$'))
if salário <= 1250:
novo = salário + (salário * 15 / 100)
else:
novo = salário + (salário * 10 / 100)
print('Quem ganhava R$ {:.2f} passou a ganhar R$ {:.2f}'.format(salário, novo))
| 35.714286 | 79 | 0.628 | salário = float(input('Digite o valor do seu salário: R$'))
if salário <= 1250:
novo = salário + (salário * 15 / 100)
else:
novo = salário + (salário * 10 / 100)
print('Quem ganhava R$ {:.2f} passou a ganhar R$ {:.2f}'.format(salário, novo))
| true | true |
f73ac64c9ba336d2b40baf7c8d91aeeb100b3ed3 | 4,531 | py | Python | AxxellClient/models/data_point.py | cinaq/axxell-client-python | a862dd36552ef8149517c5d5034a52a37abc2d33 | [
"Apache-2.0"
] | null | null | null | AxxellClient/models/data_point.py | cinaq/axxell-client-python | a862dd36552ef8149517c5d5034a52a37abc2d33 | [
"Apache-2.0"
] | null | null | null | AxxellClient/models/data_point.py | cinaq/axxell-client-python | a862dd36552ef8149517c5d5034a52a37abc2d33 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
axxell-api
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: 1.0.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from pprint import pformat
from six import iteritems
import re
class DataPoint(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    def __init__(self, creation_time=None, label=None, value=None):
        """
        DataPoint - a model defined in Swagger

        :param dict swaggerTypes: The key is attribute name
                                  and the value is attribute type.
        :param dict attributeMap: The key is attribute name
                                  and the value is json key in definition.
        """
        # Attribute name -> swagger type string.
        self.swagger_types = {
            'creation_time': 'str',
            'label': 'str',
            'value': 'float'
        }

        # Attribute name -> JSON key in the API definition.
        self.attribute_map = {
            'creation_time': 'creationTime',
            'label': 'label',
            'value': 'value'
        }

        self._creation_time = creation_time
        self._label = label
        self._value = value

    @property
    def creation_time(self):
        """
        Gets the creation_time of this DataPoint.

        :return: The creation_time of this DataPoint.
        :rtype: str
        """
        return self._creation_time

    @creation_time.setter
    def creation_time(self, creation_time):
        """
        Sets the creation_time of this DataPoint.

        :param creation_time: The creation_time of this DataPoint.
        :type: str
        """
        self._creation_time = creation_time

    @property
    def label(self):
        """
        Gets the label of this DataPoint.

        :return: The label of this DataPoint.
        :rtype: str
        """
        return self._label

    @label.setter
    def label(self, label):
        """
        Sets the label of this DataPoint.

        :param label: The label of this DataPoint.
        :type: str
        """
        self._label = label

    @property
    def value(self):
        """
        Gets the value of this DataPoint.

        :return: The value of this DataPoint.
        :rtype: float
        """
        return self._value

    @value.setter
    def value(self, value):
        """
        Sets the value of this DataPoint.

        :param value: The value of this DataPoint.
        :type: float
        """
        self._value = value

    def to_dict(self):
        """
        Returns the model properties as a dict, recursing into nested models.
        """
        result = {}
        # dict.items() replaces six.iteritems — identical behavior on py3.
        for attr, _ in self.swagger_types.items():
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = [
                    x.to_dict() if hasattr(x, "to_dict") else x for x in value
                ]
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = {
                    k: v.to_dict() if hasattr(v, "to_dict") else v
                    for k, v in value.items()
                }
            else:
                result[attr] = value
        return result

    def to_str(self):
        """
        Returns the string representation of the model.
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`.
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal.

        Fix: comparing against a non-DataPoint now returns NotImplemented
        (Python falls back to identity, yielding False) instead of raising
        AttributeError by blindly reading ``other.__dict__``.
        """
        if not isinstance(other, DataPoint):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal.
        """
        return not self == other
| 25.312849 | 105 | 0.557051 |
from pprint import pformat
from six import iteritems
import re
class DataPoint(object):
    """Swagger-generated model holding a labelled, timestamped value."""
    def __init__(self, creation_time=None, label=None, value=None):
        """Build a DataPoint from optional creation_time/label/value."""
        # attribute name -> swagger type; drives to_dict() iteration
        self.swagger_types = {
            'creation_time': 'str',
            'label': 'str',
            'value': 'float'
        }
        # attribute name -> JSON key in the API definition
        self.attribute_map = {
            'creation_time': 'creationTime',
            'label': 'label',
            'value': 'value'
        }
        self._creation_time = creation_time
        self._label = label
        self._value = value
    @property
    def creation_time(self):
        """Creation timestamp of this DataPoint (str)."""
        return self._creation_time
    @creation_time.setter
    def creation_time(self, creation_time):
        """Set the creation timestamp."""
        self._creation_time = creation_time
    @property
    def label(self):
        """Label of this DataPoint (str)."""
        return self._label
    @label.setter
    def label(self, label):
        """Set the label."""
        self._label = label
    @property
    def value(self):
        """Numeric value of this DataPoint (float)."""
        return self._value
    @value.setter
    def value(self, value):
        """Set the numeric value."""
        self._value = value
    def to_dict(self):
        """Return the model's properties as a dict; nested models with a
        to_dict() method (including inside lists/dicts) are recursed."""
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # convert dict values that are themselves models
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return a pretty-printed string representation of the model."""
        return pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """Equal when all attributes (including type maps) match."""
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of __eq__."""
        return not self == other
| true | true |
f73ac79184edf7e3eabd0dc8ef2b2b487ecbbd5f | 56,828 | py | Python | Lib/tkinter/test/test_ttk/test_widgets.py | adamwen829/cpython | 0f1c7c760c6b2804f5d05cae9ca045d1fdf3d667 | [
"PSF-2.0"
] | 2 | 2017-05-05T02:07:59.000Z | 2017-08-18T09:24:48.000Z | Lib/tkinter/test/test_ttk/test_widgets.py | adamwen829/cpython | 0f1c7c760c6b2804f5d05cae9ca045d1fdf3d667 | [
"PSF-2.0"
] | null | null | null | Lib/tkinter/test/test_ttk/test_widgets.py | adamwen829/cpython | 0f1c7c760c6b2804f5d05cae9ca045d1fdf3d667 | [
"PSF-2.0"
] | 3 | 2016-04-21T07:58:27.000Z | 2016-05-06T21:34:44.000Z | import unittest
import tkinter
from tkinter import ttk
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
    """Option checks shared by all ttk widgets (class, padding, style)."""
    def test_class(self):
        # 'class' is read-only after creation; it can only be set via class_=
        widget = self.create()
        self.assertEqual(widget['class'], '')
        errmsg='attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
            errmsg='Attempt to change read-only option'
        self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')
    def test_padding(self):
        # padding accepts 1-4 screen-distance values and echoes them as strings
        widget = self.create()
        self.checkParam(widget, 'padding', 0, expected=('0',))
        self.checkParam(widget, 'padding', 5, expected=('5',))
        self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
        self.checkParam(widget, 'padding', (5, 6, 7),
                        expected=('5', '6', '7'))
        self.checkParam(widget, 'padding', (5, 6, 7, 8),
                        expected=('5', '6', '7', '8'))
        self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
        self.checkParam(widget, 'padding', (), expected='')
    def test_style(self):
        # unknown styles are rejected; oriented widgets prefix the orientation
        widget = self.create()
        self.assertEqual(widget['style'], '')
        errmsg = 'Layout Foo not found'
        if hasattr(self, 'default_orient'):
            errmsg = ('Layout %s.Foo not found' %
                      getattr(self, 'default_orient').title())
        self.checkInvalidParam(widget, 'style', 'Foo',
                               errmsg=errmsg)
        widget2 = self.create(class_='Foo')
        self.assertEqual(widget2['class'], 'Foo')
        # XXX
        pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
    """Tests methods available in every ttk widget."""
    def setUp(self):
        # A packed, visible button is required for identify()/state() tests.
        super().setUp()
        self.widget = ttk.Button(self.root, width=0, text="Text")
        self.widget.pack()
        self.widget.wait_visibility()
    def test_identify(self):
        # identify(x, y) names the element under the point, '' outside.
        self.widget.update_idletasks()
        self.assertEqual(self.widget.identify(
            int(self.widget.winfo_width() / 2),
            int(self.widget.winfo_height() / 2)
            ), "label")
        self.assertEqual(self.widget.identify(-1, -1), "")
        # non-integer coordinates must raise TclError
        self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
    def test_widget_state(self):
        # XXX not sure about the portability of all these tests
        self.assertEqual(self.widget.state(), ())
        self.assertEqual(self.widget.instate(['!disabled']), True)
        # changing from !disabled to disabled
        self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
        # no state change
        self.assertEqual(self.widget.state(['disabled']), ())
        # change back to !disable but also active
        self.assertEqual(self.widget.state(['!disabled', 'active']),
            ('!active', 'disabled'))
        # no state changes, again
        self.assertEqual(self.widget.state(['!disabled', 'active']), ())
        self.assertEqual(self.widget.state(['active', '!disabled']), ())
        def test_cb(arg1, **kw):
            return arg1, kw
        # instate() forwards extra positional/keyword args to the callback
        self.assertEqual(self.widget.instate(['!disabled'],
            test_cb, "hi", **{"msg": "there"}),
            ('hi', {'msg': 'there'}))
        # attempt to set invalid statespec
        currstate = self.widget.state()
        self.assertRaises(tkinter.TclError, self.widget.instate,
            ['badstate'])
        self.assertRaises(tkinter.TclError, self.widget.instate,
            ['disabled', 'badstate'])
        # verify that widget didn't change its state
        self.assertEqual(currstate, self.widget.state())
        # ensuring that passing None as state doesn't modify current state
        self.widget.state(['active', '!disabled'])
        self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
    """Base for container-like ttk widgets; pixel values are not converted."""
    _conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
    """Standard-option tests for ttk.Frame."""
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'padding', 'relief', 'style', 'takefocus',
        'width',
    )
    def create(self, **kwargs):
        return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
    """Standard-option plus label-specific tests for ttk.LabelFrame."""
    OPTIONS = (
        'borderwidth', 'class', 'cursor', 'height',
        'labelanchor', 'labelwidget',
        'padding', 'relief', 'style', 'takefocus',
        'text', 'underline', 'width',
    )
    def create(self, **kwargs):
        return ttk.LabelFrame(self.root, **kwargs)
    def test_labelanchor(self):
        # only compass-point anchors are accepted; 'center' is invalid here
        widget = self.create()
        self.checkEnumParam(widget, 'labelanchor',
                'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
                errmsg='Bad label anchor specification {}')
        self.checkInvalidParam(widget, 'labelanchor', 'center')
    def test_labelwidget(self):
        # labelwidget is stored as the widget's Tk path name
        widget = self.create()
        label = ttk.Label(self.root, text='Mupp', name='foo')
        self.checkParam(widget, 'labelwidget', label, expected='.foo')
        label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
    """Shared checks for label-like ttk widgets (Label, Button, ...)."""
    def checkImageParam(self, widget, name):
        # an image option accepts a name, an image object, or a statespec list
        image = tkinter.PhotoImage(master=self.root, name='image1')
        image2 = tkinter.PhotoImage(master=self.root, name='image2')
        self.checkParam(widget, name, image, expected=('image1',))
        self.checkParam(widget, name, 'image1', expected=('image1',))
        self.checkParam(widget, name, (image,), expected=('image1',))
        self.checkParam(widget, name, (image, 'active', image2),
                        expected=('image1', 'active', 'image2'))
        self.checkParam(widget, name, 'image1 active image2',
                        expected=('image1', 'active', 'image2'))
        # unknown image names are rejected with a TclError
        self.checkInvalidParam(widget, name, 'spam',
                errmsg='image "spam" doesn\'t exist')
    def test_compound(self):
        widget = self.create()
        self.checkEnumParam(widget, 'compound',
                'none', 'text', 'image', 'center',
                'top', 'bottom', 'left', 'right')
    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
    def test_width(self):
        # width may be positive, negative (minimum width) or zero
        widget = self.create()
        self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
    """Standard-option tests for ttk.Label."""
    OPTIONS = (
        'anchor', 'background',
        'class', 'compound', 'cursor', 'font', 'foreground',
        'image', 'justify', 'padding', 'relief', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width', 'wraplength',
    )
    _conv_pixels = noconv
    def create(self, **kwargs):
        return ttk.Label(self.root, **kwargs)
    def test_font(self):
        # X11 font specifications are accepted verbatim
        widget = self.create()
        self.checkParam(widget, 'font',
                        '-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
    """Standard-option and invoke() tests for ttk.Button."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor', 'default',
        'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )
    def create(self, **kwargs):
        return ttk.Button(self.root, **kwargs)
    def test_default(self):
        widget = self.create()
        self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
    def test_invoke(self):
        # invoke() must run the configured command
        success = []
        btn = ttk.Button(self.root, command=lambda: success.append(1))
        btn.invoke()
        self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
    """Standard-option and invoke()/variable tests for ttk.Checkbutton."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'offvalue', 'onvalue',
        'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'variable', 'width',
    )
    def create(self, **kwargs):
        return ttk.Checkbutton(self.root, **kwargs)
    def test_offvalue(self):
        widget = self.create()
        self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
    def test_onvalue(self):
        widget = self.create()
        self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
    def test_invoke(self):
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"
        cbtn = ttk.Checkbutton(self.root, command=cb_test)
        # the variable automatically created by ttk.Checkbutton is actually
        # undefined till we invoke the Checkbutton
        self.assertEqual(cbtn.state(), ('alternate', ))
        self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
            cbtn['variable'])
        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        # first invoke toggles the variable to onvalue
        self.assertEqual(cbtn['onvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
        self.assertTrue(success)
        # with no command, invoke() returns an empty result but still toggles
        cbtn['command'] = ''
        res = cbtn.invoke()
        self.assertFalse(str(res))
        self.assertLessEqual(len(success), 1)
        self.assertEqual(cbtn['offvalue'],
            cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option, event and values-handling tests for ttk.Combobox."""
    OPTIONS = (
        'class', 'cursor', 'exportselection', 'height',
        'justify', 'postcommand', 'state', 'style',
        'takefocus', 'textvariable', 'values', 'width',
    )
    def setUp(self):
        super().setUp()
        self.combo = self.create()
    def create(self, **kwargs):
        return ttk.Combobox(self.root, **kwargs)
    def test_height(self):
        self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i') if False else None  # noqa
    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
    def _show_drop_down_listbox(self):
        # simulate a click on the drop-down arrow at the right edge
        width = self.combo.winfo_width()
        self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
        self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
        self.combo.update_idletasks()
    def test_virtual_event(self):
        # selecting an entry must fire <<ComboboxSelected>>
        success = []
        self.combo['values'] = [1]
        self.combo.bind('<<ComboboxSelected>>',
            lambda evt: success.append(True))
        self.combo.pack()
        self.combo.wait_visibility()
        height = self.combo.winfo_height()
        self._show_drop_down_listbox()
        self.combo.update()
        self.combo.event_generate('<Return>')
        self.combo.update()
        self.assertTrue(success)
    def test_postcommand(self):
        # postcommand runs when the drop-down opens
        success = []
        self.combo['postcommand'] = lambda: success.append(True)
        self.combo.pack()
        self.combo.wait_visibility()
        self._show_drop_down_listbox()
        self.assertTrue(success)
        # testing postcommand removal
        self.combo['postcommand'] = ''
        self._show_drop_down_listbox()
        self.assertEqual(len(success), 1)
    def test_values(self):
        def check_get_current(getval, currval):
            self.assertEqual(self.combo.get(), getval)
            self.assertEqual(self.combo.current(), currval)
        self.assertEqual(self.combo['values'],
                         () if tcl_version < (8, 5) else '')
        check_get_current('', -1)
        self.checkParam(self.combo, 'values', 'mon tue wed thur',
                        expected=('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
        self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
        self.checkParam(self.combo, 'values', '', expected=())
        self.combo['values'] = ['a', 1, 'c']
        self.combo.set('c')
        check_get_current('c', 2)
        self.combo.current(0)
        check_get_current('a', 0)
        self.combo.set('d')
        check_get_current('d', -1)
        # testing values with empty string
        self.combo.set('')
        self.combo['values'] = (1, 2, '', 3)
        check_get_current('', 2)
        # testing values with empty string set through configure
        self.combo.configure(values=[1, '', 2])
        self.assertEqual(self.combo['values'],
                         ('1', '', '2') if self.wantobjects else
                         '1 {} 2')
        # testing values with spaces
        self.combo['values'] = ['a b', 'a\tb', 'a\nb']
        self.assertEqual(self.combo['values'],
                         ('a b', 'a\tb', 'a\nb') if self.wantobjects else
                         '{a b} {a\tb} {a\nb}')
        # testing values with special characters
        self.combo['values'] = [r'a\tb', '"a"', '} {']
        self.assertEqual(self.combo['values'],
                         (r'a\tb', '"a"', '} {') if self.wantobjects else
                         r'a\\tb {"a"} \}\ \{')
        # out of range
        self.assertRaises(tkinter.TclError, self.combo.current,
                          len(self.combo['values']))
        # it expects an integer (or something that can be converted to int)
        self.assertRaises(tkinter.TclError, self.combo.current, '')
        # testing creating combobox with empty string in values
        combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
        self.assertEqual(combo2['values'],
                         ('1', '2', '') if self.wantobjects else '1 2 {}')
        combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option and validation tests for ttk.Entry."""
    OPTIONS = (
        'background', 'class', 'cursor',
        'exportselection', 'font',
        'invalidcommand', 'justify',
        'show', 'state', 'style', 'takefocus', 'textvariable',
        'validate', 'validatecommand', 'width', 'xscrollcommand',
    )
    def setUp(self):
        super().setUp()
        self.entry = self.create()
    def create(self, **kwargs):
        return ttk.Entry(self.root, **kwargs)
    def test_invalidcommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'invalidcommand')
    def test_show(self):
        widget = self.create()
        self.checkParam(widget, 'show', '*')
        self.checkParam(widget, 'show', '')
        self.checkParam(widget, 'show', ' ')
    def test_state(self):
        widget = self.create()
        self.checkParams(widget, 'state',
                         'disabled', 'normal', 'readonly')
    def test_validate(self):
        widget = self.create()
        self.checkEnumParam(widget, 'validate',
                'all', 'key', 'focus', 'focusin', 'focusout', 'none')
    def test_validatecommand(self):
        widget = self.create()
        self.checkCommandParam(widget, 'validatecommand')
    def test_bbox(self):
        # bbox() needs a valid index
        self.assertIsBoundingBox(self.entry.bbox(0))
        self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
        self.assertRaises(tkinter.TclError, self.entry.bbox, None)
    def test_identify(self):
        self.entry.pack()
        self.entry.wait_visibility()
        self.entry.update_idletasks()
        self.assertEqual(self.entry.identify(5, 5), "textarea")
        self.assertEqual(self.entry.identify(-1, -1), "")
        self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
        self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
    def test_validation_options(self):
        # invalidcommand fires only when validatecommand returns False
        success = []
        test_invalid = lambda: success.append(True)
        self.entry['validate'] = 'none'
        self.entry['validatecommand'] = lambda: False
        self.entry['invalidcommand'] = test_invalid
        self.entry.validate()
        self.assertTrue(success)
        self.entry['invalidcommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)
        self.entry['invalidcommand'] = test_invalid
        self.entry['validatecommand'] = lambda: True
        self.entry.validate()
        self.assertEqual(len(success), 1)
        self.entry['validatecommand'] = ''
        self.entry.validate()
        self.assertEqual(len(success), 1)
        # a non-callable validatecommand makes validate() raise
        self.entry['validatecommand'] = True
        self.assertRaises(tkinter.TclError, self.entry.validate)
    def test_validation(self):
        # key validation: only single letters may be inserted
        validation = []
        def validate(to_insert):
            if not 'a' <= to_insert.lower() <= 'z':
                validation.append(False)
                return False
            validation.append(True)
            return True
        self.entry['validate'] = 'key'
        self.entry['validatecommand'] = self.entry.register(validate), '%S'
        self.entry.insert('end', 1)
        self.entry.insert('end', 'a')
        self.assertEqual(validation, [False, True])
        self.assertEqual(self.entry.get(), 'a')
    def test_revalidation(self):
        # validate() re-checks the full contents and toggles 'invalid' state
        def validate(content):
            for letter in content:
                if not 'a' <= letter.lower() <= 'z':
                    return False
            return True
        self.entry['validatecommand'] = self.entry.register(validate), '%P'
        self.entry.insert('end', 'avocado')
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
        self.entry.delete(0, 'end')
        self.assertEqual(self.entry.get(), '')
        self.entry.insert('end', 'a1b')
        self.assertEqual(self.entry.validate(), False)
        self.assertEqual(self.entry.state(), ('invalid', ))
        self.entry.delete(1)
        self.assertEqual(self.entry.validate(), True)
        self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option and pane-management tests for ttk.PanedWindow."""
    OPTIONS = (
        'class', 'cursor', 'height',
        'orient', 'style', 'takefocus', 'width',
    )
    def setUp(self):
        super().setUp()
        self.paned = self.create()
    def create(self, **kwargs):
        return ttk.PanedWindow(self.root, **kwargs)
    def test_orient(self):
        # orient is read-only after creation
        widget = self.create()
        self.assertEqual(str(widget['orient']), 'vertical')
        errmsg='attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0): # actually this was changed in 8.6b3
            errmsg='Attempt to change read-only option'
        self.checkInvalidParam(widget, 'orient', 'horizontal',
                errmsg=errmsg)
        widget2 = self.create(orient='horizontal')
        self.assertEqual(str(widget2['orient']), 'horizontal')
    def test_add(self):
        # attempt to add a child that is not a direct child of the paned window
        label = ttk.Label(self.paned)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        label.destroy()
        child.destroy()
        # another attempt
        label = ttk.Label(self.root)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        child.destroy()
        label.destroy()
        good_child = ttk.Label(self.root)
        self.paned.add(good_child)
        # re-adding a child is not accepted
        self.assertRaises(tkinter.TclError, self.paned.add, good_child)
        other_child = ttk.Label(self.paned)
        self.paned.add(other_child)
        self.assertEqual(self.paned.pane(0), self.paned.pane(1))
        self.assertRaises(tkinter.TclError, self.paned.pane, 2)
        good_child.destroy()
        other_child.destroy()
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)
    def test_forget(self):
        self.assertRaises(tkinter.TclError, self.paned.forget, None)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)
        self.paned.add(ttk.Label(self.root))
        self.paned.forget(0)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)
    def test_insert(self):
        self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)
        child = ttk.Label(self.root)
        child2 = ttk.Label(self.root)
        child3 = ttk.Label(self.root)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)
        self.paned.insert('end', child2)
        self.paned.insert(0, child)
        self.assertEqual(self.paned.panes(), (str(child), str(child2)))
        self.paned.insert(0, child2)
        self.assertEqual(self.paned.panes(), (str(child2), str(child)))
        self.paned.insert('end', child3)
        self.assertEqual(self.paned.panes(),
            (str(child2), str(child), str(child3)))
        # reinserting a child should move it to its current position
        panes = self.paned.panes()
        self.paned.insert('end', child3)
        self.assertEqual(panes, self.paned.panes())
        # moving child3 to child2 position should result in child2 ending up
        # in previous child position and child ending up in previous child3
        # position
        self.paned.insert(child2, child3)
        self.assertEqual(self.paned.panes(),
            (str(child3), str(child2), str(child)))
    def test_pane(self):
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)
        child = ttk.Label(self.root)
        self.paned.add(child)
        self.assertIsInstance(self.paned.pane(0), dict)
        self.assertEqual(self.paned.pane(0, weight=None),
                         0 if self.wantobjects else '0')
        # newer form for querying a single option
        self.assertEqual(self.paned.pane(0, 'weight'),
                         0 if self.wantobjects else '0')
        self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))
        self.assertRaises(tkinter.TclError, self.paned.pane, 0,
                          badoption='somevalue')
    def test_sashpos(self):
        # sashpos() needs at least two panes and a mapped window
        self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
        child = ttk.Label(self.paned, text='a')
        self.paned.add(child, weight=1)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
        child2 = ttk.Label(self.paned, text='b')
        self.paned.add(child2)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)
        self.paned.pack(expand=True, fill='both')
        self.paned.wait_visibility()
        curr_pos = self.paned.sashpos(0)
        self.paned.sashpos(0, 1000)
        self.assertNotEqual(curr_pos, self.paned.sashpos(0))
        self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
    """Standard-option and invoke()/shared-variable tests for ttk.Radiobutton."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'value', 'variable', 'width',
    )
    def create(self, **kwargs):
        return ttk.Radiobutton(self.root, **kwargs)
    def test_value(self):
        widget = self.create()
        self.checkParams(widget, 'value', 1, 2.3, '', 'any string')
    def test_invoke(self):
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"
        # two radiobuttons sharing the same variable
        myvar = tkinter.IntVar(self.root)
        cbtn = ttk.Radiobutton(self.root, command=cb_test,
                               variable=myvar, value=0)
        cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
                                variable=myvar, value=1)
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = int
        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        self.assertEqual(conv(cbtn['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        self.assertTrue(success)
        # with no command, invoke() still selects the button
        cbtn2['command'] = ''
        res = cbtn2.invoke()
        self.assertEqual(str(res), '')
        self.assertLessEqual(len(success), 1)
        self.assertEqual(conv(cbtn2['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
    """Option tests for ttk.Menubutton."""
    OPTIONS = (
        'class', 'compound', 'cursor', 'direction',
        'image', 'menu', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )
    def create(self, **kwargs):
        return ttk.Menubutton(self.root, **kwargs)
    def test_direction(self):
        widget = self.create()
        self.checkEnumParam(widget, 'direction',
                'above', 'below', 'left', 'right', 'flush')
    def test_menu(self):
        # menu is stored as the menu widget's path name
        widget = self.create()
        menu = tkinter.Menu(widget, name='menu')
        self.checkParam(widget, 'menu', menu, conv=str)
        menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option, get()/set() and event tests for ttk.Scale."""
    OPTIONS = (
        'class', 'command', 'cursor', 'from', 'length',
        'orient', 'style', 'takefocus', 'to', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'
    def setUp(self):
        super().setUp()
        self.scale = self.create()
        self.scale.pack()
        self.scale.update()
    def create(self, **kwargs):
        return ttk.Scale(self.root, **kwargs)
    def test_from(self):
        widget = self.create()
        self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)
    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')
    def test_to(self):
        widget = self.create()
        self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)
    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)
    def test_custom_event(self):
        # changing from/to must fire <<RangeChanged>> each time
        failure = [1, 1, 1]  # will need to be empty
        funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())
        self.scale['from'] = 10
        self.scale['from_'] = 10
        self.scale['to'] = 3
        self.assertFalse(failure)
        failure = [1, 1, 1]
        self.scale.configure(from_=2, to=5)
        self.scale.configure(from_=0, to=-2)
        self.scale.configure(to=10)
        self.assertFalse(failure)
    def test_get(self):
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float
        scale_width = self.scale.winfo_width()
        self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
        self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
        self.assertEqual(self.scale.get(), self.scale['value'])
        self.scale['value'] = 30
        self.assertEqual(self.scale.get(), self.scale['value'])
        self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
        self.assertRaises(tkinter.TclError, self.scale.get, 0, '')
    def test_set(self):
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float
        # set restricts the max/min values according to the current range
        max = conv(self.scale['to'])
        new_max = max + 10
        self.scale.set(new_max)
        self.assertEqual(conv(self.scale.get()), max)
        min = conv(self.scale['from'])
        self.scale.set(min - 1)
        self.assertEqual(conv(self.scale.get()), min)
        # changing directly the variable doesn't impose this limitation tho
        var = tkinter.DoubleVar(self.root)
        self.scale['variable'] = var
        var.set(max + 5)
        self.assertEqual(conv(self.scale.get()), var.get())
        self.assertEqual(conv(self.scale.get()), max + 5)
        del var
        # the same happens with the value option
        self.scale['value'] = max + 10
        self.assertEqual(conv(self.scale.get()), max + 10)
        self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))
        # nevertheless, note that the max/min values we can get specifying
        # x, y coords are the ones according to the current range
        self.assertEqual(conv(self.scale.get(0, 0)), min)
        self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)
        self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option tests for ttk.Progressbar."""
    OPTIONS = (
        'class', 'cursor', 'orient', 'length',
        'mode', 'maximum', 'phase',
        'style', 'takefocus', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'
    def create(self, **kwargs):
        return ttk.Progressbar(self.root, **kwargs)
    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')
    def test_maximum(self):
        widget = self.create()
        self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)
    def test_mode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')
    def test_phase(self):
        # XXX
        pass
    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
                             conv=False)
@unittest.skipIf(sys.platform == 'darwin',
                 'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option tests for ttk.Scrollbar (skipped on macOS)."""
    OPTIONS = (
        'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
    )
    default_orient = 'vertical'
    def create(self, **kwargs):
        return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
)
def setUp(self):
super().setUp()
self.nb = self.create(padding=0)
self.child1 = ttk.Label(self.root)
self.child2 = ttk.Label(self.root)
self.nb.add(self.child1, text='a')
self.nb.add(self.child2, text='b')
def create(self, **kwargs):
return ttk.Notebook(self.root, **kwargs)
def test_tab_identifiers(self):
self.nb.forget(0)
self.nb.hide(self.child2)
self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
self.assertEqual(self.nb.index('end'), 1)
self.nb.add(self.child2)
self.assertEqual(self.nb.index('end'), 1)
self.nb.select(self.child2)
self.assertTrue(self.nb.tab('current'))
self.nb.add(self.child1, text='a')
self.nb.pack()
self.nb.wait_visibility()
if sys.platform == 'darwin':
tb_idx = "@20,5"
else:
tb_idx = "@5,5"
self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))
for i in range(5, 100, 5):
try:
if self.nb.tab('@%d, 5' % i, text=None) == 'a':
break
except tkinter.TclError:
pass
else:
self.fail("Tab with text 'a' not found")
def test_add_and_hidden(self):
self.assertRaises(tkinter.TclError, self.nb.hide, -1)
self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
self.assertRaises(tkinter.TclError, self.nb.hide, None)
self.assertRaises(tkinter.TclError, self.nb.add, None)
self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
unknown='option')
tabs = self.nb.tabs()
self.nb.hide(self.child1)
self.nb.add(self.child1)
self.assertEqual(self.nb.tabs(), tabs)
child = ttk.Label(self.root)
self.nb.add(child, text='c')
tabs = self.nb.tabs()
curr = self.nb.index('current')
# verify that the tab gets readded at its previous position
child2_index = self.nb.index(self.child2)
self.nb.hide(self.child2)
self.nb.add(self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.assertEqual(self.nb.index(self.child2), child2_index)
self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
# but the tab next to it (not hidden) is the one selected now
self.assertEqual(self.nb.index('current'), curr + 1)
def test_forget(self):
self.assertRaises(tkinter.TclError, self.nb.forget, -1)
self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
self.assertRaises(tkinter.TclError, self.nb.forget, None)
tabs = self.nb.tabs()
child1_index = self.nb.index(self.child1)
self.nb.forget(self.child1)
self.assertNotIn(str(self.child1), self.nb.tabs())
self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))
self.nb.add(self.child1)
self.assertEqual(self.nb.index(self.child1), 1)
self.assertNotEqual(child1_index, self.nb.index(self.child1))
def test_index(self):
self.assertRaises(tkinter.TclError, self.nb.index, -1)
self.assertRaises(tkinter.TclError, self.nb.index, None)
self.assertIsInstance(self.nb.index('end'), int)
self.assertEqual(self.nb.index(self.child1), 0)
self.assertEqual(self.nb.index(self.child2), 1)
self.assertEqual(self.nb.index('end'), 2)
def test_insert(self):
# moving tabs
tabs = self.nb.tabs()
self.nb.insert(1, tabs[0])
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert(self.child1, self.child2)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert('end', self.child1)
self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
self.nb.insert('end', 0)
self.assertEqual(self.nb.tabs(), tabs)
# bad moves
self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])
# new tab
child3 = ttk.Label(self.root)
self.nb.insert(1, child3)
self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
self.nb.forget(child3)
self.assertEqual(self.nb.tabs(), tabs)
self.nb.insert(self.child1, child3)
self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
self.nb.forget(child3)
self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)
# bad inserts
self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
self.assertRaises(tkinter.TclError, self.nb.insert, None, None)
def test_select(self):
self.nb.pack()
self.nb.wait_visibility()
success = []
tab_changed = []
self.child1.bind('<Unmap>', lambda evt: success.append(True))
self.nb.bind('<<NotebookTabChanged>>',
lambda evt: tab_changed.append(True))
self.assertEqual(self.nb.select(), str(self.child1))
self.nb.select(self.child2)
self.assertTrue(success)
self.assertEqual(self.nb.select(), str(self.child2))
self.nb.update()
self.assertTrue(tab_changed)
    def test_tab(self):
        """tab() returns all options as a dict, queries one option, or sets options."""
        # Invalid tab identifiers raise TclError.
        self.assertRaises(tkinter.TclError, self.nb.tab, -1)
        self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
        self.assertRaises(tkinter.TclError, self.nb.tab, None)
        self.assertIsInstance(self.nb.tab(self.child1), dict)
        # Legacy query form: option=None returns the current value.
        self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
        # newer form for querying a single option
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
        # Setting an option is visible through both query forms.
        self.nb.tab(self.child1, text='abc')
        self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')
    def test_tabs(self):
        """tabs() lists managed children; forgetting all leaves an empty tuple."""
        self.assertEqual(len(self.nb.tabs()), 2)
        self.nb.forget(self.child1)
        self.nb.forget(self.child2)
        self.assertEqual(self.nb.tabs(), ())
    def test_traversal(self):
        """Keyboard traversal: Control-Tab cycles tabs; Alt/Option + mnemonic
        selects the tab whose underlined letter matches."""
        self.nb.pack()
        self.nb.wait_visibility()
        self.nb.select(0)
        simulate_mouse_click(self.nb, 5, 5)
        self.nb.focus_force()
        # Control-Tab moves forward, Shift-Control-Tab moves backward
        # (wrapping around at the ends).
        self.nb.event_generate('<Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        # Give child1 an underlined mnemonic and enable mnemonic traversal.
        self.nb.tab(self.child1, text='a', underline=0)
        self.nb.enable_traversal()
        self.nb.focus_force()
        simulate_mouse_click(self.nb, 5, 5)
        # macOS uses the Option modifier for mnemonics instead of Alt.
        if sys.platform == 'darwin':
            self.nb.event_generate('<Option-a>')
        else:
            self.nb.event_generate('<Alt-a>')
        self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Treeview: widget options, item insertion/removal,
    columns, headings, per-tag event bindings and tag configuration.
    Most tests require a live display (the module calls requires('gui'))."""
    OPTIONS = (
        'class', 'columns', 'cursor', 'displaycolumns',
        'height', 'padding', 'selectmode', 'show',
        'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
    )
    def setUp(self):
        super().setUp()
        # padding=0 keeps geometry results (bbox) predictable below.
        self.tv = self.create(padding=0)
    def create(self, **kwargs):
        """Return a fresh Treeview attached to the test root."""
        return ttk.Treeview(self.root, **kwargs)
    def test_columns(self):
        """'columns' accepts a Tcl list string or a Python sequence."""
        widget = self.create()
        self.checkParam(widget, 'columns', 'a b c',
                        expected=('a', 'b', 'c'))
        self.checkParam(widget, 'columns', ('a', 'b', 'c'))
        self.checkParam(widget, 'columns', ())
    def test_displaycolumns(self):
        """'displaycolumns' selects/reorders columns; bad names or
        out-of-bounds indices are rejected with a specific message."""
        widget = self.create()
        widget['columns'] = ('a', 'b', 'c')
        self.checkParam(widget, 'displaycolumns', 'b a c',
                        expected=('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', '#all',
                        expected=('#all',))
        self.checkParam(widget, 'displaycolumns', (2, 1, 0))
        self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
                               errmsg='Invalid column index d')
        self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
                               errmsg='Column index 3 out of bounds')
        self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
                               errmsg='Column index -2 out of bounds')
    def test_height(self):
        """'height' takes integers (incl. negatives/zero) and screen units."""
        widget = self.create()
        self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
        self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)
    def test_selectmode(self):
        """'selectmode' is an enum of none/browse/extended."""
        widget = self.create()
        self.checkEnumParam(widget, 'selectmode',
                            'none', 'browse', 'extended')
    def test_show(self):
        """'show' accepts any combination/order of 'tree' and 'headings'."""
        widget = self.create()
        self.checkParam(widget, 'show', 'tree headings',
                        expected=('tree', 'headings'))
        self.checkParam(widget, 'show', ('tree', 'headings'))
        self.checkParam(widget, 'show', ('headings', 'tree'))
        self.checkParam(widget, 'show', 'tree', expected=('tree',))
        self.checkParam(widget, 'show', 'headings', expected=('headings',))
    def test_bbox(self):
        """bbox() returns item geometry; unmapped/closed items return ''."""
        self.tv.pack()
        self.assertEqual(self.tv.bbox(''), '')
        self.tv.wait_visibility()
        self.tv.update()
        item_id = self.tv.insert('', 'end')
        children = self.tv.get_children()
        self.assertTrue(children)
        bbox = self.tv.bbox(children[0])
        self.assertIsBoundingBox(bbox)
        # compare width in bboxes: column 0's bbox starts after the tree
        # column (#0), so its x offset equals #0's configured width
        self.tv['columns'] = ['test']
        self.tv.column('test', width=50)
        bbox_column0 = self.tv.bbox(children[0], 0)
        root_width = self.tv.column('#0', width=None)
        if not self.wantobjects:
            root_width = int(root_width)
        self.assertEqual(bbox_column0[0], bbox[0] + root_width)
        # verify that bbox of a closed item is the empty string
        child1 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.bbox(child1), '')
    def test_children(self):
        """get_children()/set_children() manage an item's child list."""
        # no children yet, should get an empty tuple
        self.assertEqual(self.tv.get_children(), ())
        item_id = self.tv.insert('', 'end')
        self.assertIsInstance(self.tv.get_children(), tuple)
        self.assertEqual(self.tv.get_children()[0], item_id)
        # add item_id and child3 as children of child2
        child2 = self.tv.insert('', 'end')
        child3 = self.tv.insert('', 'end')
        self.tv.set_children(child2, item_id, child3)
        self.assertEqual(self.tv.get_children(child2), (item_id, child3))
        # child3 has child2 as parent, thus trying to set child2 as a children
        # of child3 should result in an error
        self.assertRaises(tkinter.TclError,
                          self.tv.set_children, child3, child2)
        # remove child2 children
        self.tv.set_children(child2)
        self.assertEqual(self.tv.get_children(child2), ())
        # remove root's children
        self.tv.set_children('')
        self.assertEqual(self.tv.get_children(), ())
    def test_column(self):
        """column() queries/configures column options; 'id' is read-only."""
        # return a dict with all options/values
        self.assertIsInstance(self.tv.column('#0'), dict)
        # return a single value of the given option
        if self.wantobjects:
            self.assertIsInstance(self.tv.column('#0', width=None), int)
        # set a new value for an option
        self.tv.column('#0', width=10)
        # testing new way to get option value
        self.assertEqual(self.tv.column('#0', 'width'),
                         10 if self.wantobjects else '10')
        self.assertEqual(self.tv.column('#0', width=None),
                         10 if self.wantobjects else '10')
        # check read-only option
        self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')
        self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
        # Unknown options and badly-typed values must all raise TclError.
        invalid_kws = [
            {'unknown_option': 'some value'}, {'stretch': 'wrong'},
            {'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
        ]
        for kw in invalid_kws:
            self.assertRaises(tkinter.TclError, self.tv.column, '#0',
                              **kw)
    def test_delete(self):
        """delete() removes items (and their subtrees); accepts several items."""
        self.assertRaises(tkinter.TclError, self.tv.delete, '#0')
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))
        self.tv.delete(item_id)
        self.assertFalse(self.tv.get_children())
        # reattach should fail
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item_id, '', 'end')
        # test multiple item delete
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        self.assertEqual(self.tv.get_children(), (item1, item2))
        self.tv.delete(item1, item2)
        self.assertFalse(self.tv.get_children())
    def test_detach_reattach(self):
        """detach() hides items (keeping them alive); reattach()/move()
        put them back under a parent at an index."""
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        # calling detach without items is valid, although it does nothing
        prev = self.tv.get_children()
        self.tv.detach() # this should do nothing
        self.assertEqual(prev, self.tv.get_children())
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # detach item with children
        self.tv.detach(item_id)
        self.assertFalse(self.tv.get_children())
        # reattach item with children
        self.tv.reattach(item_id, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))
        # move a children to the root
        self.tv.move(item2, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, item2))
        self.assertEqual(self.tv.get_children(item_id), ())
        # bad values
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, 'nonexistent', '', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.detach, 'nonexistent')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, 'otherparent', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, '', 'invalid')
        # multiple detach
        self.tv.detach(item_id, item2)
        self.assertEqual(self.tv.get_children(), ())
        self.assertEqual(self.tv.get_children(item_id), ())
    def test_exists(self):
        """exists() reports item presence; the root '' always exists."""
        self.assertEqual(self.tv.exists('something'), False)
        self.assertEqual(self.tv.exists(''), True)
        self.assertEqual(self.tv.exists({}), False)
        # the following will make a tk.call equivalent to
        # tk.call(treeview, "exists") which should result in an error
        # in the tcl interpreter since tk requires an item.
        self.assertRaises(tkinter.TclError, self.tv.exists, None)
    def test_focus(self):
        """focus() queries/sets the focused item; deleting it clears focus."""
        # nothing is focused right now
        self.assertEqual(self.tv.focus(), '')
        item1 = self.tv.insert('', 'end')
        self.tv.focus(item1)
        self.assertEqual(self.tv.focus(), item1)
        self.tv.delete(item1)
        self.assertEqual(self.tv.focus(), '')
        # try focusing inexistent item
        self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')
    def test_heading(self):
        """heading() queries/configures column headings."""
        # check a dict is returned
        self.assertIsInstance(self.tv.heading('#0'), dict)
        # check a value is returned
        self.tv.heading('#0', text='hi')
        self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
        self.assertEqual(self.tv.heading('#0', text=None), 'hi')
        # invalid option
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          background=None)
        # invalid value
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          anchor=1)
    def test_heading_callback(self):
        """Clicking a heading invokes its command; re-setting the command
        to its own name must not register a duplicate Tcl command."""
        def simulate_heading_click(x, y):
            simulate_mouse_click(self.tv, x, y)
            self.tv.update()
        success = [] # no success for now
        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.heading('#0', command=lambda: success.append(True))
        self.tv.column('#0', width=100)
        self.tv.update()
        # assuming that the coords (5, 5) fall into heading #0
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")
        success = []
        commands = self.tv.master._tclCommands
        self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
        self.assertEqual(commands, self.tv.master._tclCommands)
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")
        # XXX The following raises an error in a tcl interpreter, but not in
        # Python
        #self.tv.heading('#0', command='I dont exist')
        #simulate_heading_click(5, 5)
    def test_index(self):
        """index() gives an item's position among its siblings, even after
        detaching relatives, but fails for deleted items."""
        # item 'what' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.index, 'what')
        self.assertEqual(self.tv.index(''), 0)
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        c1 = self.tv.insert(item1, 'end')
        c2 = self.tv.insert(item1, 'end')
        self.assertEqual(self.tv.index(item1), 0)
        self.assertEqual(self.tv.index(c1), 0)
        self.assertEqual(self.tv.index(c2), 1)
        self.assertEqual(self.tv.index(item2), 1)
        self.tv.move(item2, '', 0)
        self.assertEqual(self.tv.index(item2), 0)
        self.assertEqual(self.tv.index(item1), 1)
        # check that index still works even after its parent and siblings
        # have been detached
        self.tv.detach(item1)
        self.assertEqual(self.tv.index(c2), 1)
        self.tv.detach(c1)
        self.assertEqual(self.tv.index(c2), 0)
        # but it fails after item has been deleted
        self.tv.delete(item1)
        self.assertRaises(tkinter.TclError, self.tv.index, c2)
    def test_insert_item(self):
        """insert() validates parent/index/options; values, tags and text
        round-trip correctly, including unicode and embedded spaces."""
        # parent 'none' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')
        # open values
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='please')
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))
        # invalid index
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')
        # trying to duplicate item id is invalid
        itemid = self.tv.insert('', 'end', 'first-item')
        self.assertEqual(itemid, 'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          MockTclObj('first-item'))
        # unicode values
        value = '\xe1ba'
        item = self.tv.insert('', 'end', values=(value, ))
        self.assertEqual(self.tv.item(item, 'values'),
                         (value,) if self.wantobjects else value)
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)
        self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)
        self.assertIsInstance(self.tv.item(item), dict)
        # erase item values
        self.tv.item(item, values='')
        self.assertFalse(self.tv.item(item, values=None))
        # item tags
        item = self.tv.insert('', 'end', tags=[1, 2, value])
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2', value) if self.wantobjects else
                         '1 2 %s' % value)
        self.tv.item(item, tags=[])
        self.assertFalse(self.tv.item(item, tags=None))
        self.tv.item(item, tags=(1, 2))
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2') if self.wantobjects else '1 2')
        # values with spaces
        item = self.tv.insert('', 'end', values=('a b c',
                                                 '%s %s' % (value, value)))
        self.assertEqual(self.tv.item(item, values=None),
                         ('a b c', '%s %s' % (value, value)) if self.wantobjects else
                         '{a b c} {%s %s}' % (value, value))
        # text
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text="Label here"), text=None),
            "Label here")
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text=value), text=None),
            value)
    def test_set(self):
        """set() reads/writes cell values per column; stale columns vanish
        from the mapping but their stored values survive in item values."""
        self.tv['columns'] = ['A', 'B']
        item = self.tv.insert('', 'end', values=['a', 'b'])
        self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})
        self.tv.set(item, 'B', 'a')
        self.assertEqual(self.tv.item(item, values=None),
                         ('a', 'a') if self.wantobjects else 'a a')
        self.tv['columns'] = ['B']
        self.assertEqual(self.tv.set(item), {'B': 'a'})
        self.tv.set(item, 'B', 'b')
        self.assertEqual(self.tv.set(item, column='B'), 'b')
        self.assertEqual(self.tv.item(item, values=None),
                         ('b', 'a') if self.wantobjects else 'b a')
        self.tv.set(item, 'B', 123)
        self.assertEqual(self.tv.set(item, 'B'),
                         123 if self.wantobjects else '123')
        self.assertEqual(self.tv.item(item, values=None),
                         (123, 'a') if self.wantobjects else '123 a')
        self.assertEqual(self.tv.set(item),
                         {'B': 123} if self.wantobjects else {'B': '123'})
        # inexistent column
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')
        # inexistent item
        self.assertRaises(tkinter.TclError, self.tv.set, 'notme')
    def test_tag_bind(self):
        """Events bound via tag_bind fire for every item carrying the tag."""
        events = []
        item1 = self.tv.insert('', 'end', tags=['call'])
        item2 = self.tv.insert('', 'end', tags=['call'])
        self.tv.tag_bind('call', '<ButtonPress-1>',
                         lambda evt: events.append(1))
        self.tv.tag_bind('call', '<ButtonRelease-1>',
                         lambda evt: events.append(2))
        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.update()
        # Scan down the widget to locate the y coordinate of each row.
        pos_y = set()
        found = set()
        for i in range(0, 100, 10):
            if len(found) == 2: # item1 and item2 already found
                break
            item_id = self.tv.identify_row(i)
            if item_id and item_id not in found:
                pos_y.add(i)
                found.add(item_id)
        self.assertEqual(len(pos_y), 2) # item1 and item2 y pos
        for y in pos_y:
            simulate_mouse_click(self.tv, 0, y)
        # by now there should be 4 things in the events list, since each
        # item had a bind for two events that were simulated above
        self.assertEqual(len(events), 4)
        for evt in zip(events[::2], events[1::2]):
            self.assertEqual(evt, (1, 2))
    def test_tag_configure(self):
        """tag_configure() queries/sets tag display options."""
        # Just testing parameter passing for now
        self.assertRaises(TypeError, self.tv.tag_configure)
        self.assertRaises(tkinter.TclError, self.tv.tag_configure,
                          'test', sky='blue')
        self.tv.tag_configure('test', foreground='blue')
        self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
                         'blue')
        self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
                         'blue')
        self.assertIsInstance(self.tv.tag_configure('test'), dict)
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
    """Standard option handling for ttk.Separator."""

    OPTIONS = (
        'class', 'cursor', 'orient', 'style', 'takefocus',
        # 'state'?
    )
    default_orient = 'horizontal'

    def create(self, **kwargs):
        # Build a fresh Separator attached to the shared test root.
        separator = ttk.Separator(self.root, **kwargs)
        return separator
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
    """Standard option handling for ttk.Sizegrip."""

    OPTIONS = (
        'class', 'cursor', 'style', 'takefocus',
        # 'state'?
    )

    def create(self, **kwargs):
        # Build a fresh Sizegrip attached to the shared test root.
        sizegrip = ttk.Sizegrip(self.root, **kwargs)
        return sizegrip
# Test classes exported for the test harness's GUI test discovery.
tests_gui = (
        ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
        FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
        NotebookTest, PanedWindowTest, ProgressbarTest,
        RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
        SizegripTest, TreeviewTest, WidgetTest,
        )
if __name__ == "__main__":
    unittest.main()
| 35.539712 | 87 | 0.595305 | import unittest
import tkinter
from tkinter import ttk
from test.support import requires
import sys
from tkinter.test.test_ttk.test_functions import MockTclObj
from tkinter.test.support import (AbstractTkTest, tcl_version, get_tk_patchlevel,
simulate_mouse_click)
from tkinter.test.widget_tests import (add_standard_options, noconv,
AbstractWidgetTest, StandardOptionsTests, IntegerSizeTests, PixelSizeTests,
setUpModule)
requires('gui')
class StandardTtkOptionsTests(StandardOptionsTests):
def test_class(self):
widget = self.create()
self.assertEqual(widget['class'], '')
errmsg='attempt to change read-only option'
if get_tk_patchlevel() < (8, 6, 0):
errmsg='Attempt to change read-only option'
self.checkInvalidParam(widget, 'class', 'Foo', errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
def test_padding(self):
widget = self.create()
self.checkParam(widget, 'padding', 0, expected=('0',))
self.checkParam(widget, 'padding', 5, expected=('5',))
self.checkParam(widget, 'padding', (5, 6), expected=('5', '6'))
self.checkParam(widget, 'padding', (5, 6, 7),
expected=('5', '6', '7'))
self.checkParam(widget, 'padding', (5, 6, 7, 8),
expected=('5', '6', '7', '8'))
self.checkParam(widget, 'padding', ('5p', '6p', '7p', '8p'))
self.checkParam(widget, 'padding', (), expected='')
def test_style(self):
widget = self.create()
self.assertEqual(widget['style'], '')
errmsg = 'Layout Foo not found'
if hasattr(self, 'default_orient'):
errmsg = ('Layout %s.Foo not found' %
getattr(self, 'default_orient').title())
self.checkInvalidParam(widget, 'style', 'Foo',
errmsg=errmsg)
widget2 = self.create(class_='Foo')
self.assertEqual(widget2['class'], 'Foo')
pass
class WidgetTest(AbstractTkTest, unittest.TestCase):
def setUp(self):
super().setUp()
self.widget = ttk.Button(self.root, width=0, text="Text")
self.widget.pack()
self.widget.wait_visibility()
def test_identify(self):
self.widget.update_idletasks()
self.assertEqual(self.widget.identify(
int(self.widget.winfo_width() / 2),
int(self.widget.winfo_height() / 2)
), "label")
self.assertEqual(self.widget.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.widget.identify, None, 5)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, None)
self.assertRaises(tkinter.TclError, self.widget.identify, 5, '')
def test_widget_state(self):
self.assertEqual(self.widget.state(), ())
self.assertEqual(self.widget.instate(['!disabled']), True)
self.assertEqual(self.widget.state(['disabled']), ('!disabled', ))
self.assertEqual(self.widget.state(['disabled']), ())
self.assertEqual(self.widget.state(['!disabled', 'active']),
('!active', 'disabled'))
self.assertEqual(self.widget.state(['!disabled', 'active']), ())
self.assertEqual(self.widget.state(['active', '!disabled']), ())
def test_cb(arg1, **kw):
return arg1, kw
self.assertEqual(self.widget.instate(['!disabled'],
test_cb, "hi", **{"msg": "there"}),
('hi', {'msg': 'there'}))
currstate = self.widget.state()
self.assertRaises(tkinter.TclError, self.widget.instate,
['badstate'])
self.assertRaises(tkinter.TclError, self.widget.instate,
['disabled', 'badstate'])
self.assertEqual(currstate, self.widget.state())
# ensuring that passing None as state doesn't modify current state
self.widget.state(['active', '!disabled'])
self.assertEqual(self.widget.state(), ('active', ))
class AbstractToplevelTest(AbstractWidgetTest, PixelSizeTests):
_conv_pixels = noconv
@add_standard_options(StandardTtkOptionsTests)
class FrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'padding', 'relief', 'style', 'takefocus',
'width',
)
def create(self, **kwargs):
return ttk.Frame(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class LabelFrameTest(AbstractToplevelTest, unittest.TestCase):
OPTIONS = (
'borderwidth', 'class', 'cursor', 'height',
'labelanchor', 'labelwidget',
'padding', 'relief', 'style', 'takefocus',
'text', 'underline', 'width',
)
def create(self, **kwargs):
return ttk.LabelFrame(self.root, **kwargs)
def test_labelanchor(self):
widget = self.create()
self.checkEnumParam(widget, 'labelanchor',
'e', 'en', 'es', 'n', 'ne', 'nw', 's', 'se', 'sw', 'w', 'wn', 'ws',
errmsg='Bad label anchor specification {}')
self.checkInvalidParam(widget, 'labelanchor', 'center')
def test_labelwidget(self):
widget = self.create()
label = ttk.Label(self.root, text='Mupp', name='foo')
self.checkParam(widget, 'labelwidget', label, expected='.foo')
label.destroy()
class AbstractLabelTest(AbstractWidgetTest):
def checkImageParam(self, widget, name):
image = tkinter.PhotoImage(master=self.root, name='image1')
image2 = tkinter.PhotoImage(master=self.root, name='image2')
self.checkParam(widget, name, image, expected=('image1',))
self.checkParam(widget, name, 'image1', expected=('image1',))
self.checkParam(widget, name, (image,), expected=('image1',))
self.checkParam(widget, name, (image, 'active', image2),
expected=('image1', 'active', 'image2'))
self.checkParam(widget, name, 'image1 active image2',
expected=('image1', 'active', 'image2'))
self.checkInvalidParam(widget, name, 'spam',
errmsg='image "spam" doesn\'t exist')
def test_compound(self):
widget = self.create()
self.checkEnumParam(widget, 'compound',
'none', 'text', 'image', 'center',
'top', 'bottom', 'left', 'right')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def test_width(self):
widget = self.create()
self.checkParams(widget, 'width', 402, -402, 0)
@add_standard_options(StandardTtkOptionsTests)
class LabelTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'anchor', 'background',
'class', 'compound', 'cursor', 'font', 'foreground',
'image', 'justify', 'padding', 'relief', 'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'width', 'wraplength',
)
_conv_pixels = noconv
def create(self, **kwargs):
return ttk.Label(self.root, **kwargs)
def test_font(self):
widget = self.create()
self.checkParam(widget, 'font',
'-Adobe-Helvetica-Medium-R-Normal--*-120-*-*-*-*-*-*')
@add_standard_options(StandardTtkOptionsTests)
class ButtonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor', 'default',
'image', 'state', 'style', 'takefocus', 'text', 'textvariable',
'underline', 'width',
)
def create(self, **kwargs):
return ttk.Button(self.root, **kwargs)
def test_default(self):
widget = self.create()
self.checkEnumParam(widget, 'default', 'normal', 'active', 'disabled')
def test_invoke(self):
success = []
btn = ttk.Button(self.root, command=lambda: success.append(1))
btn.invoke()
self.assertTrue(success)
@add_standard_options(StandardTtkOptionsTests)
class CheckbuttonTest(AbstractLabelTest, unittest.TestCase):
OPTIONS = (
'class', 'command', 'compound', 'cursor',
'image',
'offvalue', 'onvalue',
'state', 'style',
'takefocus', 'text', 'textvariable',
'underline', 'variable', 'width',
)
def create(self, **kwargs):
return ttk.Checkbutton(self.root, **kwargs)
def test_offvalue(self):
widget = self.create()
self.checkParams(widget, 'offvalue', 1, 2.3, '', 'any string')
def test_onvalue(self):
widget = self.create()
self.checkParams(widget, 'onvalue', 1, 2.3, '', 'any string')
def test_invoke(self):
success = []
def cb_test():
success.append(1)
return "cb test called"
cbtn = ttk.Checkbutton(self.root, command=cb_test)
# the variable automatically created by ttk.Checkbutton is actually
# undefined till we invoke the Checkbutton
self.assertEqual(cbtn.state(), ('alternate', ))
self.assertRaises(tkinter.TclError, cbtn.tk.globalgetvar,
cbtn['variable'])
res = cbtn.invoke()
self.assertEqual(res, "cb test called")
self.assertEqual(cbtn['onvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
self.assertTrue(success)
cbtn['command'] = ''
res = cbtn.invoke()
self.assertFalse(str(res))
self.assertLessEqual(len(success), 1)
self.assertEqual(cbtn['offvalue'],
cbtn.tk.globalgetvar(cbtn['variable']))
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class ComboboxTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'class', 'cursor', 'exportselection', 'height',
'justify', 'postcommand', 'state', 'style',
'takefocus', 'textvariable', 'values', 'width',
)
def setUp(self):
super().setUp()
self.combo = self.create()
def create(self, **kwargs):
return ttk.Combobox(self.root, **kwargs)
def test_height(self):
widget = self.create()
self.checkParams(widget, 'height', 100, 101.2, 102.6, -100, 0, '1i')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state', 'active', 'disabled', 'normal')
def _show_drop_down_listbox(self):
width = self.combo.winfo_width()
self.combo.event_generate('<ButtonPress-1>', x=width - 5, y=5)
self.combo.event_generate('<ButtonRelease-1>', x=width - 5, y=5)
self.combo.update_idletasks()
def test_virtual_event(self):
success = []
self.combo['values'] = [1]
self.combo.bind('<<ComboboxSelected>>',
lambda evt: success.append(True))
self.combo.pack()
self.combo.wait_visibility()
height = self.combo.winfo_height()
self._show_drop_down_listbox()
self.combo.update()
self.combo.event_generate('<Return>')
self.combo.update()
self.assertTrue(success)
def test_postcommand(self):
success = []
self.combo['postcommand'] = lambda: success.append(True)
self.combo.pack()
self.combo.wait_visibility()
self._show_drop_down_listbox()
self.assertTrue(success)
# testing postcommand removal
self.combo['postcommand'] = ''
self._show_drop_down_listbox()
self.assertEqual(len(success), 1)
def test_values(self):
def check_get_current(getval, currval):
self.assertEqual(self.combo.get(), getval)
self.assertEqual(self.combo.current(), currval)
self.assertEqual(self.combo['values'],
() if tcl_version < (8, 5) else '')
check_get_current('', -1)
self.checkParam(self.combo, 'values', 'mon tue wed thur',
expected=('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', ('mon', 'tue', 'wed', 'thur'))
self.checkParam(self.combo, 'values', (42, 3.14, '', 'any string'))
self.checkParam(self.combo, 'values', '', expected=())
self.combo['values'] = ['a', 1, 'c']
self.combo.set('c')
check_get_current('c', 2)
self.combo.current(0)
check_get_current('a', 0)
self.combo.set('d')
check_get_current('d', -1)
# testing values with empty string
self.combo.set('')
self.combo['values'] = (1, 2, '', 3)
check_get_current('', 2)
# testing values with empty string set through configure
self.combo.configure(values=[1, '', 2])
self.assertEqual(self.combo['values'],
('1', '', '2') if self.wantobjects else
'1 {} 2')
# testing values with spaces
self.combo['values'] = ['a b', 'a\tb', 'a\nb']
self.assertEqual(self.combo['values'],
('a b', 'a\tb', 'a\nb') if self.wantobjects else
'{a b} {a\tb} {a\nb}')
# testing values with special characters
self.combo['values'] = [r'a\tb', '"a"', '} {']
self.assertEqual(self.combo['values'],
(r'a\tb', '"a"', '} {') if self.wantobjects else
r'a\\tb {"a"} \}\ \{')
# out of range
self.assertRaises(tkinter.TclError, self.combo.current,
len(self.combo['values']))
# it expects an integer (or something that can be converted to int)
self.assertRaises(tkinter.TclError, self.combo.current, '')
# testing creating combobox with empty string in values
combo2 = ttk.Combobox(self.root, values=[1, 2, ''])
self.assertEqual(combo2['values'],
('1', '2', '') if self.wantobjects else '1 2 {}')
combo2.destroy()
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class EntryTest(AbstractWidgetTest, unittest.TestCase):
OPTIONS = (
'background', 'class', 'cursor',
'exportselection', 'font',
'invalidcommand', 'justify',
'show', 'state', 'style', 'takefocus', 'textvariable',
'validate', 'validatecommand', 'width', 'xscrollcommand',
)
def setUp(self):
super().setUp()
self.entry = self.create()
def create(self, **kwargs):
return ttk.Entry(self.root, **kwargs)
def test_invalidcommand(self):
widget = self.create()
self.checkCommandParam(widget, 'invalidcommand')
def test_show(self):
widget = self.create()
self.checkParam(widget, 'show', '*')
self.checkParam(widget, 'show', '')
self.checkParam(widget, 'show', ' ')
def test_state(self):
widget = self.create()
self.checkParams(widget, 'state',
'disabled', 'normal', 'readonly')
def test_validate(self):
widget = self.create()
self.checkEnumParam(widget, 'validate',
'all', 'key', 'focus', 'focusin', 'focusout', 'none')
def test_validatecommand(self):
widget = self.create()
self.checkCommandParam(widget, 'validatecommand')
def test_bbox(self):
self.assertIsBoundingBox(self.entry.bbox(0))
self.assertRaises(tkinter.TclError, self.entry.bbox, 'noindex')
self.assertRaises(tkinter.TclError, self.entry.bbox, None)
def test_identify(self):
self.entry.pack()
self.entry.wait_visibility()
self.entry.update_idletasks()
self.assertEqual(self.entry.identify(5, 5), "textarea")
self.assertEqual(self.entry.identify(-1, -1), "")
self.assertRaises(tkinter.TclError, self.entry.identify, None, 5)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, None)
self.assertRaises(tkinter.TclError, self.entry.identify, 5, '')
def test_validation_options(self):
success = []
test_invalid = lambda: success.append(True)
self.entry['validate'] = 'none'
self.entry['validatecommand'] = lambda: False
self.entry['invalidcommand'] = test_invalid
self.entry.validate()
self.assertTrue(success)
self.entry['invalidcommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['invalidcommand'] = test_invalid
self.entry['validatecommand'] = lambda: True
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = ''
self.entry.validate()
self.assertEqual(len(success), 1)
self.entry['validatecommand'] = True
self.assertRaises(tkinter.TclError, self.entry.validate)
def test_validation(self):
validation = []
def validate(to_insert):
if not 'a' <= to_insert.lower() <= 'z':
validation.append(False)
return False
validation.append(True)
return True
self.entry['validate'] = 'key'
self.entry['validatecommand'] = self.entry.register(validate), '%S'
self.entry.insert('end', 1)
self.entry.insert('end', 'a')
self.assertEqual(validation, [False, True])
self.assertEqual(self.entry.get(), 'a')
def test_revalidation(self):
def validate(content):
for letter in content:
if not 'a' <= letter.lower() <= 'z':
return False
return True
self.entry['validatecommand'] = self.entry.register(validate), '%P'
self.entry.insert('end', 'avocado')
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
self.entry.delete(0, 'end')
self.assertEqual(self.entry.get(), '')
self.entry.insert('end', 'a1b')
self.assertEqual(self.entry.validate(), False)
self.assertEqual(self.entry.state(), ('invalid', ))
self.entry.delete(1)
self.assertEqual(self.entry.validate(), True)
self.assertEqual(self.entry.state(), ())
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class PanedWindowTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.PanedWindow pane management and options."""
    OPTIONS = (
        'class', 'cursor', 'height',
        'orient', 'style', 'takefocus', 'width',
    )

    def setUp(self):
        super().setUp()
        self.paned = self.create()

    def create(self, **kwargs):
        return ttk.PanedWindow(self.root, **kwargs)

    def test_orient(self):
        widget = self.create()
        self.assertEqual(str(widget['orient']), 'vertical')
        errmsg = 'attempt to change read-only option'
        if get_tk_patchlevel() < (8, 6, 0):  # message was capitalized before 8.6b3
            errmsg = 'Attempt to change read-only option'
        self.checkInvalidParam(widget, 'orient', 'horizontal',
                errmsg=errmsg)
        widget2 = self.create(orient='horizontal')
        self.assertEqual(str(widget2['orient']), 'horizontal')

    def test_add(self):
        # attempt to add a child that is not a direct child of the paned window
        label = ttk.Label(self.paned)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        label.destroy()
        child.destroy()
        # another attempt
        label = ttk.Label(self.root)
        child = ttk.Label(label)
        self.assertRaises(tkinter.TclError, self.paned.add, child)
        child.destroy()
        label.destroy()

        good_child = ttk.Label(self.root)
        self.paned.add(good_child)
        # re-adding a child is not accepted
        self.assertRaises(tkinter.TclError, self.paned.add, good_child)

        other_child = ttk.Label(self.paned)
        self.paned.add(other_child)
        self.assertEqual(self.paned.pane(0), self.paned.pane(1))
        self.assertRaises(tkinter.TclError, self.paned.pane, 2)
        good_child.destroy()
        other_child.destroy()
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)

    def test_forget(self):
        self.assertRaises(tkinter.TclError, self.paned.forget, None)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)

        self.paned.add(ttk.Label(self.root))
        self.paned.forget(0)
        self.assertRaises(tkinter.TclError, self.paned.forget, 0)

    def test_insert(self):
        self.assertRaises(tkinter.TclError, self.paned.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, None)
        self.assertRaises(tkinter.TclError, self.paned.insert, 0, 0)

        child = ttk.Label(self.root)
        child2 = ttk.Label(self.root)
        child3 = ttk.Label(self.root)

        self.assertRaises(tkinter.TclError, self.paned.insert, 0, child)

        self.paned.insert('end', child2)
        self.paned.insert(0, child)
        self.assertEqual(self.paned.panes(), (str(child), str(child2)))

        self.paned.insert(0, child2)
        self.assertEqual(self.paned.panes(), (str(child2), str(child)))

        self.paned.insert('end', child3)
        self.assertEqual(self.paned.panes(),
            (str(child2), str(child), str(child3)))

        # reinserting a child should move it to its current position
        panes = self.paned.panes()
        self.paned.insert('end', child3)
        self.assertEqual(panes, self.paned.panes())

        # moving child3 to child2 position should result in child2 ending up
        # in previous child position and child ending up in previous child3
        # position
        self.paned.insert(child2, child3)
        self.assertEqual(self.paned.panes(),
            (str(child3), str(child2), str(child)))

    def test_pane(self):
        self.assertRaises(tkinter.TclError, self.paned.pane, 0)

        child = ttk.Label(self.root)
        self.paned.add(child)
        self.assertIsInstance(self.paned.pane(0), dict)
        self.assertEqual(self.paned.pane(0, weight=None),
                         0 if self.wantobjects else '0')
        # newer form for querying a single option
        self.assertEqual(self.paned.pane(0, 'weight'),
                         0 if self.wantobjects else '0')
        self.assertEqual(self.paned.pane(0), self.paned.pane(str(child)))

        self.assertRaises(tkinter.TclError, self.paned.pane, 0,
            badoption='somevalue')

    def test_sashpos(self):
        self.assertRaises(tkinter.TclError, self.paned.sashpos, None)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, '')
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)

        child = ttk.Label(self.paned, text='a')
        self.paned.add(child, weight=1)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 0)
        child2 = ttk.Label(self.paned, text='b')
        self.paned.add(child2)
        self.assertRaises(tkinter.TclError, self.paned.sashpos, 1)

        self.paned.pack(expand=True, fill='both')
        self.paned.wait_visibility()

        curr_pos = self.paned.sashpos(0)
        self.paned.sashpos(0, 1000)
        self.assertNotEqual(curr_pos, self.paned.sashpos(0))
        self.assertIsInstance(self.paned.sashpos(0), int)
@add_standard_options(StandardTtkOptionsTests)
class RadiobuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Radiobutton options and invoke() behavior."""
    OPTIONS = (
        'class', 'command', 'compound', 'cursor',
        'image',
        'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'value', 'variable', 'width',
    )

    def create(self, **kwargs):
        return ttk.Radiobutton(self.root, **kwargs)

    def test_value(self):
        widget = self.create()
        self.checkParams(widget, 'value', 1, 2.3, '', 'any string')

    def test_invoke(self):
        success = []
        def cb_test():
            success.append(1)
            return "cb test called"

        myvar = tkinter.IntVar(self.root)
        cbtn = ttk.Radiobutton(self.root, command=cb_test,
                               variable=myvar, value=0)
        cbtn2 = ttk.Radiobutton(self.root, command=cb_test,
                                variable=myvar, value=1)

        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = int

        # invoke() selects the button, sets the shared variable and runs
        # the command, returning the command's result.
        res = cbtn.invoke()
        self.assertEqual(res, "cb test called")
        self.assertEqual(conv(cbtn['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))
        self.assertTrue(success)

        # With an empty command, invoke() returns an empty result and
        # does not run the previous callback again.
        cbtn2['command'] = ''
        res = cbtn2.invoke()
        self.assertEqual(str(res), '')
        self.assertLessEqual(len(success), 1)
        self.assertEqual(conv(cbtn2['value']), myvar.get())
        self.assertEqual(myvar.get(),
            conv(cbtn.tk.globalgetvar(cbtn['variable'])))

        self.assertEqual(str(cbtn['variable']), str(cbtn2['variable']))
class MenubuttonTest(AbstractLabelTest, unittest.TestCase):
    """Tests for ttk.Menubutton options."""
    OPTIONS = (
        'class', 'compound', 'cursor', 'direction',
        'image', 'menu', 'state', 'style',
        'takefocus', 'text', 'textvariable',
        'underline', 'width',
    )

    def create(self, **kwargs):
        return ttk.Menubutton(self.root, **kwargs)

    def test_direction(self):
        widget = self.create()
        self.checkEnumParam(widget, 'direction',
                'above', 'below', 'left', 'right', 'flush')

    def test_menu(self):
        widget = self.create()
        menu = tkinter.Menu(widget, name='menu')
        self.checkParam(widget, 'menu', menu, conv=str)
        menu.destroy()
@add_standard_options(StandardTtkOptionsTests)
class ScaleTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Scale options, get()/set() and <<RangeChanged>>."""
    OPTIONS = (
        'class', 'command', 'cursor', 'from', 'length',
        'orient', 'style', 'takefocus', 'to', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def setUp(self):
        super().setUp()
        self.scale = self.create()
        self.scale.pack()
        self.scale.update()

    def create(self, **kwargs):
        return ttk.Scale(self.root, **kwargs)

    def test_from(self):
        widget = self.create()
        self.checkFloatParam(widget, 'from', 100, 14.9, 15.1, conv=False)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 130, 131.2, 135.6, '5i')

    def test_to(self):
        widget = self.create()
        self.checkFloatParam(widget, 'to', 300, 14.9, 15.1, -10, conv=False)

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 300, 14.9, 15.1, -10, conv=False)

    def test_custom_event(self):
        failure = [1, 1, 1]  # will need to be empty
        funcid = self.scale.bind('<<RangeChanged>>', lambda evt: failure.pop())

        # Each range-affecting configuration fires one <<RangeChanged>>.
        self.scale['from'] = 10
        self.scale['from_'] = 10
        self.scale['to'] = 3

        self.assertFalse(failure)

        failure = [1, 1, 1]
        self.scale.configure(from_=2, to=5)
        self.scale.configure(from_=0, to=-2)
        self.scale.configure(to=10)

        self.assertFalse(failure)

    def test_get(self):
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float

        scale_width = self.scale.winfo_width()
        self.assertEqual(self.scale.get(scale_width, 0), self.scale['to'])
        self.assertEqual(conv(self.scale.get(0, 0)), conv(self.scale['from']))
        self.assertEqual(self.scale.get(), self.scale['value'])

        self.scale['value'] = 30
        self.assertEqual(self.scale.get(), self.scale['value'])

        self.assertRaises(tkinter.TclError, self.scale.get, '', 0)
        self.assertRaises(tkinter.TclError, self.scale.get, 0, '')

    def test_set(self):
        if self.wantobjects:
            conv = lambda x: x
        else:
            conv = float

        # set restricts the max/min values according to the current range
        max = conv(self.scale['to'])
        new_max = max + 10
        self.scale.set(new_max)
        self.assertEqual(conv(self.scale.get()), max)
        min = conv(self.scale['from'])
        self.scale.set(min - 1)
        self.assertEqual(conv(self.scale.get()), min)

        # changing directly the variable doesn't impose this limitation tho
        var = tkinter.DoubleVar(self.root)
        self.scale['variable'] = var
        var.set(max + 5)
        self.assertEqual(conv(self.scale.get()), var.get())
        self.assertEqual(conv(self.scale.get()), max + 5)
        del var

        # the same happens with the value option
        self.scale['value'] = max + 10
        self.assertEqual(conv(self.scale.get()), max + 10)
        self.assertEqual(conv(self.scale.get()), conv(self.scale['value']))

        # nevertheless, note that the max/min values we can get specifying
        # x, y coords are the ones according to the current range
        self.assertEqual(conv(self.scale.get(0, 0)), min)
        self.assertEqual(conv(self.scale.get(self.scale.winfo_width(), 0)), max)

        self.assertRaises(tkinter.TclError, self.scale.set, None)
@add_standard_options(StandardTtkOptionsTests)
class ProgressbarTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Progressbar options."""
    OPTIONS = (
        'class', 'cursor', 'orient', 'length',
        'mode', 'maximum', 'phase',
        'style', 'takefocus', 'value', 'variable',
    )
    _conv_pixels = noconv
    default_orient = 'horizontal'

    def create(self, **kwargs):
        return ttk.Progressbar(self.root, **kwargs)

    def test_length(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'length', 100.1, 56.7, '2i')

    def test_maximum(self):
        widget = self.create()
        self.checkFloatParam(widget, 'maximum', 150.2, 77.7, 0, -10, conv=False)

    def test_mode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'mode', 'determinate', 'indeterminate')

    def test_phase(self):
        # XXX 'phase' is read-only; nothing meaningful to set here.
        pass

    def test_value(self):
        widget = self.create()
        self.checkFloatParam(widget, 'value', 150.2, 77.7, 0, -10,
                             conv=False)
@unittest.skipIf(sys.platform == 'darwin',
                 'ttk.Scrollbar is special on MacOSX')
@add_standard_options(StandardTtkOptionsTests)
class ScrollbarTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option tests for ttk.Scrollbar (skipped on macOS)."""
    OPTIONS = (
        'class', 'command', 'cursor', 'orient', 'style', 'takefocus',
    )
    default_orient = 'vertical'

    def create(self, **kwargs):
        return ttk.Scrollbar(self.root, **kwargs)
@add_standard_options(IntegerSizeTests, StandardTtkOptionsTests)
class NotebookTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Notebook tab management, selection and traversal."""
    OPTIONS = (
        'class', 'cursor', 'height', 'padding', 'style', 'takefocus',
    )

    def setUp(self):
        super().setUp()
        self.nb = self.create(padding=0)
        self.child1 = ttk.Label(self.root)
        self.child2 = ttk.Label(self.root)
        self.nb.add(self.child1, text='a')
        self.nb.add(self.child2, text='b')

    def create(self, **kwargs):
        return ttk.Notebook(self.root, **kwargs)

    def test_tab_identifiers(self):
        self.nb.forget(0)
        self.nb.hide(self.child2)
        self.assertRaises(tkinter.TclError, self.nb.tab, self.child1)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.index('end'), 1)
        self.nb.select(self.child2)

        self.assertTrue(self.nb.tab('current'))
        self.nb.add(self.child1, text='a')

        self.nb.pack()
        self.nb.wait_visibility()
        # the aqua theme lays tabs out differently, so probe further right
        if sys.platform == 'darwin':
            tb_idx = "@20,5"
        else:
            tb_idx = "@5,5"
        self.assertEqual(self.nb.tab(tb_idx), self.nb.tab('current'))

        for i in range(5, 100, 5):
            try:
                if self.nb.tab('@%d, 5' % i, text=None) == 'a':
                    break
            except tkinter.TclError:
                pass
        else:
            self.fail("Tab with text 'a' not found")

    def test_add_and_hidden(self):
        self.assertRaises(tkinter.TclError, self.nb.hide, -1)
        self.assertRaises(tkinter.TclError, self.nb.hide, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.hide, None)
        self.assertRaises(tkinter.TclError, self.nb.add, None)
        self.assertRaises(tkinter.TclError, self.nb.add, ttk.Label(self.root),
            unknown='option')

        tabs = self.nb.tabs()
        self.nb.hide(self.child1)
        self.nb.add(self.child1)
        self.assertEqual(self.nb.tabs(), tabs)

        child = ttk.Label(self.root)
        self.nb.add(child, text='c')
        tabs = self.nb.tabs()

        curr = self.nb.index('current')
        # verify that the tab gets re-added at its previous position
        child2_index = self.nb.index(self.child2)
        self.nb.hide(self.child2)
        self.nb.add(self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.assertEqual(self.nb.index(self.child2), child2_index)
        self.assertEqual(str(self.child2), self.nb.tabs()[child2_index])
        # but the tab next to it (not hidden) is the one selected now
        self.assertEqual(self.nb.index('current'), curr + 1)

    def test_forget(self):
        self.assertRaises(tkinter.TclError, self.nb.forget, -1)
        self.assertRaises(tkinter.TclError, self.nb.forget, 'hi')
        self.assertRaises(tkinter.TclError, self.nb.forget, None)

        tabs = self.nb.tabs()
        child1_index = self.nb.index(self.child1)
        self.nb.forget(self.child1)
        self.assertNotIn(str(self.child1), self.nb.tabs())
        self.assertEqual(len(tabs) - 1, len(self.nb.tabs()))

        self.nb.add(self.child1)
        self.assertEqual(self.nb.index(self.child1), 1)
        self.assertNotEqual(child1_index, self.nb.index(self.child1))

    def test_index(self):
        self.assertRaises(tkinter.TclError, self.nb.index, -1)
        self.assertRaises(tkinter.TclError, self.nb.index, None)

        self.assertIsInstance(self.nb.index('end'), int)
        self.assertEqual(self.nb.index(self.child1), 0)
        self.assertEqual(self.nb.index(self.child2), 1)
        self.assertEqual(self.nb.index('end'), 2)

    def test_insert(self):
        # moving tabs
        tabs = self.nb.tabs()
        self.nb.insert(1, tabs[0])
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert(self.child1, self.child2)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert('end', self.child1)
        self.assertEqual(self.nb.tabs(), (tabs[1], tabs[0]))
        self.nb.insert('end', 0)
        self.assertEqual(self.nb.tabs(), tabs)
        # bad moves
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, tabs[0])
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, tabs[0])

        # new tab
        child3 = ttk.Label(self.root)
        self.nb.insert(1, child3)
        self.assertEqual(self.nb.tabs(), (tabs[0], str(child3), tabs[1]))
        self.nb.forget(child3)
        self.assertEqual(self.nb.tabs(), tabs)
        self.nb.insert(self.child1, child3)
        self.assertEqual(self.nb.tabs(), (str(child3), ) + tabs)
        self.nb.forget(child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, 2, child3)
        self.assertRaises(tkinter.TclError, self.nb.insert, -1, child3)

        # bad inserts
        self.assertRaises(tkinter.TclError, self.nb.insert, 'end', None)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, 0)
        self.assertRaises(tkinter.TclError, self.nb.insert, None, None)

    def test_select(self):
        self.nb.pack()
        self.nb.wait_visibility()

        success = []
        tab_changed = []

        self.child1.bind('<Unmap>', lambda evt: success.append(True))
        self.nb.bind('<<NotebookTabChanged>>',
            lambda evt: tab_changed.append(True))

        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.select(self.child2)
        self.assertTrue(success)
        self.assertEqual(self.nb.select(), str(self.child2))

        self.nb.update()
        self.assertTrue(tab_changed)

    def test_tab(self):
        self.assertRaises(tkinter.TclError, self.nb.tab, -1)
        self.assertRaises(tkinter.TclError, self.nb.tab, 'notab')
        self.assertRaises(tkinter.TclError, self.nb.tab, None)

        self.assertIsInstance(self.nb.tab(self.child1), dict)
        self.assertEqual(self.nb.tab(self.child1, text=None), 'a')
        # newer form for querying a single option
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'a')
        self.nb.tab(self.child1, text='abc')
        self.assertEqual(self.nb.tab(self.child1, text=None), 'abc')
        self.assertEqual(self.nb.tab(self.child1, 'text'), 'abc')

    def test_tabs(self):
        self.assertEqual(len(self.nb.tabs()), 2)

        self.nb.forget(self.child1)
        self.nb.forget(self.child2)

        self.assertEqual(self.nb.tabs(), ())

    def test_traversal(self):
        self.nb.pack()
        self.nb.wait_visibility()

        self.nb.select(0)

        simulate_mouse_click(self.nb, 5, 5)
        self.nb.focus_force()
        self.nb.event_generate('<Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child1))
        self.nb.focus_force()
        self.nb.event_generate('<Shift-Control-Tab>')
        self.assertEqual(self.nb.select(), str(self.child2))

        self.nb.tab(self.child1, text='a', underline=0)
        self.nb.enable_traversal()
        self.nb.focus_force()
        simulate_mouse_click(self.nb, 5, 5)
        # on macOS the mnemonic accelerator is Option, not Alt
        if sys.platform == 'darwin':
            self.nb.event_generate('<Option-a>')
        else:
            self.nb.event_generate('<Alt-a>')
        self.assertEqual(self.nb.select(), str(self.child1))
@add_standard_options(StandardTtkOptionsTests)
class TreeviewTest(AbstractWidgetTest, unittest.TestCase):
    """Tests for ttk.Treeview items, columns, headings and tag bindings.

    NOTE(review): several string literals containing '#' (the '#0' tree
    column and the '#10' item id) had been truncated in this copy of the
    file; they are restored here to match the upstream test suite.
    """
    OPTIONS = (
        'class', 'columns', 'cursor', 'displaycolumns',
        'height', 'padding', 'selectmode', 'show',
        'style', 'takefocus', 'xscrollcommand', 'yscrollcommand',
    )

    def setUp(self):
        super().setUp()
        self.tv = self.create(padding=0)

    def create(self, **kwargs):
        return ttk.Treeview(self.root, **kwargs)

    def test_columns(self):
        widget = self.create()
        self.checkParam(widget, 'columns', 'a b c',
                        expected=('a', 'b', 'c'))
        self.checkParam(widget, 'columns', ('a', 'b', 'c'))
        self.checkParam(widget, 'columns', ())

    def test_displaycolumns(self):
        widget = self.create()
        widget['columns'] = ('a', 'b', 'c')
        self.checkParam(widget, 'displaycolumns', 'b a c',
                        expected=('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', ('b', 'a', 'c'))
        self.checkParam(widget, 'displaycolumns', '#all',
                        expected=('#all',))
        self.checkParam(widget, 'displaycolumns', (2, 1, 0))
        self.checkInvalidParam(widget, 'displaycolumns', ('a', 'b', 'd'),
                               errmsg='Invalid column index d')
        self.checkInvalidParam(widget, 'displaycolumns', (1, 2, 3),
                               errmsg='Column index 3 out of bounds')
        self.checkInvalidParam(widget, 'displaycolumns', (1, -2),
                               errmsg='Column index -2 out of bounds')

    def test_height(self):
        widget = self.create()
        self.checkPixelsParam(widget, 'height', 100, -100, 0, '3c', conv=False)
        self.checkPixelsParam(widget, 'height', 101.2, 102.6, conv=noconv)

    def test_selectmode(self):
        widget = self.create()
        self.checkEnumParam(widget, 'selectmode',
                            'none', 'browse', 'extended')

    def test_show(self):
        widget = self.create()
        self.checkParam(widget, 'show', 'tree headings',
                        expected=('tree', 'headings'))
        self.checkParam(widget, 'show', ('tree', 'headings'))
        self.checkParam(widget, 'show', ('headings', 'tree'))
        self.checkParam(widget, 'show', 'tree', expected=('tree',))
        self.checkParam(widget, 'show', 'headings', expected=('headings',))

    def test_bbox(self):
        self.tv.pack()
        self.assertEqual(self.tv.bbox(''), '')
        self.tv.wait_visibility()
        self.tv.update()

        item_id = self.tv.insert('', 'end')
        children = self.tv.get_children()
        self.assertTrue(children)

        bbox = self.tv.bbox(children[0])
        self.assertIsBoundingBox(bbox)

        # compare width in bboxes
        self.tv['columns'] = ['test']
        self.tv.column('test', width=50)
        bbox_column0 = self.tv.bbox(children[0], 0)
        root_width = self.tv.column('#0', width=None)
        if not self.wantobjects:
            root_width = int(root_width)
        self.assertEqual(bbox_column0[0], bbox[0] + root_width)

        # verify that bbox of a closed item is the empty string
        child1 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.bbox(child1), '')

    def test_children(self):
        self.assertEqual(self.tv.get_children(), ())

        item_id = self.tv.insert('', 'end')
        self.assertIsInstance(self.tv.get_children(), tuple)
        self.assertEqual(self.tv.get_children()[0], item_id)

        # add item_id and child3 as children of child2
        child2 = self.tv.insert('', 'end')
        child3 = self.tv.insert('', 'end')
        self.tv.set_children(child2, item_id, child3)
        self.assertEqual(self.tv.get_children(child2), (item_id, child3))

        # child3 has child2 as parent, thus trying to set child2 as a children
        # of child3 should result in an error
        self.assertRaises(tkinter.TclError,
                          self.tv.set_children, child3, child2)

        # remove child2 children
        self.tv.set_children(child2)
        self.assertEqual(self.tv.get_children(child2), ())

        # remove root's children
        self.tv.set_children('')
        self.assertEqual(self.tv.get_children(), ())

    def test_column(self):
        # return a dict with all options/values
        self.assertIsInstance(self.tv.column('#0'), dict)
        # return a single value of the given option
        if self.wantobjects:
            self.assertIsInstance(self.tv.column('#0', width=None), int)
        # set a new value for an option
        self.tv.column('#0', width=10)
        # testing new way to get option value
        self.assertEqual(self.tv.column('#0', 'width'),
                         10 if self.wantobjects else '10')
        self.assertEqual(self.tv.column('#0', width=None),
                         10 if self.wantobjects else '10')
        # check read-only option
        self.assertRaises(tkinter.TclError, self.tv.column, '#0', id='X')

        self.assertRaises(tkinter.TclError, self.tv.column, 'invalid')
        invalid_kws = [
            {'unknown_option': 'some value'}, {'stretch': 'wrong'},
            {'anchor': 'wrong'}, {'width': 'wrong'}, {'minwidth': 'wrong'}
        ]
        for kw in invalid_kws:
            self.assertRaises(tkinter.TclError, self.tv.column, '#0',
                              **kw)

    def test_delete(self):
        self.assertRaises(tkinter.TclError, self.tv.delete, '#10')

        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        self.tv.delete(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach should fail
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item_id, '', 'end')

        # test multiple item delete
        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        self.assertEqual(self.tv.get_children(), (item1, item2))

        self.tv.delete(item1, item2)
        self.assertFalse(self.tv.get_children())

    def test_detach_reattach(self):
        item_id = self.tv.insert('', 'end')
        item2 = self.tv.insert(item_id, 'end')

        # calling detach without items is valid, although it does nothing
        prev = self.tv.get_children()
        self.tv.detach()  # this should do nothing
        self.assertEqual(prev, self.tv.get_children())

        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # detach item with children
        self.tv.detach(item_id)
        self.assertFalse(self.tv.get_children())

        # reattach item with children
        self.tv.reattach(item_id, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, ))
        self.assertEqual(self.tv.get_children(item_id), (item2, ))

        # move a children to the root
        self.tv.move(item2, '', 'end')
        self.assertEqual(self.tv.get_children(), (item_id, item2))
        self.assertEqual(self.tv.get_children(item_id), ())

        # bad values
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, 'nonexistent', '', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.detach, 'nonexistent')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, 'otherparent', 'end')
        self.assertRaises(tkinter.TclError,
                          self.tv.reattach, item2, '', 'invalid')

        # multiple detach
        self.tv.detach(item_id, item2)
        self.assertEqual(self.tv.get_children(), ())
        self.assertEqual(self.tv.get_children(item_id), ())

    def test_exists(self):
        self.assertEqual(self.tv.exists('something'), False)
        self.assertEqual(self.tv.exists(''), True)
        self.assertEqual(self.tv.exists({}), False)

        # the following will make a tk.call equivalent to
        # tk.call(treeview, "exists") which should result in an error
        # in the tcl interpreter since tk requires an item.
        self.assertRaises(tkinter.TclError, self.tv.exists, None)

    def test_focus(self):
        # nothing is focused right now
        self.assertEqual(self.tv.focus(), '')

        item1 = self.tv.insert('', 'end')
        self.tv.focus(item1)
        self.assertEqual(self.tv.focus(), item1)

        self.tv.delete(item1)
        self.assertEqual(self.tv.focus(), '')

        # try focusing inexistent item
        self.assertRaises(tkinter.TclError, self.tv.focus, 'hi')

    def test_heading(self):
        # check a dict is returned
        self.assertIsInstance(self.tv.heading('#0'), dict)

        # check a value is returned
        self.tv.heading('#0', text='hi')
        self.assertEqual(self.tv.heading('#0', 'text'), 'hi')
        self.assertEqual(self.tv.heading('#0', text=None), 'hi')

        # invalid option
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          background=None)
        # invalid value
        self.assertRaises(tkinter.TclError, self.tv.heading, '#0',
                          anchor=1)

    def test_heading_callback(self):
        def simulate_heading_click(x, y):
            simulate_mouse_click(self.tv, x, y)
            self.tv.update()

        success = []  # no success for now

        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.heading('#0', command=lambda: success.append(True))
        self.tv.column('#0', width=100)
        self.tv.update()

        # assuming that the coords (5, 5) fall into heading #0
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")

        success = []
        commands = self.tv.master._tclCommands
        self.tv.heading('#0', command=str(self.tv.heading('#0', command=None)))
        self.assertEqual(commands, self.tv.master._tclCommands)
        simulate_heading_click(5, 5)
        if not success:
            self.fail("The command associated to the treeview heading wasn't "
                      "invoked.")

        # XXX The following raises an error in a tcl interpreter, but not in
        # Python
        #self.tv.heading('#0', command='I dont exist')
        #simulate_heading_click(5, 5)

    def test_index(self):
        # item 'what' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.index, 'what')

        self.assertEqual(self.tv.index(''), 0)

        item1 = self.tv.insert('', 'end')
        item2 = self.tv.insert('', 'end')
        c1 = self.tv.insert(item1, 'end')
        c2 = self.tv.insert(item1, 'end')
        self.assertEqual(self.tv.index(item1), 0)
        self.assertEqual(self.tv.index(c1), 0)
        self.assertEqual(self.tv.index(c2), 1)
        self.assertEqual(self.tv.index(item2), 1)

        self.tv.move(item2, '', 0)
        self.assertEqual(self.tv.index(item2), 0)
        self.assertEqual(self.tv.index(item1), 1)

        # check that index still works even after its parent and siblings
        # have been detached
        self.tv.detach(item1)
        self.assertEqual(self.tv.index(c2), 1)
        self.tv.detach(c1)
        self.assertEqual(self.tv.index(c2), 0)

        # but it fails after item has been deleted
        self.tv.delete(item1)
        self.assertRaises(tkinter.TclError, self.tv.index, c2)

    def test_insert_item(self):
        # parent 'none' doesn't exist
        self.assertRaises(tkinter.TclError, self.tv.insert, 'none', 'end')

        # open values
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          open='please')
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=True)))
        self.assertFalse(self.tv.delete(self.tv.insert('', 'end', open=False)))

        # invalid index
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'middle')

        # trying to duplicate item id is invalid
        itemid = self.tv.insert('', 'end', 'first-item')
        self.assertEqual(itemid, 'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          'first-item')
        self.assertRaises(tkinter.TclError, self.tv.insert, '', 'end',
                          MockTclObj('first-item'))

        # unicode values
        value = '\xe1ba'
        item = self.tv.insert('', 'end', values=(value, ))
        self.assertEqual(self.tv.item(item, 'values'),
                         (value,) if self.wantobjects else value)
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)

        self.tv.item(item, values=self.root.splitlist(self.tv.item(item, values=None)))
        self.assertEqual(self.tv.item(item, values=None),
                         (value,) if self.wantobjects else value)

        self.assertIsInstance(self.tv.item(item), dict)

        # erase item values
        self.tv.item(item, values='')
        self.assertFalse(self.tv.item(item, values=None))

        # item tags
        item = self.tv.insert('', 'end', tags=[1, 2, value])
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2', value) if self.wantobjects else
                         '1 2 %s' % value)
        self.tv.item(item, tags=[])
        self.assertFalse(self.tv.item(item, tags=None))
        self.tv.item(item, tags=(1, 2))
        self.assertEqual(self.tv.item(item, tags=None),
                         ('1', '2') if self.wantobjects else '1 2')

        # values with spaces
        item = self.tv.insert('', 'end', values=('a b c',
                              '%s %s' % (value, value)))
        self.assertEqual(self.tv.item(item, values=None),
            ('a b c', '%s %s' % (value, value)) if self.wantobjects else
            '{a b c} {%s %s}' % (value, value))

        # text
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text="Label here"), text=None),
            "Label here")
        self.assertEqual(self.tv.item(
            self.tv.insert('', 'end', text=value), text=None),
            value)

    def test_set(self):
        self.tv['columns'] = ['A', 'B']
        item = self.tv.insert('', 'end', values=['a', 'b'])

        self.assertEqual(self.tv.set(item), {'A': 'a', 'B': 'b'})

        self.tv.set(item, 'B', 'a')
        self.assertEqual(self.tv.item(item, values=None),
                         ('a', 'a') if self.wantobjects else 'a a')

        self.tv['columns'] = ['B']
        self.assertEqual(self.tv.set(item), {'B': 'a'})

        self.tv.set(item, 'B', 'b')
        self.assertEqual(self.tv.set(item, column='B'), 'b')
        self.assertEqual(self.tv.item(item, values=None),
                         ('b', 'a') if self.wantobjects else 'b a')

        self.tv.set(item, 'B', 123)
        self.assertEqual(self.tv.set(item, 'B'),
                         123 if self.wantobjects else '123')
        self.assertEqual(self.tv.item(item, values=None),
                         (123, 'a') if self.wantobjects else '123 a')
        self.assertEqual(self.tv.set(item),
                         {'B': 123} if self.wantobjects else {'B': '123'})

        # inexistent column
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A')
        self.assertRaises(tkinter.TclError, self.tv.set, item, 'A', 'b')

        # inexistent item
        self.assertRaises(tkinter.TclError, self.tv.set, 'notme')

    def test_tag_bind(self):
        events = []
        item1 = self.tv.insert('', 'end', tags=['call'])
        item2 = self.tv.insert('', 'end', tags=['call'])
        self.tv.tag_bind('call', '<ButtonPress-1>',
                         lambda evt: events.append(1))
        self.tv.tag_bind('call', '<ButtonRelease-1>',
                         lambda evt: events.append(2))

        self.tv.pack()
        self.tv.wait_visibility()
        self.tv.update()

        pos_y = set()
        found = set()
        for i in range(0, 100, 10):
            if len(found) == 2:  # item1 and item2 already found
                break
            item_id = self.tv.identify_row(i)
            if item_id and item_id not in found:
                pos_y.add(i)
                found.add(item_id)

        self.assertEqual(len(pos_y), 2)  # item1 and item2 y pos
        for y in pos_y:
            simulate_mouse_click(self.tv, 0, y)

        # by now there should be 4 things in the events list, since each
        # item had a bind for two events that were simulated above
        self.assertEqual(len(events), 4)
        for evt in zip(events[::2], events[1::2]):
            self.assertEqual(evt, (1, 2))

    def test_tag_configure(self):
        # Just testing parameter passing for now
        self.assertRaises(TypeError, self.tv.tag_configure)
        self.assertRaises(tkinter.TclError, self.tv.tag_configure,
                          'test', sky='blue')
        self.tv.tag_configure('test', foreground='blue')
        self.assertEqual(str(self.tv.tag_configure('test', 'foreground')),
                         'blue')
        self.assertEqual(str(self.tv.tag_configure('test', foreground=None)),
                         'blue')
        self.assertIsInstance(self.tv.tag_configure('test'), dict)
@add_standard_options(StandardTtkOptionsTests)
class SeparatorTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option tests for ttk.Separator."""
    OPTIONS = (
        'class', 'cursor', 'orient', 'style', 'takefocus',
        # 'state'?
    )
    default_orient = 'horizontal'

    def create(self, **kwargs):
        return ttk.Separator(self.root, **kwargs)
@add_standard_options(StandardTtkOptionsTests)
class SizegripTest(AbstractWidgetTest, unittest.TestCase):
    """Standard-option tests for ttk.Sizegrip."""
    OPTIONS = (
        'class', 'cursor', 'style', 'takefocus',
        # 'state'?
    )

    def create(self, **kwargs):
        return ttk.Sizegrip(self.root, **kwargs)
# Collected GUI test cases, consumed by the test runner via run_unittest().
tests_gui = (
    ButtonTest, CheckbuttonTest, ComboboxTest, EntryTest,
    FrameTest, LabelFrameTest, LabelTest, MenubuttonTest,
    NotebookTest, PanedWindowTest, ProgressbarTest,
    RadiobuttonTest, ScaleTest, ScrollbarTest, SeparatorTest,
    SizegripTest, TreeviewTest, WidgetTest,
)
if __name__ == "__main__":
    unittest.main()
| true | true |
f73ac8992dda1543526d61357ec581788457a368 | 1,499 | py | Python | blog/bills/forms.py | sivatoms/Blog_Posts | 28944c66efb7a296a662d3b81bdaf65b153c995c | [
"bzip2-1.0.6"
] | null | null | null | blog/bills/forms.py | sivatoms/Blog_Posts | 28944c66efb7a296a662d3b81bdaf65b153c995c | [
"bzip2-1.0.6"
] | 4 | 2020-04-09T18:28:58.000Z | 2021-06-10T18:51:22.000Z | blog/bills/forms.py | sivatoms/Blog_posts | 28944c66efb7a296a662d3b81bdaf65b153c995c | [
"bzip2-1.0.6"
] | null | null | null | from django import forms
from .models import Transactions, GroupMembers,Group
class Bill_CreateForm(forms.ModelForm):
    """ModelForm for creating a Transactions record.

    ``user_list`` is an iterable of group-member objects; one checkbox per
    member is offered for the 'share_with' field (the member object itself
    is used as the choice value, its ``members`` attribute as the label).
    """

    def __init__(self, user_list, *args, **kwargs):
        super(Bill_CreateForm, self).__init__(*args, **kwargs)
        self.fields['share_with'] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple,
            choices=[(name, name.members) for name in user_list])

    class Meta:
        model = Transactions
        fields = (
            'bill_type',
            'amount',
            'added_by',
            'added_to',
            'share_with',
        )
class Bill_EditForm(forms.ModelForm):
    """ModelForm for editing a Transactions record.

    NOTE(review): identical to Bill_CreateForm; kept separate so the two
    views can diverge, but consider sharing a common base form.
    """

    def __init__(self, user_list, *args, **kwargs):
        super(Bill_EditForm, self).__init__(*args, **kwargs)
        self.fields['share_with'] = forms.MultipleChoiceField(
            widget=forms.CheckboxSelectMultiple,
            choices=[(name, name.members) for name in user_list])

    class Meta:
        model = Transactions
        fields = (
            'bill_type',
            'amount',
            'added_by',
            'added_to',
            'share_with',
        )
class Group_CreateForm(forms.ModelForm):
    """Form for creating a Group; the user only supplies the group name."""
    class Meta:
        model = Group
        fields=[
            'group_name',
        ]
| 32.586957 | 158 | 0.6004 | from django import forms
from .models import Transactions, GroupMembers,Group
class Bill_CreateForm(forms.ModelForm):
def __init__(self, user_list, *args, **kwargs):
super(Bill_CreateForm, self).__init__(*args, **kwargs)
self.fields['share_with'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,choices=tuple([(name, name.members) for name in user_list]))
class Meta:
model = Transactions
fields = (
'bill_type',
'amount',
'added_by',
'added_to',
'share_with',
)
class Bill_EditForm(forms.ModelForm):
def __init__(self, user_list, *args, **kwargs):
super(Bill_EditForm, self).__init__(*args, **kwargs)
self.fields['share_with'] = forms.MultipleChoiceField(widget=forms.CheckboxSelectMultiple,choices=tuple([(name, name.members) for name in user_list]))
class Meta:
model = Transactions
fields = (
'bill_type',
'amount',
'added_by',
'added_to',
'share_with',
)
class Group_CreateForm(forms.ModelForm):
class Meta:
model = Group
fields=[
'group_name',
]
| true | true |
f73ac96f95aedad32c337d17a7c0091a32952e8f | 3,772 | py | Python | contrib/macdeploy/custom_dsstore.py | cobrabytes/cobrax | d57cbace735ce1f0ffae7e866428358dc475714d | [
"MIT"
] | 3 | 2019-04-02T14:58:30.000Z | 2020-04-03T14:29:55.000Z | contrib/macdeploy/custom_dsstore.py | cobrabytes/cobrax | d57cbace735ce1f0ffae7e866428358dc475714d | [
"MIT"
] | null | null | null | contrib/macdeploy/custom_dsstore.py | cobrabytes/cobrax | d57cbace735ce1f0ffae7e866428358dc475714d | [
"MIT"
] | 1 | 2020-04-06T05:09:56.000Z | 2020-04-06T05:09:56.000Z | #!/usr/bin/env python
# Copyright (c) 2013-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
# Command line: custom_dsstore.py <output .DS_Store path> <volume/package name>
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
# Open (or create) the .DS_Store and write the Finder window settings
# ('bwsp'): fixed window geometry with status bar, sidebar, toolbar and
# tab view hidden, path bar shown.
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
    'ShowStatusBar': False,
    'WindowBounds': b'{{300, 280}, {500, 343}}',
    'ContainerShowSidebar': False,
    'SidebarWidth': 0,
    'ShowTabView': False,
    'PreviewPaneVisibility': False,
    'ShowToolbar': False,
    'ShowSidebar': False,
    'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
# Rewrite the template background-image alias so it points at the freshly
# named volume / disk image instead of the paths baked into the template.
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
# Store the icon-view settings ('icvp'); 'vSrn' is a .DS_Store record —
# presumably a version/serial marker (TODO confirm against DS_Store docs).
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
# Icon positions inside the DMG window.
ds['Applications']['Iloc'] = (370, 156)
ds['cobrax-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
from __future__ import division,print_function,unicode_literals
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': b'{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['cobrax-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close() | true | true |
f73ac9be001c59c31029b8768a02d9bf6ee25a79 | 350 | py | Python | package/ml/regression/logistic.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | package/ml/regression/logistic.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | package/ml/regression/logistic.py | xenron/coco | e318d534127b769612716c05d40e3d5b090eb5a3 | [
"MIT"
] | null | null | null | from sklearn import linear_model
from ml.regression.base import Regression
class LogisticRegression(Regression):
    """Logistic-regression model exposed through the project's
    ``Regression`` interface, backed by scikit-learn."""

    def __init__(self):
        super().__init__()
        self._name = "Logistic"
        # C is sklearn's inverse regularization strength; 1e5 makes the
        # regularization very weak.
        self._model = linear_model.LogisticRegression(C=1e5)

    def predict_proba(self, data):
        """Return per-class probability estimates for ``data``."""
        return self._model.predict_proba(data)
| 25 | 60 | 0.722857 | from sklearn import linear_model
from ml.regression.base import Regression
class LogisticRegression(Regression):
def __init__(self):
Regression.__init__(self)
self._name = "Logistic"
self._model = linear_model.LogisticRegression(C=1e5)
def predict_proba(self, data):
return self._model.predict_proba(data)
| true | true |
f73acca3571b8aea34d7e9fe1577667e41ffb1da | 1,036 | py | Python | geometric_primitives/rules/rules_mixed_22_12.py | POSTECH-CVLab/Geometric-Primitives | e4b16d8930f4a9d1c906d06255988d02f54a6deb | [
"MIT"
] | 1 | 2022-03-16T13:01:33.000Z | 2022-03-16T13:01:33.000Z | geometric_primitives/rules/rules_mixed_22_12.py | POSTECH-CVLab/Geometric-Primitives | e4b16d8930f4a9d1c906d06255988d02f54a6deb | [
"MIT"
] | null | null | null | geometric_primitives/rules/rules_mixed_22_12.py | POSTECH-CVLab/Geometric-Primitives | e4b16d8930f4a9d1c906d06255988d02f54a6deb | [
"MIT"
] | null | null | null | import numpy as np
# Sampling probabilities, one per contact rule below, normalized to sum to 1.
PROBS_CONTACTS = np.array([4.0, 2.0, 4.0, 2.0])
PROBS_CONTACTS /= np.sum(PROBS_CONTACTS)

# Each rule: a family of relative translations between two primitives, the
# axis ('direction') the family belongs to, and the number of contacts the
# placement produces.
RULE_CONTACTS = [
    {
        'num_contacts': 1,
        'translations': [[0.5, 1.0], [-0.5, 1.0], [0.5, -1.0], [-0.5, -1.0]],
        'direction': 0
    },
    {
        'num_contacts': 2,
        'translations': [[0.5, 0.0], [-0.5, 0.0]],
        'direction': 0
    },
    {
        'num_contacts': 1,
        'translations': [[1.0, 0.5], [-1.0, 0.5], [1.0, -0.5], [-1.0, -0.5]],
        'direction': 1
    },
    {
        'num_contacts': 2,
        'translations': [[0.0, 0.5], [0.0, -0.5]],
        'direction': 1
    },
]

# Flatten RULE_CONTACTS into [id, [direction, translation, num_contacts]]
# entries with consecutive 1-based ids, one entry per translation.
LIST_RULES = [
    [rule_id, spec]
    for rule_id, spec in enumerate(
        (
            [rule['direction'], translation, rule['num_contacts']]
            for rule in RULE_CONTACTS
            for translation in rule['translations']
        ),
        start=1,
    )
]
| 23.022222 | 77 | 0.485521 | import numpy as np
PROBS_CONTACTS = np.array([4.0, 2.0, 4.0, 2.0])
PROBS_CONTACTS /= np.sum(PROBS_CONTACTS)
RULE_CONTACTS = [
{
'num_contacts': 1,
'translations': [[0.5, 1.0], [-0.5, 1.0], [0.5, -1.0], [-0.5, -1.0]],
'direction': 0
},
{
'num_contacts': 2,
'translations': [[0.5, 0.0], [-0.5, 0.0]],
'direction': 0
},
{
'num_contacts': 1,
'translations': [[1.0, 0.5], [-1.0, 0.5], [1.0, -0.5], [-1.0, -0.5]],
'direction': 1
},
{
'num_contacts': 2,
'translations': [[0.0, 0.5], [0.0, -0.5]],
'direction': 1
},
]
LIST_RULES = []
ind = 1
for rule in RULE_CONTACTS:
cur_direction = rule['direction']
cur_num_contacts = rule['num_contacts']
for translation in rule['translations']:
cur_rule = [ind, [cur_direction, translation, cur_num_contacts]]
LIST_RULES.append(cur_rule)
ind += 1
| true | true |
f73accc173ad73d9a58a5324465c80f5c599d226 | 650 | py | Python | src/lithopscloud/modules/gen2/ray/endpoint.py | Cohen-J-Omer/lithopscloud | bc897ae3952e0faca42581846a8b3169a0b5b49f | [
"Apache-2.0"
] | 2 | 2021-08-22T04:36:34.000Z | 2021-11-08T16:02:57.000Z | src/lithopscloud/modules/gen2/ray/endpoint.py | Cohen-J-Omer/lithopscloud | bc897ae3952e0faca42581846a8b3169a0b5b49f | [
"Apache-2.0"
] | null | null | null | src/lithopscloud/modules/gen2/ray/endpoint.py | Cohen-J-Omer/lithopscloud | bc897ae3952e0faca42581846a8b3169a0b5b49f | [
"Apache-2.0"
] | 1 | 2021-08-22T08:45:19.000Z | 2021-08-22T08:45:19.000Z | from lithopscloud.modules.gen2.endpoint import EndpointConfig
from typing import Any, Dict
from lithopscloud.modules.utils import get_region_by_endpoint
class RayEndpointConfig(EndpointConfig):
    """Endpoint configuration for the Ray 'provider' section."""

    def __init__(self, base_config: Dict[str, Any]) -> None:
        super().__init__(base_config)
        # Pre-seed the default region from any endpoint already present in
        # the provider section; leave it unset otherwise.
        endpoint = self.base_config['provider'].get('endpoint')
        region = get_region_by_endpoint(endpoint) if endpoint else None
        self.defaults['region'] = region

    def update_config(self, endpoint):
        """Store the chosen endpoint and its derived region in the config."""
        provider = self.base_config['provider']
        provider['endpoint'] = endpoint
        provider['region'] = get_region_by_endpoint(endpoint)
| 40.625 | 98 | 0.743077 | from lithopscloud.modules.gen2.endpoint import EndpointConfig
from typing import Any, Dict
from lithopscloud.modules.utils import get_region_by_endpoint
class RayEndpointConfig(EndpointConfig):
def __init__(self, base_config: Dict[str, Any]) -> None:
super().__init__(base_config)
base_endpoint = self.base_config['provider'].get('endpoint')
self.defaults['region'] = get_region_by_endpoint(base_endpoint) if base_endpoint else None
def update_config(self, endpoint):
self.base_config['provider']['endpoint'] = endpoint
self.base_config['provider']['region'] = get_region_by_endpoint(endpoint)
| true | true |
f73acd35f78e02cd3c56065883d4d1c64b0b4deb | 2,619 | py | Python | plugins/module_utils/storage.py | krestomatio/ansible-collection-k8s | 895f3d1358ca1cdcea2c559cb31f9ff20b868192 | [
"Apache-2.0"
] | 1 | 2021-07-02T18:52:44.000Z | 2021-07-02T18:52:44.000Z | plugins/module_utils/storage.py | krestomatio/ansible-collection-k8s | 895f3d1358ca1cdcea2c559cb31f9ff20b868192 | [
"Apache-2.0"
] | 89 | 2021-06-27T00:37:30.000Z | 2022-03-23T03:23:33.000Z | plugins/module_utils/storage.py | krestomatio/ansible-collection-k8s | 895f3d1358ca1cdcea2c559cb31f9ff20b868192 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import math
from ansible.module_utils.facts.hardware import linux
from ansible.module_utils.facts.utils import get_mount_size
def get_mount_info(module):
    """Return mount facts for the path given in ``module.params['path']``.

    Scans the mount table and, for the matching mount point, returns the
    size information from ``get_mount_size`` plus ``mount``, ``device``,
    ``fstype`` and ``options`` keys.  Returns an empty dict when the path
    is not a mount point.
    """
    lh = linux.LinuxHardware(module)
    bind_mounts = lh._find_bind_mounts()
    mtab_entries = lh._mtab_entries()
    mount_info = {}
    for fields in mtab_entries:
        # Transform octal escape sequences.
        fields = [lh._replace_octal_escapes(field) for field in fields]
        (device, mount, fstype, options) = (fields[0], fields[1],
                                            fields[2], fields[3])
        # Skip pseudo filesystems: no real device path, no NFS-style source.
        if not device.startswith('/') and ':/' not in device or fstype \
                == 'none':
            continue
        # Bug fix: this was `break`, which aborted the whole scan at the
        # first non-matching entry so only the first mtab entry could ever
        # be reported; keep scanning the remaining entries instead.
        if module.params['path'] != mount:
            continue
        mount_info = get_mount_size(mount)
        mount_info['mount'] = mount
        mount_info['device'] = device
        mount_info['fstype'] = fstype
        mount_info['options'] = options
        if mount in bind_mounts:
            # Only add if not already there; we might have a plain /etc/mtab.
            if not lh.MTAB_BIND_MOUNT_RE.match(options):
                mount_info['options'] += ',bind'
    return mount_info
def b_to_gib(bytes):
    """Convert a byte count to GiB, rounded to one decimal place."""
    # 2 ** 30 bytes == 1 GiB.
    return round(int(bytes) / 2 ** 30, 1)
def below_twenty_pct(size_available, size_total):
    """Return True when the available size is below 20% of the total.

    Negative inputs are treated as invalid and yield False.
    """
    total = float(size_total)
    available = float(size_available)
    if total < 0 or available < 0:
        return False
    return percentage(available, total) < 20.0


def percentage(part, whole):
    """Return ``part`` as a percentage of ``whole``, rounded to one decimal."""
    return round(100 * float(part) / float(whole), 1)
def recommended_size_gib(
    current_gib,
    increment_gib=25,
    cap_gib=1000,
    expansion_required=False,
):
    """Round ``current_gib`` up to the next multiple of ``increment_gib``.

    When ``expansion_required`` is set and the current size is already
    within 20% of that rounded value, one extra increment is added as
    headroom.  The result never exceeds ``cap_gib``.
    """
    cap = float(cap_gib)
    current = float(current_gib)
    step = float(increment_gib)
    # Next multiple of `step` at or above `current`.
    size = math.ceil(current / step) * step
    # Within 20% of the rounded size while growth is required: add headroom.
    if expansion_required and current > 0.8 * size:
        size += step
    return min(size, cap)
| 30.103448 | 85 | 0.668194 | from __future__ import absolute_import, division, print_function
__metaclass__ = type
import math
from ansible.module_utils.facts.hardware import linux
from ansible.module_utils.facts.utils import get_mount_size
def get_mount_info(module):
lh = linux.LinuxHardware(module)
bind_mounts = lh._find_bind_mounts()
mtab_entries = lh._mtab_entries()
mount_info = {}
for fields in mtab_entries:
fields = [lh._replace_octal_escapes(field) for field in fields]
(device, mount, fstype, options) = (fields[0], fields[1],
fields[2], fields[3])
if not device.startswith('/') and ':/' not in device or fstype \
== 'none':
continue
if module.params['path'] != mount:
break
mount_info = get_mount_size(mount)
mount_info['mount'] = mount
mount_info['device'] = device
mount_info['fstype'] = fstype
mount_info['options'] = options
if mount in bind_mounts:
if not lh.MTAB_BIND_MOUNT_RE.match(options):
mount_info['options'] += ',bind'
return mount_info
def b_to_gib(bytes):
gib = round(int(bytes) / 1024 ** 3, 1)
return gib
def below_twenty_pct(size_available, size_total):
float_size_total = float(size_total)
float_size_available = float(size_available)
if float_size_total < 0 or float_size_available < 0:
return False
return bool(percentage(float_size_available, float_size_total)
< 20.0)
def percentage(part, whole):
return round(100 * float(part) / float(whole), 1)
def recommended_size_gib(
current_gib,
increment_gib=25,
cap_gib=1000,
expansion_required=False,
):
float_cap_gib = float(cap_gib)
float_current_gib = float(current_gib)
float_increment_gib = float(increment_gib)
autoexpand_gib = math.ceil(float_current_gib / float_increment_gib) \
* float_increment_gib
if expansion_required and float_current_gib > autoexpand_gib * 0.8:
autoexpand_gib += float_increment_gib
if autoexpand_gib < float_cap_gib:
return autoexpand_gib
else:
return float_cap_gib
| true | true |
f73ace1ba171ba4daa91c9c9f9fc822852b22115 | 1,753 | py | Python | Medium/230.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Medium/230.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Medium/230.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 230. Kth Smallest Element in a BST
#
# Description:
# Given a binary search tree, write a function kthSmallest to find the kth smallest element in it.
# Note:
# You may assume k is always valid, 1 ≤ k ≤ BST's total elements.
#
# Example 1:
# Input: root = [3,1,4,null,2], k = 1
# 3
# / \
# 1 4
# \
# 2
# Output: 1
#
# Example 2:
# Input: root = [5,3,6,2,4,null,null,1], k = 3
# 5
# / \
# 3 6
# / \
# 2 4
# /
# 1
# Output: 3
#
# Follow up:
# What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
#
# Version: 1.0
# 09/26/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def kthSmallest(self, root, k):
        """
        :type root: TreeNode
        :type k: int
        :rtype: int

        Iterative inorder traversal that stops as soon as the k-th
        smallest value is reached, visiting only O(h + k) nodes instead
        of materializing the full inorder list (the problem guarantees
        1 <= k <= number of nodes).
        """
        stack = []
        node = root
        while stack or node:
            # Descend to the leftmost unvisited node.
            while node:
                stack.append(node)
                node = node.left
            node = stack.pop()
            k -= 1
            if k == 0:
                return node.val
            node = node.right
        # Only reachable when k exceeds the number of nodes (invalid input).
        return None

    def helper(self, root, orderList):
        """Append the inorder traversal of ``root`` to ``orderList``.

        Kept for backward compatibility with callers of the original
        list-building implementation.
        """
        if not root:
            return
        self.helper(root.left, orderList)
        orderList.append(root.val)
        self.helper(root.right, orderList)
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Idea from https://leetcode.com/problems/kth-smallest-element-in-a-bst/discuss/63660/3-ways-implemented-in-JAVA-(Python):-Binary-Search-in-order-iterative-and-recursive | 25.042857 | 169 | 0.563035 |
#
# Example 1:
# Input: root = [3,1,4,null,2], k = 1
# 3
# / \
# 1 4
# \
# 2
# Output: 1
#
# Example 2:
# Input: root = [5,3,6,2,4,null,null,1], k = 3
# 5
# / \
# 3 6
# / \
# 2 4
# /
# 1
# Output: 3
#
# Follow up:
# What if the BST is modified (insert/delete operations) often and you need to find the kth smallest frequently? How would you optimize the kthSmallest routine?
#
# Version: 1.0
# 09/26/18 by Jianfa
# ------------------------------
# Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
def kthSmallest(self, root, k):
orderList = []
self.helper(root, orderList)
return orderList[k-1]
def helper(self, root, orderList):
if not root:
return
self.helper(root.left, orderList)
orderList.append(root.val)
self.helper(root.right, orderList)
# Used for testing
if __name__ == "__main__":
test = Solution()
# ------------------------------
# Summary:
# Idea from https://leetcode.com/problems/kth-smallest-element-in-a-bst/discuss/63660/3-ways-implemented-in-JAVA-(Python):-Binary-Search-in-order-iterative-and-recursive | true | true |
f73ace623db2f5593d9d8d6027b84f567eab2154 | 364 | py | Python | a-aulas/exe-004 comandos primitivos.py | solito83/aprendendo-programar-em-python | af5bce7efa2ffa4d081c015830e3710044899924 | [
"MIT"
] | null | null | null | a-aulas/exe-004 comandos primitivos.py | solito83/aprendendo-programar-em-python | af5bce7efa2ffa4d081c015830e3710044899924 | [
"MIT"
] | null | null | null | a-aulas/exe-004 comandos primitivos.py | solito83/aprendendo-programar-em-python | af5bce7efa2ffa4d081c015830e3710044899924 | [
"MIT"
] | null | null | null | print('exe-004 comandos primitivos')
print('Tipos primitivos de comandos -int = inteiro- floot')
# Read two integers and compute their sum.
num1=int(input('Digite um número '))
num2=int(input('Digite outro número '))
soma=num1+num2
fim = 'The End'
#print('A soma de',num1, 'e',num2, 'é',soma)
# Report the sum and the type of the first operand.
print('A soma entre {} e {} vale {}'.format(num1, num2, soma))
print('Classe de num1',type(num1))
print (fim)
| 24.266667 | 62 | 0.678571 | print('exe-004 comandos primitivos')
print('Tipos primitivos de comandos -int = inteiro- floot')
num1=int(input('Digite um número '))
num2=int(input('Digite outro número '))
soma=num1+num2
fim = 'The End'
print('A soma entre {} e {} vale {}'.format(num1, num2, soma))
print('Classe de num1',type(num1))
print (fim)
| true | true |
f73acf03fc294d4ef378ddb86acfe81f9b48c3d1 | 2,115 | py | Python | backend/server/apps/endpoints/views.py | muhammedfasal/my_ml_service | 64a6a77f8a8c66fc2cef433927e953e28f09301f | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/views.py | muhammedfasal/my_ml_service | 64a6a77f8a8c66fc2cef433927e953e28f09301f | [
"MIT"
] | null | null | null | backend/server/apps/endpoints/views.py | muhammedfasal/my_ml_service | 64a6a77f8a8c66fc2cef433927e953e28f09301f | [
"MIT"
] | null | null | null | from rest_framework import viewsets
from rest_framework import mixins
from rest_framework.exceptions import APIException

from django.db import transaction

from apps.endpoints.models import Endpoint
from apps.endpoints.serializers import EndpointSerializer
from apps.endpoints.models import MLAlgorithm
from apps.endpoints.serializers import MLAlgorithmSerializer
from apps.endpoints.models import MLAlgorithmStatus
from apps.endpoints.serializers import MLAlgorithmStatusSerializer
from apps.endpoints.models import MLRequest
from apps.endpoints.serializers import MLRequestSerializer
class EndpointViewSet(
    mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet
):
    """Read-only API (list / retrieve) over Endpoint objects."""
    serializer_class = EndpointSerializer
    queryset = Endpoint.objects.all()
class MLAlgorithmViewSet(
    mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet
):
    """Read-only API (list / retrieve) over MLAlgorithm objects."""
    serializer_class = MLAlgorithmSerializer
    queryset = MLAlgorithm.objects.all()
def deactivate_other_statuses(instance):
    """Mark every older active status of the same algorithm as inactive.

    ``instance`` is the freshly created MLAlgorithmStatus; all statuses of
    the same parent algorithm created before it are flipped to
    ``active=False`` and persisted in a single ``bulk_update`` query.
    """
    old_statuses = list(
        MLAlgorithmStatus.objects.filter(
            parent_mlalgorithm=instance.parent_mlalgorithm,
            created_at__lt=instance.created_at,
            active=True,
        )
    )
    # Evaluate the queryset once and mutate in memory instead of the
    # original index-based `for i in range(len(...))` loop.
    for status in old_statuses:
        status.active = False
    MLAlgorithmStatus.objects.bulk_update(old_statuses, ["active"])
class MLAlgorithmStatusViewSet(
    mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,
    mixins.CreateModelMixin
):
    """List / retrieve / create API over MLAlgorithmStatus objects.

    Creating a status atomically activates it and deactivates all older
    active statuses of the same algorithm.
    """
    serializer_class = MLAlgorithmStatusSerializer
    queryset = MLAlgorithmStatus.objects.all()

    def perform_create(self, serializer):
        # Requires module-level imports of django.db.transaction and
        # rest_framework.exceptions.APIException — the original file used
        # both names without importing them (NameError at runtime).
        try:
            with transaction.atomic():
                instance = serializer.save(active=True)
                # Set active=False for every older status of this algorithm.
                deactivate_other_statuses(instance)
        except Exception as e:
            # Surface any failure as a DRF APIException, preserving the
            # original cause for debugging.
            raise APIException(str(e)) from e
class MLRequestViewSet(
    mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,
    mixins.UpdateModelMixin
):
    """List / retrieve / update API over MLRequest objects (no create/delete)."""
    serializer_class = MLRequestSerializer
    queryset = MLRequest.objects.all()
| 33.046875 | 99 | 0.740898 | from rest_framework import viewsets
from rest_framework import mixins
from apps.endpoints.models import Endpoint
from apps.endpoints.serializers import EndpointSerializer
from apps.endpoints.models import MLAlgorithm
from apps.endpoints.serializers import MLAlgorithmSerializer
from apps.endpoints.models import MLAlgorithmStatus
from apps.endpoints.serializers import MLAlgorithmStatusSerializer
from apps.endpoints.models import MLRequest
from apps.endpoints.serializers import MLRequestSerializer
class EndpointViewSet(
mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet
):
serializer_class = EndpointSerializer
queryset = Endpoint.objects.all()
class MLAlgorithmViewSet(
mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet
):
serializer_class = MLAlgorithmSerializer
queryset = MLAlgorithm.objects.all()
def deactivate_other_statuses(instance):
old_statuses = MLAlgorithmStatus.objects.filter(parent_mlalgorithm=instance.parent_mlalgorithm,
created_at__lt=instance.created_at,
active=True)
for i in range(len(old_statuses)):
old_statuses[i].active = False
MLAlgorithmStatus.objects.bulk_update(old_statuses, ["active"])
class MLAlgorithmStatusViewSet(
mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,
mixins.CreateModelMixin
):
serializer_class = MLAlgorithmStatusSerializer
queryset = MLAlgorithmStatus.objects.all()
def perform_create(self, serializer):
try:
with transaction.atomic():
instance = serializer.save(active=True)
deactivate_other_statuses(instance)
except Exception as e:
raise APIException(str(e))
class MLRequestViewSet(
mixins.RetrieveModelMixin, mixins.ListModelMixin, viewsets.GenericViewSet,
mixins.UpdateModelMixin
):
serializer_class = MLRequestSerializer
queryset = MLRequest.objects.all()
| true | true |
f73acff10ef7c7399aa9b1fef305aa3e19118980 | 2,113 | py | Python | pyiArduinoI2Crelay/examples/detectionModule.py | tremaru/Py_iarduino_I2C_Relay | a2caefb84dc52f97c72b7c71dd7abf8e2f63c800 | [
"MIT"
] | null | null | null | pyiArduinoI2Crelay/examples/detectionModule.py | tremaru/Py_iarduino_I2C_Relay | a2caefb84dc52f97c72b7c71dd7abf8e2f63c800 | [
"MIT"
] | null | null | null | pyiArduinoI2Crelay/examples/detectionModule.py | tremaru/Py_iarduino_I2C_Relay | a2caefb84dc52f97c72b7c71dd7abf8e2f63c800 | [
"MIT"
] | 1 | 2021-11-09T13:16:14.000Z | 2021-11-09T13:16:14.000Z | #encoding=utf-8
# This example detects which relay / power-switch module type is attached to the I2C bus.
from pyiArduinoI2Crelay import * # import the relay / power-switch library
pwrfet = pyiArduinoI2Crelay() # create the pwrfet object
# If an address is passed when creating the object, e.g. pyiArduinoI2Crelay(0xBB), the example works with the module at that specific address.
print("На шине I2C", end=' ') # prefix of the status line printed below
if pwrfet.begin(): # initialise communication; a falsy result means no module was found
    address = pwrfet.getAddress() # store the module address
    model = pwrfet.getModel() # store the model code
    version = pwrfet.getVersion() # store the firmware version
    if model == DEF_MODEL_2RM: # compare against the known model constants
        model = "электромеханическим реле на 2-канала" # and replace the code with a human-readable description
    elif model == DEF_MODEL_4RT:
        model = "твердотельным реле на 4-канала"
    elif model == DEF_MODEL_4NC:
        model = "силовым ключом на 4 N-канала с измерением тока"
    elif model == DEF_MODEL_4PC:
        model = "силовым ключом на 4 P-канала с измерением тока"
    elif model == DEF_MODEL_4NP:
        model = "силовым ключом на 4 n-канала до 10a"
    elif model == DEF_MODEL_4PP:
        model = "силовым ключом на 4 P-канала до 10A"
    else:
        model = "неизвестным силовым ключом или реле"
    # print the detection result to the console.
    print("найден модуль с адресом %#.2x,"
          " который является %s,"
          " версия прошивки модуля: %s.0."
          % (address, model, version))
else:
    # pwrfet.begin() returned a falsy value: the library could not find
    # any relay or power-switch module on the bus.
    print("нет ни силовых ключей, ни реле!")
| 62.147059 | 219 | 0.549929 |
from pyiArduinoI2Crelay import *
pwrfet = pyiArduinoI2Crelay()
print("На шине I2C", end=' ')
if pwrfet.begin():
address = pwrfet.getAddress()
model = pwrfet.getModel()
version = pwrfet.getVersion()
if model == DEF_MODEL_2RM:
model = "электромеханическим реле на 2-канала"
elif model == DEF_MODEL_4RT:
model = "твердотельным реле на 4-канала"
elif model == DEF_MODEL_4NC:
model = "силовым ключом на 4 N-канала с измерением тока"
elif model == DEF_MODEL_4PC:
model = "силовым ключом на 4 P-канала с измерением тока"
elif model == DEF_MODEL_4NP:
model = "силовым ключом на 4 n-канала до 10a"
elif model == DEF_MODEL_4PP:
model = "силовым ключом на 4 P-канала до 10A"
else:
model = "неизвестным силовым ключом или реле"
print("найден модуль с адресом %#.2x,"
" который является %s,"
" версия прошивки модуля: %s.0."
% (address, model, version))
else:
print("нет ни силовых ключей, ни реле!")
| true | true |
f73ad088db78196f2b4ffe622164e8a5f8894d3e | 1,351 | py | Python | Lista2_24.py | NormanDeveloper/Lista2_Python | 232c80b59c98023ee8a7844e2097cb102ed61261 | [
"MIT"
] | 1 | 2018-08-24T15:52:13.000Z | 2018-08-24T15:52:13.000Z | Lista2_24.py | NormanDeveloper/Lista2_Python | 232c80b59c98023ee8a7844e2097cb102ed61261 | [
"MIT"
] | null | null | null | Lista2_24.py | NormanDeveloper/Lista2_Python | 232c80b59c98023ee8a7844e2097cb102ed61261 | [
"MIT"
] | 1 | 2018-09-11T17:37:54.000Z | 2018-09-11T17:37:54.000Z | '''
Faça um Programa que leia 2 números e em seguida pergunte ao usuário qual operação ele deseja realizar. O resultado da operação deve ser acompanhado de uma frase que diga se o número é:
par ou ímpar;
positivo ou negativo;
inteiro ou decimal.
'''
# Read both operands and the desired operation from the user.
numero1 = float(input("Digite o número 1: "))
numero2 = float(input("Digite o número 2: "))
operacao = input("Digite a operação que deseja realizar: [+, -, /, *]: ")
def checar():
    # Classify the module-level global `resultado_operacao`:
    # integer vs decimal, even ("Par") vs odd ("Impar"),
    # positive vs negative.
    # NOTE(review): relies on `resultado_operacao` being assigned before
    # the call.  For odd and for decimal results the sign is never
    # printed, although the exercise statement asks for it — confirm
    # whether that is intended.
    if (resultado_operacao // 1 == resultado_operacao):
        print("Inteiro")
        if resultado_operacao % 2 == 0:
            print("Par")
            if resultado_operacao > 0:
                print("Positivo")
            else:
                print("Negativo")
        else:
            print("Impar")
    else:
        print("Decimal")
# Dispatch on the chosen operation, print the result and classify it.
if operacao == '+':
    resultado_operacao = numero1 + numero2
    print("Resultado: ", resultado_operacao)
    checar()
elif operacao == '-':
    resultado_operacao = numero1 - numero2
    print("Resultado: ", resultado_operacao)
    checar()
elif operacao == '/':
    # NOTE(review): division by zero is not handled here.
    resultado_operacao = numero1 / numero2
    print("Resultado: ", resultado_operacao)
    checar()
elif operacao == '*':
    resultado_operacao = numero1 * numero2
    print("Resultado: ", resultado_operacao)
    checar()
else:
    print("Valor Invalido")
| 29.369565 | 186 | 0.606958 |
numero1 = float(input("Digite o número 1: "))
numero2 = float(input("Digite o número 2: "))
operacao = input("Digite a operação que deseja realizar: [+, -, /, *]: ")
def checar():
if (resultado_operacao // 1 == resultado_operacao):
print("Inteiro")
if resultado_operacao % 2 == 0:
print("Par")
if resultado_operacao > 0:
print("Positivo")
else:
print("Negativo")
else:
print("Impar")
else:
print("Decimal")
if operacao == '+':
resultado_operacao = numero1 + numero2
print("Resultado: ", resultado_operacao)
checar()
elif operacao == '-':
resultado_operacao = numero1 - numero2
print("Resultado: ", resultado_operacao)
checar()
elif operacao == '/':
resultado_operacao = numero1 / numero2
print("Resultado: ", resultado_operacao)
checar()
elif operacao == '*':
resultado_operacao = numero1 * numero2
print("Resultado: ", resultado_operacao)
checar()
else:
print("Valor Invalido")
| true | true |
f73ad0b4850aad3ec1aa2eec3a277fc53686f12e | 3,758 | py | Python | examples/modify_convnet.py | leimao/ONNX-Python-Examples | a70108116a5856b920b641002b32776edd0e91c6 | [
"MIT"
] | 6 | 2021-07-22T18:46:20.000Z | 2022-01-19T16:07:16.000Z | examples/modify_convnet.py | leimao/ONNX-Python-Examples | a70108116a5856b920b641002b32776edd0e91c6 | [
"MIT"
] | null | null | null | examples/modify_convnet.py | leimao/ONNX-Python-Examples | a70108116a5856b920b641002b32776edd0e91c6 | [
"MIT"
] | 1 | 2021-11-03T08:09:03.000Z | 2021-11-03T08:09:03.000Z | import onnx
from typing import Iterable
def print_tensor_data(initializer: onnx.TensorProto) -> None:
    """Print the raw payload of *initializer*, selecting the typed data field.

    Supports FLOAT, INT32, INT64, DOUBLE and UINT64 tensors; any other
    data type raises NotImplementedError, exactly like the original
    if/elif chain.
    """
    field_for_type = {
        onnx.TensorProto.DataType.FLOAT: "float_data",
        onnx.TensorProto.DataType.INT32: "int32_data",
        onnx.TensorProto.DataType.INT64: "int64_data",
        onnx.TensorProto.DataType.DOUBLE: "double_data",
        onnx.TensorProto.DataType.UINT64: "uint64_data",
    }
    if initializer.data_type not in field_for_type:
        raise NotImplementedError
    print(getattr(initializer, field_for_type[initializer.data_type]))
    return
def dims_prod(dims: Iterable) -> int:
    """Return the product of all entries in *dims*.

    An empty iterable yields 1 (the multiplicative identity), matching the
    original accumulator-based loop.
    """
    # math.prod replaces the hand-rolled accumulation loop (stdlib idiom).
    import math
    return math.prod(dims)
def main() -> None:
    """Load convnet.onnx, tweak its initializers/nodes, and save a copy.

    Demonstrates in-place editing of an ONNX graph:
    - overwrite all FLOAT initializer values with the constant 2;
    - append an ``epsilon`` attribute to every BatchNormalization node;
    - dump input/output names, dtypes and shapes;
    - write the modified model to convnets_modified.onnx.
    """
    model = onnx.load("convnet.onnx")
    onnx.checker.check_model(model)
    graph_def = model.graph
    initializers = graph_def.initializer
    # Modify initializers (the graph's stored weight tensors).
    for initializer in initializers:
        # Data type enum values are defined in the ONNX protobuf schema:
        # https://github.com/onnx/onnx/blob/rel-1.9.0/onnx/onnx.proto
        print("Tensor information:")
        print(
            f"Tensor Name: {initializer.name}, Data Type: {initializer.data_type}, Shape: {initializer.dims}"
        )
        print("Tensor value before modification:")
        print_tensor_data(initializer)
        # Replace every FLOAT value with the constant 2 (in place).
        if initializer.data_type == onnx.TensorProto.DataType.FLOAT:
            for i in range(dims_prod(initializer.dims)):
                initializer.float_data[i] = 2
        print("Tensor value after modification:")
        print_tensor_data(initializer)
        # To change the data type or dims, new tensors must be created from
        # scratch instead (see onnx.helper.make_tensor).
    # Modify nodes (graph operators).
    nodes = graph_def.node
    for node in nodes:
        print(node.name)
        print(node.op_type)
        print(node.input)
        print(node.output)
        # Modify BatchNormalization attributes only.
        if node.op_type == "BatchNormalization":
            print("Attributes before adding:")
            for attribute in node.attribute:
                print(attribute)
            # Add an epsilon attribute to the BN node.
            epsilon_attribute = onnx.helper.make_attribute("epsilon", 1e-06)
            node.attribute.extend([epsilon_attribute])
            # node.attribute.pop() # Pop an attribute if necessary.
            print("Attributes after adding:")
            for attribute in node.attribute:
                print(attribute)
    inputs = graph_def.input
    for graph_input in inputs:
        input_shape = []
        for d in graph_input.type.tensor_type.shape.dim:
            # dim_value == 0 is treated as an unknown/dynamic dimension here
            # (reported as None) — presumably a symbolic dim; verify.
            if d.dim_value == 0:
                input_shape.append(None)
            else:
                input_shape.append(d.dim_value)
        print(
            f"Input Name: {graph_input.name}, Input Data Type: {graph_input.type.tensor_type.elem_type}, Input Shape: {input_shape}"
        )
    outputs = graph_def.output
    for graph_output in outputs:
        output_shape = []
        for d in graph_output.type.tensor_type.shape.dim:
            if d.dim_value == 0:
                output_shape.append(None)
            else:
                output_shape.append(d.dim_value)
        print(
            f"Output Name: {graph_output.name}, Output Data Type: {graph_output.type.tensor_type.elem_type}, Output Shape: {output_shape}"
        )
    # To modify inputs and outputs, we would rather create new inputs and outputs.
    # Using onnx.helper.make_tensor_value_info and onnx.helper.make_model
    onnx.checker.check_model(model)
    onnx.save(model, "convnets_modified.onnx")
if __name__ == "__main__":
    main()
| 32.964912 | 138 | 0.640766 | import onnx
from typing import Iterable
def print_tensor_data(initializer: onnx.TensorProto) -> None:
if initializer.data_type == onnx.TensorProto.DataType.FLOAT:
print(initializer.float_data)
elif initializer.data_type == onnx.TensorProto.DataType.INT32:
print(initializer.int32_data)
elif initializer.data_type == onnx.TensorProto.DataType.INT64:
print(initializer.int64_data)
elif initializer.data_type == onnx.TensorProto.DataType.DOUBLE:
print(initializer.double_data)
elif initializer.data_type == onnx.TensorProto.DataType.UINT64:
print(initializer.uint64_data)
else:
raise NotImplementedError
return
def dims_prod(dims: Iterable) -> int:
prod = 1
for dim in dims:
prod *= dim
return prod
def main() -> None:
model = onnx.load("convnet.onnx")
onnx.checker.check_model(model)
graph_def = model.graph
initializers = graph_def.initializer
for initializer in initializers:
print("Tensor information:")
print(
f"Tensor Name: {initializer.name}, Data Type: {initializer.data_type}, Shape: {initializer.dims}"
)
print("Tensor value before modification:")
print_tensor_data(initializer)
if initializer.data_type == onnx.TensorProto.DataType.FLOAT:
for i in range(dims_prod(initializer.dims)):
initializer.float_data[i] = 2
print("Tensor value after modification:")
print_tensor_data(initializer)
nodes = graph_def.node
for node in nodes:
print(node.name)
print(node.op_type)
print(node.input)
print(node.output)
if node.op_type == "BatchNormalization":
print("Attributes before adding:")
for attribute in node.attribute:
print(attribute)
epsilon_attribute = onnx.helper.make_attribute("epsilon", 1e-06)
node.attribute.extend([epsilon_attribute])
fter adding:")
for attribute in node.attribute:
print(attribute)
inputs = graph_def.input
for graph_input in inputs:
input_shape = []
for d in graph_input.type.tensor_type.shape.dim:
if d.dim_value == 0:
input_shape.append(None)
else:
input_shape.append(d.dim_value)
print(
f"Input Name: {graph_input.name}, Input Data Type: {graph_input.type.tensor_type.elem_type}, Input Shape: {input_shape}"
)
outputs = graph_def.output
for graph_output in outputs:
output_shape = []
for d in graph_output.type.tensor_type.shape.dim:
if d.dim_value == 0:
output_shape.append(None)
else:
output_shape.append(d.dim_value)
print(
f"Output Name: {graph_output.name}, Output Data Type: {graph_output.type.tensor_type.elem_type}, Output Shape: {output_shape}"
)
onnx.checker.check_model(model)
onnx.save(model, "convnets_modified.onnx")
if __name__ == "__main__":
main()
| true | true |
f73ad0bd123a4882951231708e68f80c96935bd9 | 3,387 | py | Python | main.py | meshy/wolves | 2ba863e3fbcf4693aceb796a03ad06325895039c | [
"MIT"
] | null | null | null | main.py | meshy/wolves | 2ba863e3fbcf4693aceb796a03ad06325895039c | [
"MIT"
] | null | null | null | main.py | meshy/wolves | 2ba863e3fbcf4693aceb796a03ad06325895039c | [
"MIT"
] | null | null | null | import random
import time
from functools import partial
from itertools import product
from blessings import Terminal
term = Terminal()
# Board dimensions derived from the terminal size: each cell is printed two
# columns wide ("X "), and one terminal row is reserved for the totals line.
WINDOW_WIDTH = term.width // 2
WINDOW_HEIGHT = term.height - 1
# Cell states used on the board.
WOLF = 'W'
RABBIT = 'R'
EMPTY = ' '

# Per-tick percentage chances (out of 100) used by the update rules.
RABBIT_SURVIVAL = 60
WOLF_SURVIVAL = 80
WOLF_BREED = 80


def random_animal():
    """Draw one random cell occupant: wolf 1/11, rabbit 2/11, empty otherwise."""
    roll = random.randint(0, 10)
    if roll == 0:
        return WOLF
    return RABBIT if roll in (1, 2) else EMPTY
def random_row():
    # Build one randomly-populated row, WINDOW_WIDTH cells wide.
    return [random_animal() for _ in range(WINDOW_WIDTH)]
def random_board():
    # Build a full randomly-populated board: WINDOW_HEIGHT rows of cells.
    return [random_row() for _ in range(WINDOW_HEIGHT)]
def colour(cell):
    """Return *cell* ready for display, rendering wolves in red."""
    return term.red(cell) if cell == WOLF else cell
def print_board(rows):
    """Render the whole board starting at the terminal's top-left corner."""
    rendered_rows = (' '.join(colour(cell) for cell in row) for row in rows)
    print(term.move(0, 0) + '\n'.join(rendered_rows), flush=True)
def get_neighbours(board, y, x):
    """Return the values of the 8 neighbours of (y, x), wrapping toroidally."""
    row_candidates = [(y - 1) % WINDOW_HEIGHT, y, (y + 1) % WINDOW_HEIGHT]
    col_candidates = [(x - 1) % WINDOW_WIDTH, x, (x + 1) % WINDOW_WIDTH]
    coords = list(product(row_candidates, col_candidates))
    coords.remove((y, x))  # drop the centre cell itself
    return [board[ny][nx] for ny, nx in coords]
def beside_rabbit(board, y, x):
    """True if any of the 8 wrapped neighbours of (y, x) holds a rabbit."""
    return RABBIT in get_neighbours(board, y, x)
def beside_wolf(board, y, x):
    """True if any of the 8 wrapped neighbours of (y, x) holds a wolf."""
    return WOLF in get_neighbours(board, y, x)
def next_animal_state(board, y, x):
    # Compute the next state of cell (y, x) for the wolf/starvation phase:
    # - a rabbit next to a wolf becomes a wolf (prob WOLF_BREED%) or dies;
    # - an isolated rabbit survives with prob RABBIT_SURVIVAL%;
    # - a wolf next to a rabbit survives with prob WOLF_SURVIVAL%;
    # - everything else (wolf with no prey, empty cell) becomes empty.
    # Rabbit expansion into empty cells is handled separately in next_board.
    survival_chance = random.randrange(100)  # one roll decides this cell
    current_state = board[y][x]
    if current_state == RABBIT:
        if beside_wolf(board, y, x):
            return WOLF if survival_chance < WOLF_BREED else EMPTY
        return RABBIT if survival_chance < RABBIT_SURVIVAL else EMPTY
    if current_state == WOLF and beside_rabbit(board, y, x):
        return WOLF if survival_chance < WOLF_SURVIVAL else EMPTY
    return EMPTY
def next_board(board):
    # Advance the simulation one tick and return the new board
    # (a list of strings, one per row).
    new_board = []
    # Phase 1: predators breed over adjacent prey or die; rabbits survive
    # or die (see next_animal_state). Rows are joined into strings.
    for y in range(WINDOW_HEIGHT):
        new_states = map(partial(next_animal_state, board, y), range(WINDOW_WIDTH))
        new_board.append(''.join(new_states))
    # Phase 2: rabbits expand into cells that were empty on the OLD board
    # and have a rabbit neighbour on the NEW board.
    new_rabbit_coords = []
    for y in range(WINDOW_HEIGHT):
        new_rabbits_on_row = []
        for x in range(WINDOW_WIDTH):
            if board[y][x] == EMPTY and beside_rabbit(new_board, y, x):
                new_rabbits_on_row.append((y, x))
        new_rabbit_coords += new_rabbit_coords and [] or []  # noqa
        new_rabbit_coords += new_rabbits_on_row
    # Each candidate cell is filled with a rabbit with RABBIT_SURVIVAL% chance.
    for y, x in new_rabbit_coords:
        survival_chance = random.randrange(100)
        if survival_chance >= RABBIT_SURVIVAL:
            continue
        # Strings are immutable: rebuild the row with the rabbit spliced in.
        row = new_board[y]
        new_board[y] = row[:x] + RABBIT + row[(x + 1):]
    return new_board
def print_totals(board):
    """Print wolf and rabbit totals for *board* on the current line.

    Each row supports ``.count`` (str or list of cell characters), so the
    manual accumulation loops are replaced with the stdlib sum/count idiom.
    """
    num_wolves = sum(row.count(WOLF) for row in board)
    num_rabbits = sum(row.count(RABBIT) for row in board)
    info = 'Wolves: {:6d}\tRabbits: {:6d}'.format(num_wolves, num_rabbits)
    # No trailing newline: the status line is overwritten on every frame.
    print(info, end='')
if __name__ == '__main__':
    # Seed the simulation with a random board and run it forever,
    # redrawing the board and the totals line every ~70 ms.
    original_board = board = random_board()
    print(term.clear)
    with term.fullscreen():
        while True:
            print_board(board)
            print_totals(board)
            time.sleep(.07)
            board = next_board(board)
| 25.276119 | 83 | 0.641571 | import random
import time
from functools import partial
from itertools import product
from blessings import Terminal
term = Terminal()
WINDOW_WIDTH = term.width // 2
WINDOW_HEIGHT = term.height - 1
WOLF = 'W'
RABBIT = 'R'
EMPTY = ' '
RABBIT_SURVIVAL = 60
WOLF_SURVIVAL = 80
WOLF_BREED = 80
def random_animal():
choice = random.randint(0, 10)
if choice == 0:
return WOLF
if choice in (1, 2):
return RABBIT
return EMPTY
def random_row():
return [random_animal() for _ in range(WINDOW_WIDTH)]
def random_board():
return [random_row() for _ in range(WINDOW_HEIGHT)]
def colour(cell):
if cell == WOLF:
return term.red(cell)
return cell
def print_board(rows):
rows = map(lambda r: ' '.join(map(colour, r)), rows)
result = term.move(0, 0) + '\n'.join(rows)
print(result, flush=True)
def get_neighbours(board, y, x):
above, below = y - 1, y + 1
left, right = x - 1, x + 1
above %= WINDOW_HEIGHT
below %= WINDOW_HEIGHT
left %= WINDOW_WIDTH
right %= WINDOW_WIDTH
combinations = list(product([above, y, below], [left, x, right]))
combinations.remove((y, x))
return [board[Y][X] for Y, X in combinations]
def beside_rabbit(board, y, x):
return any(filter(lambda cell: cell == RABBIT, get_neighbours(board, y, x)))
def beside_wolf(board, y, x):
return any(filter(lambda cell: cell == WOLF, get_neighbours(board, y, x)))
def next_animal_state(board, y, x):
survival_chance = random.randrange(100)
current_state = board[y][x]
if current_state == RABBIT:
if beside_wolf(board, y, x):
return WOLF if survival_chance < WOLF_BREED else EMPTY
return RABBIT if survival_chance < RABBIT_SURVIVAL else EMPTY
if current_state == WOLF and beside_rabbit(board, y, x):
return WOLF if survival_chance < WOLF_SURVIVAL else EMPTY
return EMPTY
def next_board(board):
new_board = []
for y in range(WINDOW_HEIGHT):
new_states = map(partial(next_animal_state, board, y), range(WINDOW_WIDTH))
new_board.append(''.join(new_states))
new_rabbit_coords = []
for y in range(WINDOW_HEIGHT):
new_rabbits_on_row = []
for x in range(WINDOW_WIDTH):
if board[y][x] == EMPTY and beside_rabbit(new_board, y, x):
new_rabbits_on_row.append((y, x))
new_rabbit_coords += new_rabbits_on_row
for y, x in new_rabbit_coords:
survival_chance = random.randrange(100)
if survival_chance >= RABBIT_SURVIVAL:
continue
row = new_board[y]
new_board[y] = row[:x] + RABBIT + row[(x + 1):]
return new_board
def print_totals(board):
num_wolves = 0
for row in board:
num_wolves += row.count(WOLF)
num_rabbits = 0
for row in board:
num_rabbits += row.count(RABBIT)
info = 'Wolves: {:6d}\tRabbits: {:6d}'.format(num_wolves, num_rabbits)
print(info, end='')
if __name__ == '__main__':
original_board = board = random_board()
print(term.clear)
with term.fullscreen():
while True:
print_board(board)
print_totals(board)
time.sleep(.07)
board = next_board(board)
| true | true |
f73ad104d4d077b3f743260bae881c70d73ca3d9 | 474 | py | Python | examples/rename_all.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | examples/rename_all.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | examples/rename_all.py | tardyp/pyserde | 2bef77d9888ffcc650f031f0e883cb2ff08cbf60 | [
"MIT"
] | null | null | null | """
rename_all.py
Usage:
$ poetry install
$ poetry run python rename_all.py
"""
from typing import Optional
from serde import serde
from serde.json import from_json, to_json
@serde(rename_all='pascalcase')
class Foo:
    # Fields are serialized with pascal-cased names: "Name" and "No".
    name: str
    no: Optional[int] = None
def main():
    """Round-trip a Foo through JSON, demonstrating pascal-case renaming."""
    pikachu = Foo('Pikachu')
    print(f"Into Json: {to_json(pikachu)}")
    serialized = '{"Name": "Pikachu", "No": 25}'
    print(f"From Json: {from_json(Foo, serialized)}")


if __name__ == '__main__':
    main()
| 15.290323 | 44 | 0.628692 | from typing import Optional
from serde import serde
from serde.json import from_json, to_json
@serde(rename_all='pascalcase')
class Foo:
name: str
no: Optional[int] = None
def main():
f = Foo('Pikachu')
print(f"Into Json: {to_json(f)}")
s = '{"Name": "Pikachu", "No": 25}'
print(f"From Json: {from_json(Foo, s)}")
if __name__ == '__main__':
main()
| true | true |
f73ad178b57b6abb868e94e2878b8fe9ffc502b7 | 332 | py | Python | examples/redirect_example.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 4,959 | 2018-09-13T08:42:51.000Z | 2021-01-05T07:01:44.000Z | examples/redirect_example.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 864 | 2018-09-13T20:48:04.000Z | 2021-01-05T07:33:30.000Z | examples/redirect_example.py | Varriount/sanic | 55c36e0240dfeb03deccdeb5a53ca7fcfa728bff | [
"MIT"
] | 612 | 2018-09-13T21:10:04.000Z | 2020-12-30T12:16:36.000Z | from sanic import Sanic
from sanic import response
app = Sanic(__name__)
@app.route('/')
def handle_request(request):
    # Root endpoint: immediately redirect clients to /redirect.
    return response.redirect('/redirect')
@app.route('/redirect')
async def test(request):
    # Redirect target: confirm arrival with a small JSON payload.
    return response.json({"Redirected": True})
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000) | 18.444444 | 46 | 0.686747 | from sanic import Sanic
from sanic import response
app = Sanic(__name__)
@app.route('/')
def handle_request(request):
return response.redirect('/redirect')
@app.route('/redirect')
async def test(request):
return response.json({"Redirected": True})
if __name__ == '__main__':
app.run(host="0.0.0.0", port=8000) | true | true |
f73ad1edeeb3bfe7e4f5b2dbc7711529091366e9 | 21,563 | py | Python | tests/test_graphqlview.py | mazvv/aiohttp-graphql | 21a10f20524ab365669b9495fc5952c184b1004b | [
"MIT"
] | null | null | null | tests/test_graphqlview.py | mazvv/aiohttp-graphql | 21a10f20524ab365669b9495fc5952c184b1004b | [
"MIT"
] | null | null | null | tests/test_graphqlview.py | mazvv/aiohttp-graphql | 21a10f20524ab365669b9495fc5952c184b1004b | [
"MIT"
] | null | null | null | import json
from urllib.parse import urlencode
import pytest
from aiohttp import FormData
from graphql.execution.executors.asyncio import AsyncioExecutor
from graphql.execution.executors.sync import SyncExecutor
from aiohttp_graphql import GraphQLView
from .schema import Schema, AsyncSchema
# pylint: disable=invalid-name
# pylint: disable=protected-access
@pytest.fixture
def view_kwargs():
    """Default keyword arguments for building the GraphQLView under test."""
    return {"schema": Schema}
@pytest.mark.parametrize(
    "view,expected",
    [
        (GraphQLView(schema=Schema), False),
        (GraphQLView(schema=Schema, executor=SyncExecutor()), False),
        (GraphQLView(schema=Schema, executor=AsyncioExecutor()), True),
    ],
)
def test_eval(view, expected):
    # enable_async is True only when an AsyncioExecutor is supplied.
    assert view.enable_async == expected
@pytest.mark.asyncio
async def test_allows_get_with_query_param(client, url_builder):
    # A bare GET with ?query={test} executes the query and returns data.
    response = await client.get(url_builder(query="{test}"))
    assert response.status == 200
    assert await response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_get_with_variable_values(client, url_builder):
response = await client.get(
url_builder(
query="query helloWho($who: String) { test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_get_with_operation_name(client, url_builder):
response = await client.get(
url_builder(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_reports_validation_errors(client, url_builder):
response = await client.get(url_builder(query="{ test, unknownOne, unknownTwo }"))
assert response.status == 400
assert await response.json() == {
"errors": [
{
"message": 'Cannot query field "unknownOne" on type "QueryRoot".',
"locations": [{"line": 1, "column": 9}],
},
{
"message": 'Cannot query field "unknownTwo" on type "QueryRoot".',
"locations": [{"line": 1, "column": 21}],
},
],
}
@pytest.mark.asyncio
async def test_errors_when_missing_operation_name(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
subscription TestSubscriptions { subscriptionsTest { test } }
"""
)
)
assert response.status == 400
assert await response.json() == {
"errors": [
{
"message": (
"Must provide operation name if query contains multiple "
"operations."
),
},
]
}
@pytest.mark.asyncio
async def test_errors_when_sending_a_mutation_via_get(client, url_builder):
response = await client.get(
url_builder(
query="""
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."},
],
}
@pytest.mark.asyncio
async def test_errors_when_selecting_a_mutation_within_a_get(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestMutation",
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."},
],
}
@pytest.mark.asyncio
async def test_errors_when_selecting_a_subscription_within_a_get(
client, url_builder,
):
response = await client.get(
url_builder(
query="""
subscription TestSubscriptions { subscriptionsTest { test } }
""",
operationName="TestSubscriptions",
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{
"message": (
"Can only perform a subscription operation from a POST " "request."
)
},
],
}
@pytest.mark.asyncio
async def test_allows_mutation_to_exist_within_a_get(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestQuery",
)
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_post_with_json_encoding(client, base_url):
response = await client.post(
base_url,
data=json.dumps(dict(query="{test}")),
headers={"content-type": "application/json"},
)
assert await response.json() == {"data": {"test": "Hello World"}}
assert response.status == 200
@pytest.mark.asyncio
async def test_allows_sending_a_mutation_via_post(client, base_url):
response = await client.post(
base_url,
data=json.dumps(dict(query="mutation TestMutation { writeTest { test } }",)),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.asyncio
async def test_errors_when_sending_a_subscription_without_allow(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="""
subscription TestSubscriptions { subscriptionsTest { test } }
""",
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {
"data": None,
"errors": [
{
"message": "Subscriptions are not allowed. You will need to "
"either use the subscribe function or pass "
"allow_subscriptions=True"
},
],
}
@pytest.mark.asyncio
async def test_allows_post_with_url_encoding(client, base_url):
data = FormData()
data.add_field("query", "{test}")
response = await client.post(
base_url,
data=data(),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert await response.json() == {"data": {"test": "Hello World"}}
assert response.status == 200
@pytest.mark.asyncio
async def test_supports_post_json_query_with_string_variables(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_query_with_json_variables(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_url_encoded_query_with_string_variables(client, base_url):
response = await client.post(
base_url,
data=urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_quey_with_get_variable_values(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data=json.dumps(dict(query="query helloWho($who: String){ test(who: $who) }",)),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_post_url_encoded_query_with_get_variable_values(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data=urlencode(dict(query="query helloWho($who: String){ test(who: $who) }",)),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_raw_text_query_with_get_variable_values(
client, url_builder
):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_post_with_operation_name(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_allows_post_with_get_operation_name(client, url_builder):
response = await client.post(
url_builder(operationName="helloWorld"),
data="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_supports_pretty_printing(client, url_builder):
response = await client.get(url_builder(query="{test}", pretty="1"))
text = await response.text()
assert text == ("{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}")
@pytest.mark.asyncio
async def test_not_pretty_by_default(client, url_builder):
response = await client.get(url_builder(query="{test}"))
assert await response.text() == ('{"data":{"test":"Hello World"}}')
@pytest.mark.asyncio
async def test_supports_pretty_printing_by_request(client, url_builder):
response = await client.get(url_builder(query="{test}", pretty="1"))
assert await response.text() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.asyncio
async def test_handles_field_errors_caught_by_graphql(client, url_builder):
response = await client.get(url_builder(query="{thrower}"))
assert response.status == 200
assert await response.json() == {
"data": None,
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"message": "Throws!",
"path": ["thrower"],
}
],
}
@pytest.mark.asyncio
async def test_handles_syntax_errors_caught_by_graphql(client, url_builder):
response = await client.get(url_builder(query="syntaxerror"))
assert response.status == 400
assert await response.json() == {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": (
"Syntax Error GraphQL (1:1) "
'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n ^\n'
),
},
],
}
@pytest.mark.asyncio
async def test_handles_errors_caused_by_a_lack_of_query(client, base_url):
response = await client.get(base_url)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.asyncio
async def test_handles_batch_correctly_if_is_disabled(client, base_url):
response = await client.post(
base_url, data="[]", headers={"content-type": "application/json"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Batch GraphQL requests are not enabled."}]
}
@pytest.mark.asyncio
async def test_handles_incomplete_json_bodies(client, base_url):
response = await client.post(
base_url, data='{"query":', headers={"content-type": "application/json"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "POST body sent invalid JSON."}]
}
@pytest.mark.asyncio
async def test_handles_plain_post_text(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "text/plain"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.asyncio
async def test_handles_poorly_formed_variables(client, url_builder):
response = await client.get(
url_builder(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
),
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Variables are invalid JSON."}]
}
@pytest.mark.asyncio
async def test_handles_unsupported_http_methods(client, url_builder):
response = await client.put(url_builder(query="{test}"))
assert response.status == 405
assert response.headers["Allow"] in ["GET, POST", "HEAD, GET, POST, OPTIONS"]
assert await response.json() == {
"errors": [{"message": "GraphQL only supports GET and POST requests."}]
}
@pytest.mark.asyncio
async def test_passes_request_into_request_context(client, url_builder):
response = await client.get(url_builder(query="{request}", q="testing"))
assert response.status == 200
assert await response.json() == {
"data": {"request": "testing"},
}
class TestCustomContext:
@pytest.fixture
def view_kwargs(self, request, view_kwargs):
# pylint: disable=no-self-use
# pylint: disable=redefined-outer-name
view_kwargs.update(context=request.param)
return view_kwargs
@pytest.mark.parametrize(
"view_kwargs",
["CUSTOM CONTEXT", {"CUSTOM_CONTEXT": "test"}],
indirect=True,
ids=repr,
)
@pytest.mark.asyncio
async def test_context_remapped(self, client, url_builder):
response = await client.get(url_builder(query="{context}"))
_json = await response.json()
assert response.status == 200
assert "request" in _json["data"]["context"]
assert "CUSTOM CONTEXT" not in _json["data"]["context"]
@pytest.mark.parametrize(
"view_kwargs", [{"request": "test"}], indirect=True, ids=repr
)
@pytest.mark.asyncio
async def test_request_not_replaced(self, client, url_builder):
response = await client.get(url_builder(query="{context}"))
_json = await response.json()
assert response.status == 200
assert "request" in _json["data"]["context"]
assert _json["data"]["context"] == str({"request": "test"})
@pytest.mark.asyncio
async def test_post_multipart_data(client, base_url):
# pylint: disable=line-too-long
query = "mutation TestMutation { writeTest { test } }"
data = (
"------aiohttpgraphql\r\n"
+ 'Content-Disposition: form-data; name="query"\r\n'
+ "\r\n"
+ query
+ "\r\n"
+ "------aiohttpgraphql--\r\n"
+ "Content-Type: text/plain; charset=utf-8\r\n"
+ 'Content-Disposition: form-data; name="file"; filename="text1.txt"; filename*=utf-8\'\'text1.txt\r\n'
+ "\r\n"
+ "\r\n"
+ "------aiohttpgraphql--\r\n"
)
response = await client.post(
base_url,
data=data,
headers={"content-type": "multipart/form-data; boundary=----aiohttpgraphql"},
)
assert response.status == 200
assert await response.json() == {"data": {u"writeTest": {u"test": u"Hello World"}}}
class TestBatchExecutor:
@pytest.fixture
def view_kwargs(self, view_kwargs):
# pylint: disable=no-self-use
# pylint: disable=redefined-outer-name
view_kwargs.update(batch=True)
return view_kwargs
@pytest.mark.asyncio
async def test_batch_allows_post_with_json_encoding(self, client, base_url):
response = await client.post(
base_url,
data=json.dumps([dict(id=1, query="{test}")]),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [{"data": {"test": "Hello World"}}]
@pytest.mark.asyncio
async def test_batch_supports_post_json_query_with_json_variables(
self, client, base_url
):
response = await client.post(
base_url,
data=json.dumps(
[
dict(
id=1,
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
)
]
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [{"data": {"test": "Hello Dolly"}}]
@pytest.mark.asyncio
async def test_batch_allows_post_with_operation_name(self, client, base_url):
response = await client.post(
base_url,
data=json.dumps(
[
dict(
id=1,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
]
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [
{"data": {"test": "Hello World", "shared": "Hello Everyone"}}
]
class TestAsyncSchema:
@pytest.fixture
def executor(self, event_loop):
# pylint: disable=no-self-use
# Only need to test with the AsyncExecutor
return AsyncioExecutor(loop=event_loop)
@pytest.fixture
def view_kwargs(self, view_kwargs):
# pylint: disable=no-self-use
# pylint: disable=redefined-outer-name
view_kwargs.update(schema=AsyncSchema)
return view_kwargs
@pytest.mark.asyncio
async def test_async_schema(self, client, url_builder):
response = await client.get(url_builder(query="{a,b,c}"))
assert response.status == 200
assert await response.json() == {"data": {"a": "hey", "b": "hey2", "c": "hey3"}}
@pytest.mark.asyncio
async def test_preflight_request(client, base_url):
response = await client.options(
base_url, headers={"Access-Control-Request-Method": "POST"},
)
assert response.status == 200
@pytest.mark.asyncio
async def test_preflight_incorrect_request(client, base_url):
response = await client.options(
base_url, headers={"Access-Control-Request-Method": "OPTIONS"},
)
assert response.status == 400
| 30.285112 | 111 | 0.593702 | import json
from urllib.parse import urlencode
import pytest
from aiohttp import FormData
from graphql.execution.executors.asyncio import AsyncioExecutor
from graphql.execution.executors.sync import SyncExecutor
from aiohttp_graphql import GraphQLView
from .schema import Schema, AsyncSchema
@pytest.fixture
def view_kwargs():
return {"schema": Schema}
@pytest.mark.parametrize(
"view,expected",
[
(GraphQLView(schema=Schema), False),
(GraphQLView(schema=Schema, executor=SyncExecutor()), False),
(GraphQLView(schema=Schema, executor=AsyncioExecutor()), True),
],
)
def test_eval(view, expected):
assert view.enable_async == expected
@pytest.mark.asyncio
async def test_allows_get_with_query_param(client, url_builder):
response = await client.get(url_builder(query="{test}"))
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_get_with_variable_values(client, url_builder):
response = await client.get(
url_builder(
query="query helloWho($who: String) { test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_get_with_operation_name(client, url_builder):
response = await client.get(
url_builder(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_reports_validation_errors(client, url_builder):
response = await client.get(url_builder(query="{ test, unknownOne, unknownTwo }"))
assert response.status == 400
assert await response.json() == {
"errors": [
{
"message": 'Cannot query field "unknownOne" on type "QueryRoot".',
"locations": [{"line": 1, "column": 9}],
},
{
"message": 'Cannot query field "unknownTwo" on type "QueryRoot".',
"locations": [{"line": 1, "column": 21}],
},
],
}
@pytest.mark.asyncio
async def test_errors_when_missing_operation_name(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
subscription TestSubscriptions { subscriptionsTest { test } }
"""
)
)
assert response.status == 400
assert await response.json() == {
"errors": [
{
"message": (
"Must provide operation name if query contains multiple "
"operations."
),
},
]
}
@pytest.mark.asyncio
async def test_errors_when_sending_a_mutation_via_get(client, url_builder):
response = await client.get(
url_builder(
query="""
mutation TestMutation { writeTest { test } }
"""
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."},
],
}
@pytest.mark.asyncio
async def test_errors_when_selecting_a_mutation_within_a_get(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestMutation",
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{"message": "Can only perform a mutation operation from a POST request."},
],
}
@pytest.mark.asyncio
async def test_errors_when_selecting_a_subscription_within_a_get(
client, url_builder,
):
response = await client.get(
url_builder(
query="""
subscription TestSubscriptions { subscriptionsTest { test } }
""",
operationName="TestSubscriptions",
)
)
assert response.status == 405
assert await response.json() == {
"errors": [
{
"message": (
"Can only perform a subscription operation from a POST " "request."
)
},
],
}
@pytest.mark.asyncio
async def test_allows_mutation_to_exist_within_a_get(client, url_builder):
response = await client.get(
url_builder(
query="""
query TestQuery { test }
mutation TestMutation { writeTest { test } }
""",
operationName="TestQuery",
)
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello World"}}
@pytest.mark.asyncio
async def test_allows_post_with_json_encoding(client, base_url):
response = await client.post(
base_url,
data=json.dumps(dict(query="{test}")),
headers={"content-type": "application/json"},
)
assert await response.json() == {"data": {"test": "Hello World"}}
assert response.status == 200
@pytest.mark.asyncio
async def test_allows_sending_a_mutation_via_post(client, base_url):
response = await client.post(
base_url,
data=json.dumps(dict(query="mutation TestMutation { writeTest { test } }",)),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"writeTest": {"test": "Hello World"}}}
@pytest.mark.asyncio
async def test_errors_when_sending_a_subscription_without_allow(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="""
subscription TestSubscriptions { subscriptionsTest { test } }
""",
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {
"data": None,
"errors": [
{
"message": "Subscriptions are not allowed. You will need to "
"either use the subscribe function or pass "
"allow_subscriptions=True"
},
],
}
@pytest.mark.asyncio
async def test_allows_post_with_url_encoding(client, base_url):
data = FormData()
data.add_field("query", "{test}")
response = await client.post(
base_url,
data=data(),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert await response.json() == {"data": {"test": "Hello World"}}
assert response.status == 200
@pytest.mark.asyncio
async def test_supports_post_json_query_with_string_variables(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_query_with_json_variables(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_url_encoded_query_with_string_variables(client, base_url):
response = await client.post(
base_url,
data=urlencode(
dict(
query="query helloWho($who: String){ test(who: $who) }",
variables=json.dumps({"who": "Dolly"}),
),
),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_json_quey_with_get_variable_values(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data=json.dumps(dict(query="query helloWho($who: String){ test(who: $who) }",)),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_post_url_encoded_query_with_get_variable_values(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data=urlencode(dict(query="query helloWho($who: String){ test(who: $who) }",)),
headers={"content-type": "application/x-www-form-urlencoded"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_supports_post_raw_text_query_with_get_variable_values(
client, url_builder
):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert await response.json() == {"data": {"test": "Hello Dolly"}}
@pytest.mark.asyncio
async def test_allows_post_with_operation_name(client, base_url):
response = await client.post(
base_url,
data=json.dumps(
dict(
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_allows_post_with_get_operation_name(client, url_builder):
response = await client.post(
url_builder(operationName="helloWorld"),
data="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
headers={"content-type": "application/graphql"},
)
assert response.status == 200
assert await response.json() == {
"data": {"test": "Hello World", "shared": "Hello Everyone"}
}
@pytest.mark.asyncio
async def test_supports_pretty_printing(client, url_builder):
response = await client.get(url_builder(query="{test}", pretty="1"))
text = await response.text()
assert text == ("{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}")
@pytest.mark.asyncio
async def test_not_pretty_by_default(client, url_builder):
response = await client.get(url_builder(query="{test}"))
assert await response.text() == ('{"data":{"test":"Hello World"}}')
@pytest.mark.asyncio
async def test_supports_pretty_printing_by_request(client, url_builder):
response = await client.get(url_builder(query="{test}", pretty="1"))
assert await response.text() == (
"{\n" ' "data": {\n' ' "test": "Hello World"\n' " }\n" "}"
)
@pytest.mark.asyncio
async def test_handles_field_errors_caught_by_graphql(client, url_builder):
response = await client.get(url_builder(query="{thrower}"))
assert response.status == 200
assert await response.json() == {
"data": None,
"errors": [
{
"locations": [{"column": 2, "line": 1}],
"message": "Throws!",
"path": ["thrower"],
}
],
}
@pytest.mark.asyncio
async def test_handles_syntax_errors_caught_by_graphql(client, url_builder):
response = await client.get(url_builder(query="syntaxerror"))
assert response.status == 400
assert await response.json() == {
"errors": [
{
"locations": [{"column": 1, "line": 1}],
"message": (
"Syntax Error GraphQL (1:1) "
'Unexpected Name "syntaxerror"\n\n1: syntaxerror\n ^\n'
),
},
],
}
@pytest.mark.asyncio
async def test_handles_errors_caused_by_a_lack_of_query(client, base_url):
response = await client.get(base_url)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.asyncio
async def test_handles_batch_correctly_if_is_disabled(client, base_url):
response = await client.post(
base_url, data="[]", headers={"content-type": "application/json"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Batch GraphQL requests are not enabled."}]
}
@pytest.mark.asyncio
async def test_handles_incomplete_json_bodies(client, base_url):
response = await client.post(
base_url, data='{"query":', headers={"content-type": "application/json"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "POST body sent invalid JSON."}]
}
@pytest.mark.asyncio
async def test_handles_plain_post_text(client, url_builder):
response = await client.post(
url_builder(variables=json.dumps({"who": "Dolly"})),
data="query helloWho($who: String){ test(who: $who) }",
headers={"content-type": "text/plain"},
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Must provide query string."}]
}
@pytest.mark.asyncio
async def test_handles_poorly_formed_variables(client, url_builder):
response = await client.get(
url_builder(
query="query helloWho($who: String){ test(who: $who) }", variables="who:You"
),
)
assert response.status == 400
assert await response.json() == {
"errors": [{"message": "Variables are invalid JSON."}]
}
@pytest.mark.asyncio
async def test_handles_unsupported_http_methods(client, url_builder):
response = await client.put(url_builder(query="{test}"))
assert response.status == 405
assert response.headers["Allow"] in ["GET, POST", "HEAD, GET, POST, OPTIONS"]
assert await response.json() == {
"errors": [{"message": "GraphQL only supports GET and POST requests."}]
}
@pytest.mark.asyncio
async def test_passes_request_into_request_context(client, url_builder):
response = await client.get(url_builder(query="{request}", q="testing"))
assert response.status == 200
assert await response.json() == {
"data": {"request": "testing"},
}
class TestCustomContext:
@pytest.fixture
def view_kwargs(self, request, view_kwargs):
view_kwargs.update(context=request.param)
return view_kwargs
@pytest.mark.parametrize(
"view_kwargs",
["CUSTOM CONTEXT", {"CUSTOM_CONTEXT": "test"}],
indirect=True,
ids=repr,
)
@pytest.mark.asyncio
async def test_context_remapped(self, client, url_builder):
response = await client.get(url_builder(query="{context}"))
_json = await response.json()
assert response.status == 200
assert "request" in _json["data"]["context"]
assert "CUSTOM CONTEXT" not in _json["data"]["context"]
@pytest.mark.parametrize(
"view_kwargs", [{"request": "test"}], indirect=True, ids=repr
)
@pytest.mark.asyncio
async def test_request_not_replaced(self, client, url_builder):
response = await client.get(url_builder(query="{context}"))
_json = await response.json()
assert response.status == 200
assert "request" in _json["data"]["context"]
assert _json["data"]["context"] == str({"request": "test"})
@pytest.mark.asyncio
async def test_post_multipart_data(client, base_url):
query = "mutation TestMutation { writeTest { test } }"
data = (
"------aiohttpgraphql\r\n"
+ 'Content-Disposition: form-data; name="query"\r\n'
+ "\r\n"
+ query
+ "\r\n"
+ "------aiohttpgraphql--\r\n"
+ "Content-Type: text/plain; charset=utf-8\r\n"
+ 'Content-Disposition: form-data; name="file"; filename="text1.txt"; filename*=utf-8\'\'text1.txt\r\n'
+ "\r\n"
+ "\r\n"
+ "------aiohttpgraphql--\r\n"
)
response = await client.post(
base_url,
data=data,
headers={"content-type": "multipart/form-data; boundary=----aiohttpgraphql"},
)
assert response.status == 200
assert await response.json() == {"data": {u"writeTest": {u"test": u"Hello World"}}}
class TestBatchExecutor:
@pytest.fixture
def view_kwargs(self, view_kwargs):
view_kwargs.update(batch=True)
return view_kwargs
@pytest.mark.asyncio
async def test_batch_allows_post_with_json_encoding(self, client, base_url):
response = await client.post(
base_url,
data=json.dumps([dict(id=1, query="{test}")]),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [{"data": {"test": "Hello World"}}]
@pytest.mark.asyncio
async def test_batch_supports_post_json_query_with_json_variables(
self, client, base_url
):
response = await client.post(
base_url,
data=json.dumps(
[
dict(
id=1,
query="query helloWho($who: String){ test(who: $who) }",
variables={"who": "Dolly"},
)
]
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [{"data": {"test": "Hello Dolly"}}]
@pytest.mark.asyncio
async def test_batch_allows_post_with_operation_name(self, client, base_url):
response = await client.post(
base_url,
data=json.dumps(
[
dict(
id=1,
query="""
query helloYou { test(who: "You"), ...shared }
query helloWorld { test(who: "World"), ...shared }
query helloDolly { test(who: "Dolly"), ...shared }
fragment shared on QueryRoot {
shared: test(who: "Everyone")
}
""",
operationName="helloWorld",
)
]
),
headers={"content-type": "application/json"},
)
assert response.status == 200
assert await response.json() == [
{"data": {"test": "Hello World", "shared": "Hello Everyone"}}
]
class TestAsyncSchema:
@pytest.fixture
def executor(self, event_loop):
return AsyncioExecutor(loop=event_loop)
@pytest.fixture
def view_kwargs(self, view_kwargs):
view_kwargs.update(schema=AsyncSchema)
return view_kwargs
@pytest.mark.asyncio
async def test_async_schema(self, client, url_builder):
response = await client.get(url_builder(query="{a,b,c}"))
assert response.status == 200
assert await response.json() == {"data": {"a": "hey", "b": "hey2", "c": "hey3"}}
@pytest.mark.asyncio
async def test_preflight_request(client, base_url):
response = await client.options(
base_url, headers={"Access-Control-Request-Method": "POST"},
)
assert response.status == 200
@pytest.mark.asyncio
async def test_preflight_incorrect_request(client, base_url):
response = await client.options(
base_url, headers={"Access-Control-Request-Method": "OPTIONS"},
)
assert response.status == 400
| true | true |
f73ad21d6443342dd519e264d972ec7dd8c1fc31 | 695 | py | Python | setup.py | qiuqiangkong/autoth | 42a8017b71a98441c557ed9768e427c5f627249f | [
"MIT"
] | 7 | 2020-03-06T10:25:59.000Z | 2022-01-26T11:24:42.000Z | setup.py | qiuqiangkong/autoth | 42a8017b71a98441c557ed9768e427c5f627249f | [
"MIT"
] | null | null | null | setup.py | qiuqiangkong/autoth | 42a8017b71a98441c557ed9768e427c5f627249f | [
"MIT"
] | null | null | null | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="autoth-qiuqiangkong", # Replace with your own username
version="0.0.3",
author="Qiuqiang Kong",
author_email="qiuqiangkong@gmail.com",
description="Automatic threshold optimization",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/qiuqiangkong/autoth",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| 30.217391 | 64 | 0.679137 | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="autoth-qiuqiangkong",
version="0.0.3",
author="Qiuqiang Kong",
author_email="qiuqiangkong@gmail.com",
description="Automatic threshold optimization",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/qiuqiangkong/autoth",
packages=setuptools.find_packages(),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
python_requires='>=3.6',
)
| true | true |
f73ad2395f0b8f744c03fc1b619590f37365d42c | 5,565 | py | Python | src/lecture9/solver.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | 1 | 2021-05-03T09:11:35.000Z | 2021-05-03T09:11:35.000Z | src/lecture9/solver.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | null | null | null | src/lecture9/solver.py | wakky927/Computational-Engineering-B | 3720d96668a32dc73f38ed0bc8afe4705452de9e | [
"MIT"
] | null | null | null | import numpy as np
import condition
def solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n):
md = 101
nd = 101
p_old = np.zeros((md, nd))
''' SOR algorithm '''
iter_max = 300 # SOR max iteration steps
relax_factor = 1.8 # SOR relaxation factor
for iter_i in range(1, iter_max):
error = 0.0
for i in range(1, m + 1):
for j in range(1, n + 1):
p_old[i][j] = p[i][j]
for i in range(1, m + 1):
for j in range(1, n + 1):
p[i][j] = (bb[i][j] - Ae[i][j] * p_old[i + 1][j] - Aw[i][j]
* p[i - 1][j] - An[i][j] * p_old[i][j + 1]
- As[i][j] * p[i][j - 1]) / Ap[i][j]\
* relax_factor + p_old[i][j] * (1 - relax_factor)
a = np.abs(p[i][j] - p_old[i][j])
e = max(error, a)
error = e
# print(f"iteration no. {iter_i} -- error = {error}")
return
def solve_p(p, u, v, u_old, v_old, nue, density, dx, dy, dt, m, n):
md = 101
nd = 101
Ap = np.zeros((md, nd))
Ae = np.zeros((md, nd))
Aw = np.zeros((md, nd))
An = np.zeros((md, nd))
As = np.zeros((md, nd))
bb = np.zeros((md, nd))
u_stg = 0.0
v_stg = 0.0
for i in range(1, m + 1):
for j in range(1, n + 1):
''' velocity u '''
# convection_x (1st upwind scheme)
u[i][j] = u_old[i][j]\
- dt * max(u_old[i][j], 0.0)\
* (u_old[i][j] - u_old[i - 1][j]) / dx\
- dt * min(u_old[i][j], 0.0)\
* (u_old[i + 1][j] - u_old[i][j]) / dx
# convection_y
# v_stg = 0.25 * (v_old[i][j] + v_old[i + 1][j] + v_old[i][j - 1]
# + v_old[i + 1][j - 1]) # Staggered grid
u[i][j] = u[i][j]\
- dt * max(v_old[i][j], 0.0)\
* (u_old[i][j] - u_old[i][j - 1]) / dy\
- dt * min(v_old[i][j], 0.0)\
* (u_old[i][j + 1] - u_old[i][j]) / dy
# diffusion_x
u[i][j] = u[i][j]\
+ dt * nue * (u_old[i + 1][j] - 2 * u_old[i][j]
+ u_old[i - 1][j]) / dx**2
# diffusion_y
u[i][j] = u[i][j] \
+ dt * nue * (u_old[i][j + 1] - 2 * u_old[i][j]
+ u_old[i][j - 1]) / dy**2
''' velocity v '''
# convection_x (1st upwind scheme)
# u_stg = 0.25 * (u_old[i][j] + u_old[i - 1][j] + u_old[i][j + 1]
# + u_old[i - 1][j + 1]) # Staggered grid
v[i][j] = v_old[i][j] \
- dt * max(u_old[i][j], 0.0) \
* (v_old[i][j] - v_old[i - 1][j]) / dx \
- dt * min(u_old[i][j], 0.0) \
* (v_old[i + 1][j] - v_old[i][j]) / dx
# convection_y
v[i][j] = v[i][j] \
- dt * max(v_old[i][j], 0.0) \
* (v_old[i][j] - v_old[i][j - 1]) / dy \
- dt * min(v_old[i][j], 0.0) \
* (v_old[i][j + 1] - v_old[i][j]) / dy
# diffusion_x
v[i][j] = v[i][j] \
+ dt * nue * (v_old[i + 1][j] - 2 * v_old[i][j]
+ v_old[i - 1][j]) / dx**2
# diffusion_y
v[i][j] = v[i][j] \
+ dt * nue * (v_old[i][j + 1] - 2 * v_old[i][j]
+ v_old[i][j - 1]) / dy**2
''' matrix solution '''
for i in range(1, m + 1):
for j in range(1, n + 1):
Ae[i][j] = dt / density / dx**2
Aw[i][j] = dt / density / dx**2
An[i][j] = dt / density / dy**2
As[i][j] = dt / density / dy**2
Ap[i][j] = - Ae[i][j] - Aw[i][j] - An[i][j] - As[i][j]
# bb[i][j] = (u[i][j] - u[i - 1][j]) / dx\
# + (v[i][j] - v[i][j - 1]) / dy
bb[i][j] = (u[i + 1][j] - u[i - 1][j]) / dx / 2 \
+ (v[i][j + 1] - v[i][j - 1]) / dy / 2
condition.matrix_c(p, Ap, Ae, Aw, An, As, bb, m, n)
solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n)
return
def solve_u(p, u, density, dx, dt, m, n):
for i in range(1, m + 1):
for j in range(1, n + 1):
# convection_x (1st upwind scheme) -> already calculated in solve_p
# convection_y -> already calculated in solve_p
# diffusion_x -> already calculated in solve_p
# diffusion_y -> already calculated in solve_p
# pressure
# u[i][j] = u[i][j]\
# - dt / density * (p[i + 1][j] - p[i][j]) / dx
u[i][j] = u[i][j] \
- dt / density * (p[i + 1][j] - p[i - 1][j]) / dx / 2
def solve_v(p, v, density, dy, dt, m, n):
for i in range(1, m + 1):
for j in range(1, n + 1):
# convection_x (1st upwind scheme) -> already calculated in solve_p
# convection_y -> already calculated in solve_p
# diffusion_x -> already calculated in solve_p
# diffusion_y -> already calculated in solve_p
# pressure
# v[i][j] = v[i][j] \
# - dt / density * (p[i][j + 1] - p[i][j]) / dy
v[i][j] = v[i][j] \
- dt / density * (p[i][j + 1] - p[i][j - 1]) / dy / 2
| 36.136364 | 79 | 0.369093 | import numpy as np
import condition
def solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n):
md = 101
nd = 101
p_old = np.zeros((md, nd))
iter_max = 300
relax_factor = 1.8
for iter_i in range(1, iter_max):
error = 0.0
for i in range(1, m + 1):
for j in range(1, n + 1):
p_old[i][j] = p[i][j]
for i in range(1, m + 1):
for j in range(1, n + 1):
p[i][j] = (bb[i][j] - Ae[i][j] * p_old[i + 1][j] - Aw[i][j]
* p[i - 1][j] - An[i][j] * p_old[i][j + 1]
- As[i][j] * p[i][j - 1]) / Ap[i][j]\
* relax_factor + p_old[i][j] * (1 - relax_factor)
a = np.abs(p[i][j] - p_old[i][j])
e = max(error, a)
error = e
return
def solve_p(p, u, v, u_old, v_old, nue, density, dx, dy, dt, m, n):
md = 101
nd = 101
Ap = np.zeros((md, nd))
Ae = np.zeros((md, nd))
Aw = np.zeros((md, nd))
An = np.zeros((md, nd))
As = np.zeros((md, nd))
bb = np.zeros((md, nd))
u_stg = 0.0
v_stg = 0.0
for i in range(1, m + 1):
for j in range(1, n + 1):
u[i][j] = u_old[i][j]\
- dt * max(u_old[i][j], 0.0)\
* (u_old[i][j] - u_old[i - 1][j]) / dx\
- dt * min(u_old[i][j], 0.0)\
* (u_old[i + 1][j] - u_old[i][j]) / dx
][j] = u[i][j]\
- dt * max(v_old[i][j], 0.0)\
* (u_old[i][j] - u_old[i][j - 1]) / dy\
- dt * min(v_old[i][j], 0.0)\
* (u_old[i][j + 1] - u_old[i][j]) / dy
u[i][j] = u[i][j]\
+ dt * nue * (u_old[i + 1][j] - 2 * u_old[i][j]
+ u_old[i - 1][j]) / dx**2
u[i][j] = u[i][j] \
+ dt * nue * (u_old[i][j + 1] - 2 * u_old[i][j]
+ u_old[i][j - 1]) / dy**2
][j] = v_old[i][j] \
- dt * max(u_old[i][j], 0.0) \
* (v_old[i][j] - v_old[i - 1][j]) / dx \
- dt * min(u_old[i][j], 0.0) \
* (v_old[i + 1][j] - v_old[i][j]) / dx
v[i][j] = v[i][j] \
- dt * max(v_old[i][j], 0.0) \
* (v_old[i][j] - v_old[i][j - 1]) / dy \
- dt * min(v_old[i][j], 0.0) \
* (v_old[i][j + 1] - v_old[i][j]) / dy
v[i][j] = v[i][j] \
+ dt * nue * (v_old[i + 1][j] - 2 * v_old[i][j]
+ v_old[i - 1][j]) / dx**2
v[i][j] = v[i][j] \
+ dt * nue * (v_old[i][j + 1] - 2 * v_old[i][j]
+ v_old[i][j - 1]) / dy**2
for i in range(1, m + 1):
for j in range(1, n + 1):
Ae[i][j] = dt / density / dx**2
Aw[i][j] = dt / density / dx**2
An[i][j] = dt / density / dy**2
As[i][j] = dt / density / dy**2
Ap[i][j] = - Ae[i][j] - Aw[i][j] - An[i][j] - As[i][j]
bb[i][j] = (u[i + 1][j] - u[i - 1][j]) / dx / 2 \
+ (v[i][j + 1] - v[i][j - 1]) / dy / 2
condition.matrix_c(p, Ap, Ae, Aw, An, As, bb, m, n)
solve_matrix(p, Ap, Ae, Aw, An, As, bb, m, n)
return
def solve_u(p, u, density, dx, dt, m, n):
for i in range(1, m + 1):
for j in range(1, n + 1):
u[i][j] = u[i][j] \
- dt / density * (p[i + 1][j] - p[i - 1][j]) / dx / 2
def solve_v(p, v, density, dy, dt, m, n):
for i in range(1, m + 1):
for j in range(1, n + 1):
v[i][j] = v[i][j] \
- dt / density * (p[i][j + 1] - p[i][j - 1]) / dy / 2
| true | true |
f73ad329bb4172d093abf1fe9ec9be88b5e09fbb | 12,651 | py | Python | python/protectWindowsV2/protectWindowsV2.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | python/protectWindowsV2/protectWindowsV2.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | python/protectWindowsV2/protectWindowsV2.py | ped998/scripts | 0dcaaf47f9676210e1c972a5d59d8d0de82a1d93 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Add Physical Linux Servers to File-based Protection Job Using Python"""
### usage: ./protectLinux.py -v mycluster \
# -u myuser \
# -d mydomain.net \
# -j 'My Backup Job' \
# -s myserver1.mydomain.net \
# -s myserver2.mydomain.net \
# -l serverlist.txt \
# -i /var \
# -i /home \
# -e /var/log \
# -e /home/oracle \
# -f excludes.txt
### import pyhesity wrapper module
from pyhesity import *
### command line arguments
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--vip', type=str, required=True)
parser.add_argument('-u', '--username', type=str, required=True)
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-k', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-s', '--servername', action='append', type=str)
parser.add_argument('-l', '--serverlist', type=str)
parser.add_argument('-j', '--jobname', type=str, required=True)
parser.add_argument('-a', '--alllocaldrives', action='store_true')
parser.add_argument('-mf', '--metadatafile', type=str, default=None)
parser.add_argument('-i', '--include', action='append', type=str)
parser.add_argument('-n', '--includefile', type=str)
parser.add_argument('-e', '--exclude', action='append', type=str)
parser.add_argument('-x', '--excludefile', type=str)
parser.add_argument('-m', '--skipnestedmountpoints', action='store_true')
parser.add_argument('-sd', '--storagedomain', type=str, default='DefaultStorageDomain')
parser.add_argument('-p', '--policyname', type=str, default=None)
parser.add_argument('-tz', '--timezone', type=str, default='US/Eastern')
parser.add_argument('-st', '--starttime', type=str, default='21:00')
parser.add_argument('-is', '--incrementalsla', type=int, default=60) # incremental SLA minutes
parser.add_argument('-fs', '--fullsla', type=int, default=120) # full SLA minutes
parser.add_argument('-ei', '--enableindexing', action='store_true') # enable indexing
args = parser.parse_args()
vip = args.vip # cluster name/ip
username = args.username # username to connect to cluster
domain = args.domain # domain of username (e.g. local, or AD domain)
password = args.password # password or API key
useApiKey = args.useApiKey # use API key for authentication
servernames = args.servername # name of server to protect
serverlist = args.serverlist # file with server names
jobname = args.jobname # name of protection job to add server to
alllocaldrives = args.alllocaldrives # protect all local drives
metadatafile = args.metadatafile # metadata file path
includes = args.include # include path
includefile = args.includefile # file with include paths
excludes = args.exclude # exclude path
excludefile = args.excludefile # file with exclude paths
skipmountpoints = args.skipnestedmountpoints # skip nested mount points (6.3 and below)
storagedomain = args.storagedomain # storage domain for new job
policyname = args.policyname # policy name for new job
starttime = args.starttime # start time for new job
timezone = args.timezone # time zone for new job
incrementalsla = args.incrementalsla # incremental SLA for new job
fullsla = args.fullsla # full SLA for new job
enableindexing = args.enableindexing # enable indexing on new job
# read server file
if servernames is None:
servernames = []
if serverlist is not None:
f = open(serverlist, 'r')
servernames += [s.strip() for s in f.readlines() if s.strip() != '']
f.close()
if len(servernames) == 0:
print('no servers specified')
exit()
# read include file
if alllocaldrives is True:
includes = ['$ALL_LOCAL_DRIVES']
else:
if includes is None:
includes = []
if includefile is not None:
f = open(includefile, 'r')
includes += [e.strip() for e in f.readlines() if e.strip() != '']
f.close()
if len(includes) == 0:
includes += '/'
# read exclude file
if excludes is None:
excludes = []
if excludefile is not None:
f = open(excludefile, 'r')
excludes += [e.strip() for e in f.readlines() if e.strip() != '']
f.close()
# authenticate to Cohesity
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey)
# get job info
newJob = False
protectionGroups = api('get', 'data-protect/protection-groups?isDeleted=false&isActive=true', v=2)
jobs = protectionGroups['protectionGroups']
job = [job for job in jobs if job['name'].lower() == jobname.lower()]
if not job or len(job) < 1:
newJob = True
print("Job '%s' not found. Creating new job" % jobname)
# find protectionPolicy
if policyname is None:
print('Policy name required for new job')
exit(1)
policy = [p for p in api('get', 'protectionPolicies') if p['name'].lower() == policyname.lower()]
if len(policy) < 1:
print("Policy '%s' not found!" % policyname)
exit(1)
policyid = policy[0]['id']
# find storage domain
sd = [sd for sd in api('get', 'viewBoxes') if sd['name'].lower() == storagedomain.lower()]
if len(sd) < 1:
print("Storage domain %s not found!" % storagedomain)
exit(1)
sdid = sd[0]['id']
# parse starttime
try:
(hour, minute) = starttime.split(':')
hour = int(hour)
minute = int(minute)
if hour < 0 or hour > 23 or minute < 0 or minute > 59:
print('starttime is invalid!')
exit(1)
except Exception:
print('starttime is invalid!')
exit(1)
job = {
"name": jobname,
"policyId": policyid,
"priority": "kMedium",
"storageDomainId": sdid,
"description": "",
"startTime": {
"hour": int(hour),
"minute": int(minute),
"timeZone": timezone
},
"alertPolicy": {
"backupRunStatus": [
"kFailure"
],
"alertTargets": []
},
"sla": [
{
"backupRunType": "kIncremental",
"slaMinutes": int(incrementalsla)
},
{
"backupRunType": "kFull",
"slaMinutes": int(fullsla)
}
],
"qosPolicy": "kBackupHDD",
"abortInBlackouts": False,
"isActive": True,
"isPaused": False,
"environment": "kPhysical",
"permissions": [],
"physicalParams": {
"protectionType": "kFile",
"fileProtectionTypeParams": {
"objects": [],
"indexingPolicy": {
"enableIndexing": enableindexing,
"includePaths": [
"/"
],
"excludePaths": [
"/$Recycle.Bin",
"/Windows",
"/Program Files",
"/Program Files (x86)",
"/ProgramData",
"/System Volume Information",
"/Users/*/AppData",
"/Recovery",
"/var",
"/usr",
"/sys",
"/proc",
"/lib",
"/grub",
"/grub2",
"/opt/splunk",
"/splunk"
]
},
"performSourceSideDeduplication": False,
"dedupExclusionSourceIds": None,
"globalExcludePaths": None
}
}
}
else:
job = job[0]
if 'physicalParams' not in job or job['physicalParams']['protectionType'] != 'kFile':
print("Job '%s' is not a file-based physical protection job" % jobname)
exit(1)
# get registered physical servers
physicalServersRoot = api('get', 'protectionSources/rootNodes?allUnderHierarchy=false&environments=kPhysicalFiles&environments=kPhysical&environments=kPhysical')
physicalServersRootId = physicalServersRoot[0]['protectionSource']['id']
physicalServers = api('get', 'protectionSources?allUnderHierarchy=false&id=%s&includeEntityPermissionInfo=true' % physicalServersRootId)[0]['nodes']
for servername in servernames:
# find server
physicalServer = [s for s in physicalServers if s['protectionSource']['name'].lower() == servername.lower() and s['protectionSource']['physicalProtectionSource']['hostType'] == 'kWindows']
if not physicalServer:
print("******** %s is not a registered Windows server ********" % servername)
else:
physicalServer = physicalServer[0]
mountPoints = []
for volume in physicalServer['protectionSource']['physicalProtectionSource']['volumes']:
if 'mountPoints' in volume:
for mountPoint in volume['mountPoints']:
mountPoint = '/%s' % mountPoint.replace(':\\', '')
mountPoints.append(mountPoint.lower())
theseExcludes = []
for exclude in excludes:
if exclude[0:2] == '*:':
for mountPoint in mountPoints:
theseExcludes.append('%s%s' % (mountPoint[1:], exclude[2:]))
else:
theseExcludes.append(exclude)
# get sourceSpecialParameters
existingobject = [o for o in job['physicalParams']['fileProtectionTypeParams']['objects'] if o['id'] == physicalServer['protectionSource']['id']]
if len(existingobject) > 0:
thisobject = existingobject[0]
thisobject['filePaths'] = []
print(' updating %s in job %s...' % (servername, jobname))
newObject = False
else:
thisobject = {
"id": physicalServer['protectionSource']['id'],
"name": physicalServer['protectionSource']['name'],
"filePaths": [],
"usesPathLevelSkipNestedVolumeSetting": True,
"nestedVolumeTypesToSkip": [],
"followNasSymlinkTarget": False
}
print(' adding %s to job %s...' % (servername, jobname))
newObject = True
if metadatafile is not None:
thisobject['metadataFilePath'] = metadatafile
else:
thisobject['metadataFilePath'] = None
for include in includes:
if include != '$ALL_LOCAL_DRIVES':
include = '/%s' % include.replace(':\\', '/').replace('\\', '/')
if include[0:2].lower() in mountPoints or include == '$ALL_LOCAL_DRIVES':
filePath = {
"includedPath": include,
"excludedPaths": [],
"skipNestedVolumes": skipmountpoints
}
thisobject['filePaths'].append(filePath)
for exclude in theseExcludes:
exclude = '/%s' % exclude.replace(':\\', '/').replace('\\', '/').replace('//', '/')
if exclude[0:2].lower() in mountPoints:
thisParent = ''
for include in includes:
include = '/%s' % include.replace(':\\', '/').replace('\\', '/').replace('//', '/')
if include.lower() in exclude.lower() and '/' in exclude:
if len(include) > len(thisParent):
thisParent = include
if alllocaldrives is True:
thisParent = '$ALL_LOCAL_DRIVES'
for filePath in thisobject['filePaths']:
if filePath['includedPath'].lower() == thisParent.lower():
filePath['excludedPaths'].append(exclude)
# include new parameter
if newObject is True:
job['physicalParams']['fileProtectionTypeParams']['objects'].append(thisobject)
# update job
if newJob is True:
result = api('post', 'data-protect/protection-groups', job, v=2)
else:
result = api('put', 'data-protect/protection-groups/%s' % job['id'], job, v=2)
| 41.343137 | 192 | 0.550866 |
r, required=True)
parser.add_argument('-d', '--domain', type=str, default='local')
parser.add_argument('-k', '--useApiKey', action='store_true')
parser.add_argument('-pwd', '--password', type=str, default=None)
parser.add_argument('-s', '--servername', action='append', type=str)
parser.add_argument('-l', '--serverlist', type=str)
parser.add_argument('-j', '--jobname', type=str, required=True)
parser.add_argument('-a', '--alllocaldrives', action='store_true')
parser.add_argument('-mf', '--metadatafile', type=str, default=None)
parser.add_argument('-i', '--include', action='append', type=str)
parser.add_argument('-n', '--includefile', type=str)
parser.add_argument('-e', '--exclude', action='append', type=str)
parser.add_argument('-x', '--excludefile', type=str)
parser.add_argument('-m', '--skipnestedmountpoints', action='store_true')
parser.add_argument('-sd', '--storagedomain', type=str, default='DefaultStorageDomain')
parser.add_argument('-p', '--policyname', type=str, default=None)
parser.add_argument('-tz', '--timezone', type=str, default='US/Eastern')
parser.add_argument('-st', '--starttime', type=str, default='21:00')
parser.add_argument('-is', '--incrementalsla', type=int, default=60)
parser.add_argument('-fs', '--fullsla', type=int, default=120)
parser.add_argument('-ei', '--enableindexing', action='store_true')
args = parser.parse_args()
vip = args.vip
username = args.username
domain = args.domain
password = args.password
useApiKey = args.useApiKey
servernames = args.servername
serverlist = args.serverlist
jobname = args.jobname
alllocaldrives = args.alllocaldrives
metadatafile = args.metadatafile
includes = args.include
includefile = args.includefile
excludes = args.exclude
excludefile = args.excludefile
skipmountpoints = args.skipnestedmountpoints
storagedomain = args.storagedomain
policyname = args.policyname
starttime = args.starttime
timezone = args.timezone
incrementalsla = args.incrementalsla
fullsla = args.fullsla
enableindexing = args.enableindexing
if servernames is None:
servernames = []
if serverlist is not None:
f = open(serverlist, 'r')
servernames += [s.strip() for s in f.readlines() if s.strip() != '']
f.close()
if len(servernames) == 0:
print('no servers specified')
exit()
if alllocaldrives is True:
includes = ['$ALL_LOCAL_DRIVES']
else:
if includes is None:
includes = []
if includefile is not None:
f = open(includefile, 'r')
includes += [e.strip() for e in f.readlines() if e.strip() != '']
f.close()
if len(includes) == 0:
includes += '/'
if excludes is None:
excludes = []
if excludefile is not None:
f = open(excludefile, 'r')
excludes += [e.strip() for e in f.readlines() if e.strip() != '']
f.close()
apiauth(vip=vip, username=username, domain=domain, password=password, useApiKey=useApiKey)
newJob = False
protectionGroups = api('get', 'data-protect/protection-groups?isDeleted=false&isActive=true', v=2)
jobs = protectionGroups['protectionGroups']
job = [job for job in jobs if job['name'].lower() == jobname.lower()]
if not job or len(job) < 1:
newJob = True
print("Job '%s' not found. Creating new job" % jobname)
if policyname is None:
print('Policy name required for new job')
exit(1)
policy = [p for p in api('get', 'protectionPolicies') if p['name'].lower() == policyname.lower()]
if len(policy) < 1:
print("Policy '%s' not found!" % policyname)
exit(1)
policyid = policy[0]['id']
sd = [sd for sd in api('get', 'viewBoxes') if sd['name'].lower() == storagedomain.lower()]
if len(sd) < 1:
print("Storage domain %s not found!" % storagedomain)
exit(1)
sdid = sd[0]['id']
try:
(hour, minute) = starttime.split(':')
hour = int(hour)
minute = int(minute)
if hour < 0 or hour > 23 or minute < 0 or minute > 59:
print('starttime is invalid!')
exit(1)
except Exception:
print('starttime is invalid!')
exit(1)
job = {
"name": jobname,
"policyId": policyid,
"priority": "kMedium",
"storageDomainId": sdid,
"description": "",
"startTime": {
"hour": int(hour),
"minute": int(minute),
"timeZone": timezone
},
"alertPolicy": {
"backupRunStatus": [
"kFailure"
],
"alertTargets": []
},
"sla": [
{
"backupRunType": "kIncremental",
"slaMinutes": int(incrementalsla)
},
{
"backupRunType": "kFull",
"slaMinutes": int(fullsla)
}
],
"qosPolicy": "kBackupHDD",
"abortInBlackouts": False,
"isActive": True,
"isPaused": False,
"environment": "kPhysical",
"permissions": [],
"physicalParams": {
"protectionType": "kFile",
"fileProtectionTypeParams": {
"objects": [],
"indexingPolicy": {
"enableIndexing": enableindexing,
"includePaths": [
"/"
],
"excludePaths": [
"/$Recycle.Bin",
"/Windows",
"/Program Files",
"/Program Files (x86)",
"/ProgramData",
"/System Volume Information",
"/Users/*/AppData",
"/Recovery",
"/var",
"/usr",
"/sys",
"/proc",
"/lib",
"/grub",
"/grub2",
"/opt/splunk",
"/splunk"
]
},
"performSourceSideDeduplication": False,
"dedupExclusionSourceIds": None,
"globalExcludePaths": None
}
}
}
else:
job = job[0]
if 'physicalParams' not in job or job['physicalParams']['protectionType'] != 'kFile':
print("Job '%s' is not a file-based physical protection job" % jobname)
exit(1)
physicalServersRoot = api('get', 'protectionSources/rootNodes?allUnderHierarchy=false&environments=kPhysicalFiles&environments=kPhysical&environments=kPhysical')
physicalServersRootId = physicalServersRoot[0]['protectionSource']['id']
physicalServers = api('get', 'protectionSources?allUnderHierarchy=false&id=%s&includeEntityPermissionInfo=true' % physicalServersRootId)[0]['nodes']
for servername in servernames:
physicalServer = [s for s in physicalServers if s['protectionSource']['name'].lower() == servername.lower() and s['protectionSource']['physicalProtectionSource']['hostType'] == 'kWindows']
if not physicalServer:
print("******** %s is not a registered Windows server ********" % servername)
else:
physicalServer = physicalServer[0]
mountPoints = []
for volume in physicalServer['protectionSource']['physicalProtectionSource']['volumes']:
if 'mountPoints' in volume:
for mountPoint in volume['mountPoints']:
mountPoint = '/%s' % mountPoint.replace(':\\', '')
mountPoints.append(mountPoint.lower())
theseExcludes = []
for exclude in excludes:
if exclude[0:2] == '*:':
for mountPoint in mountPoints:
theseExcludes.append('%s%s' % (mountPoint[1:], exclude[2:]))
else:
theseExcludes.append(exclude)
existingobject = [o for o in job['physicalParams']['fileProtectionTypeParams']['objects'] if o['id'] == physicalServer['protectionSource']['id']]
if len(existingobject) > 0:
thisobject = existingobject[0]
thisobject['filePaths'] = []
print(' updating %s in job %s...' % (servername, jobname))
newObject = False
else:
thisobject = {
"id": physicalServer['protectionSource']['id'],
"name": physicalServer['protectionSource']['name'],
"filePaths": [],
"usesPathLevelSkipNestedVolumeSetting": True,
"nestedVolumeTypesToSkip": [],
"followNasSymlinkTarget": False
}
print(' adding %s to job %s...' % (servername, jobname))
newObject = True
if metadatafile is not None:
thisobject['metadataFilePath'] = metadatafile
else:
thisobject['metadataFilePath'] = None
for include in includes:
if include != '$ALL_LOCAL_DRIVES':
include = '/%s' % include.replace(':\\', '/').replace('\\', '/')
if include[0:2].lower() in mountPoints or include == '$ALL_LOCAL_DRIVES':
filePath = {
"includedPath": include,
"excludedPaths": [],
"skipNestedVolumes": skipmountpoints
}
thisobject['filePaths'].append(filePath)
for exclude in theseExcludes:
exclude = '/%s' % exclude.replace(':\\', '/').replace('\\', '/').replace('//', '/')
if exclude[0:2].lower() in mountPoints:
thisParent = ''
for include in includes:
include = '/%s' % include.replace(':\\', '/').replace('\\', '/').replace('//', '/')
if include.lower() in exclude.lower() and '/' in exclude:
if len(include) > len(thisParent):
thisParent = include
if alllocaldrives is True:
thisParent = '$ALL_LOCAL_DRIVES'
for filePath in thisobject['filePaths']:
if filePath['includedPath'].lower() == thisParent.lower():
filePath['excludedPaths'].append(exclude)
if newObject is True:
job['physicalParams']['fileProtectionTypeParams']['objects'].append(thisobject)
if newJob is True:
result = api('post', 'data-protect/protection-groups', job, v=2)
else:
result = api('put', 'data-protect/protection-groups/%s' % job['id'], job, v=2)
| true | true |
f73ad353955ca9131653cb4bc8f38ffb029b4f76 | 509 | py | Python | Back-End/Python/Basics/Part -1 - Functional/03 - Function Parameters/05list_recursive.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 25 | 2021-04-28T02:51:26.000Z | 2022-03-24T13:58:04.000Z | Back-End/Python/Basics/Part -1 - Functional/03 - Function Parameters/05list_recursive.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 1 | 2022-03-03T23:33:41.000Z | 2022-03-03T23:35:41.000Z | Back-End/Python/Basics/Part -1 - Functional/03 - Function Parameters/05list_recursive.py | ASHISHKUMAR2411/Programming-CookBook | 9c60655d64d21985ccb4196360858d98344701f9 | [
"MIT"
] | 15 | 2021-05-30T01:35:20.000Z | 2022-03-25T12:38:25.000Z | def list_sum_recursive(input_list):
# Base Case
if not input_list:
return 0
# Recursive case
# Decompose the original problem into simpler instances of the same problem
# by making use of the fact that the input is a recursive data structure
# and can be defined in terms of a smaller version of itself
else:
head = input_list[0]
smaller_list = input_list[1:]
return head + list_sum_recursive(smaller_list)
print(list_sum_recursive([1, 2, 3]))
# 6 | 29.941176 | 79 | 0.685658 | def list_sum_recursive(input_list):
if not input_list:
return 0
else:
head = input_list[0]
smaller_list = input_list[1:]
return head + list_sum_recursive(smaller_list)
print(list_sum_recursive([1, 2, 3]))
| true | true |
f73ad36434fd68c44e28df3b136a2eabbbf6104d | 27,929 | py | Python | saas/api/metrics.py | naqibhakimi/djaodjin-saas | c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df | [
"BSD-2-Clause"
] | null | null | null | saas/api/metrics.py | naqibhakimi/djaodjin-saas | c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df | [
"BSD-2-Clause"
] | null | null | null | saas/api/metrics.py | naqibhakimi/djaodjin-saas | c5b9337b21782f62ef1a5e1bbe9c6421a2dcd2df | [
"BSD-2-Clause"
] | null | null | null | # Copyright (c) 2021, DjaoDjin inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
from django.utils.translation import ugettext_lazy as _
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.response import Response
from .serializers import CartItemSerializer, LifetimeSerializer, MetricsSerializer
from .. import settings
from ..compat import reverse, six
from ..filters import DateRangeFilter
from ..metrics.base import (
abs_monthly_balances,
aggregate_transactions_by_period,
month_periods,
aggregate_transactions_change_by_period,
get_different_units,
)
from ..metrics.subscriptions import (
active_subscribers,
churn_subscribers,
subscribers_age,
)
from ..metrics.transactions import lifetime_value
from ..mixins import (
CartItemSmartListMixin,
CouponMixin,
ProviderMixin,
DateRangeContextMixin,
)
from ..models import CartItem, Plan, Transaction
from ..utils import convert_dates_to_utc, get_organization_model
# Module-level logger; used below to report unit inconsistencies in metrics.
LOGGER = logging.getLogger(__name__)
class BalancesAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
    """
    Retrieves 12-month trailing deferred balances

    Generate a table of revenue (rows) per months (columns) for a default
    balance sheet (Income, Backlog, Receivable).

    **Tags**: metrics, provider, transactionmodel

    **Examples**

    .. code-block:: http

        GET /api/metrics/cowork/balances/ HTTP/1.1

    responds

    .. code-block:: json

        {
            "title": "Balances",
            "scale": 0.01,
            "unit": "usd",
            "table": [
                {
                    "key": "Income",
                    "values": [
                        ["2014-09-01T00:00:00Z", 0],
                        ["2014-10-01T00:00:00Z", 1532624],
                        ["2014-11-01T00:00:00Z", 2348340],
                        ["2014-12-01T00:00:00Z", 3244770],
                        ["2015-01-01T00:00:00Z", 5494221],
                        ["2015-02-01T00:00:00Z", 7214221],
                        ["2015-03-01T00:00:00Z", 8444221],
                        ["2015-04-01T00:00:00Z", 9784221],
                        ["2015-05-01T00:00:00Z", 12784221],
                        ["2015-06-01T00:00:00Z", 14562341],
                        ["2015-07-01T00:00:00Z", 16567341],
                        ["2015-08-01T00:00:00Z", 17893214],
                        ["2015-08-06T02:24:50.485Z", 221340]
                    ]
                },
                {
                    "key": "Backlog",
                    "values": [
                        ["2014-09-01T00:00:00Z", 1712624],
                        ["2014-10-01T00:00:00Z", 3698340],
                        ["2014-11-01T00:00:00Z", 7214770],
                        ["2014-12-01T00:00:00Z", 10494221],
                        ["2015-01-01T00:00:00Z", 14281970],
                        ["2015-02-01T00:00:00Z", 18762845],
                        ["2015-03-01T00:00:00Z", 24258765],
                        ["2015-04-01T00:00:00Z", 31937741],
                        ["2015-05-01T00:00:00Z", 43002401],
                        ["2015-06-01T00:00:00Z", 53331444],
                        ["2015-07-01T00:00:00Z", 64775621],
                        ["2015-08-01T00:00:00Z", 75050033],
                        ["2015-08-06T02:24:50.485Z", 89156321]
                    ]
                },
                {
                    "key": "Receivable",
                    "values": [
                        ["2014-09-01T00:00:00Z", 0],
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 0],
                        ["2015-02-01T00:00:00Z", 0],
                        ["2015-03-01T00:00:00Z", 0],
                        ["2015-04-01T00:00:00Z", 0],
                        ["2015-05-01T00:00:00Z", 0],
                        ["2015-06-01T00:00:00Z", 0],
                        ["2015-07-01T00:00:00Z", 0],
                        ["2015-08-01T00:00:00Z", 0],
                        ["2015-08-06T02:24:50.485Z", 0]
                    ]
                }
            ]
        }
    """

    serializer_class = MetricsSerializer
    filter_backends = (DateRangeFilter,)

    def get(self, request, *args, **kwargs):  # pylint: disable=unused-argument
        # Build one time series per balance-sheet account, each sampled at
        # month boundaries over the 12 months preceding ``self.ends_at``
        # (the DateRangeFilter populates ``ends_at``/``timezone``).
        result = []
        unit = settings.DEFAULT_UNIT
        for key in [Transaction.INCOME, Transaction.BACKLOG, Transaction.RECEIVABLE]:
            values, _unit = abs_monthly_balances(
                organization=self.provider,
                account=key,
                until=self.ends_at,
                tz=self.timezone,
            )
            # NOTE(review): the unit of the last account that reported one
            # wins silently here; if accounts ever used different units the
            # mismatch would go unnoticed (RevenueMetricAPIView logs such
            # mismatches) — confirm this is acceptable.
            if _unit:
                unit = _unit
            result += [{"key": key, "values": values}]
        # Amounts are integers; clients apply ``scale`` (0.01) to obtain
        # display values expressed in ``unit`` (see docstring example).
        return Response(
            {"title": "Balances", "unit": unit, "scale": 0.01, "table": result}
        )
class RevenueMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
    """
    Retrieves 12-month trailing revenue

    Produces sales, payments and refunds over a period of time.

    The API is typically used within an HTML
    `revenue page </docs/themes/#dashboard_metrics_revenue>`_
    as present in the default theme.

    **Tags**: metrics, provider, transactionmodel

    **Examples**

    .. code-block:: http

        GET /api/metrics/cowork/funds/ HTTP/1.1

    responds

    .. code-block:: json

        {
            "title": "Amount",
            "scale": 0.01,
            "unit": "usd",
            "table": [
                {
                    "key": "Total Sales",
                    "values": [
                        ["2014-10-01T00:00:00Z", 1985716],
                        ["2014-11-01T00:00:00Z", 3516430],
                        ["2014-12-01T00:00:00Z", 3279451],
                        ["2015-01-01T00:00:00Z", 3787749],
                        ["2015-02-01T00:00:00Z", 4480875],
                        ["2015-03-01T00:00:00Z", 5495920],
                        ["2015-04-01T00:00:00Z", 7678976],
                        ["2015-05-01T00:00:00Z", 11064660],
                        ["2015-06-01T00:00:00Z", 10329043],
                        ["2015-07-01T00:00:00Z", 11444177],
                        ["2015-08-01T00:00:00Z", 10274412],
                        ["2015-08-06T04:59:14.721Z", 14106288]
                    ]
                },
                {
                    "key": "New Sales",
                    "values": [
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 0],
                        ["2015-02-01T00:00:00Z", 0],
                        ["2015-03-01T00:00:00Z", 0],
                        ["2015-04-01T00:00:00Z", 0],
                        ["2015-05-01T00:00:00Z", 0],
                        ["2015-06-01T00:00:00Z", 0],
                        ["2015-07-01T00:00:00Z", 0],
                        ["2015-08-01T00:00:00Z", 0],
                        ["2015-08-06T04:59:14.721Z", 0]
                    ]
                },
                {
                    "key": "Churned Sales",
                    "values": [
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 0],
                        ["2015-02-01T00:00:00Z", 0],
                        ["2015-03-01T00:00:00Z", 0],
                        ["2015-04-01T00:00:00Z", 0],
                        ["2015-05-01T00:00:00Z", 0],
                        ["2015-06-01T00:00:00Z", 0],
                        ["2015-07-01T00:00:00Z", 0],
                        ["2015-08-01T00:00:00Z", 0],
                        ["2015-08-06T04:59:14.721Z", 0]
                    ]
                },
                {
                    "key": "Payments",
                    "values": [
                        ["2014-10-01T00:00:00Z", 1787144],
                        ["2014-11-01T00:00:00Z", 3164787],
                        ["2014-12-01T00:00:00Z", 2951505],
                        ["2015-01-01T00:00:00Z", 3408974],
                        ["2015-02-01T00:00:00Z", 4032787],
                        ["2015-03-01T00:00:00Z", 4946328],
                        ["2015-04-01T00:00:00Z", 6911079],
                        ["2015-05-01T00:00:00Z", 9958194],
                        ["2015-06-01T00:00:00Z", 9296138],
                        ["2015-07-01T00:00:00Z", 10299759],
                        ["2015-08-01T00:00:00Z", 9246970],
                        ["2015-08-06T04:59:14.721Z", 12695659]
                    ]
                },
                {
                    "key": "Refunds",
                    "values": [
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 0],
                        ["2015-02-01T00:00:00Z", 0],
                        ["2015-03-01T00:00:00Z", 0],
                        ["2015-04-01T00:00:00Z", 0],
                        ["2015-05-01T00:00:00Z", 0],
                        ["2015-06-01T00:00:00Z", 0],
                        ["2015-07-01T00:00:00Z", 0],
                        ["2015-08-01T00:00:00Z", 0],
                        ["2015-08-06T04:59:14.721Z", 0]
                    ]
                }
            ]
        }
    """

    serializer_class = MetricsSerializer
    filter_backends = (DateRangeFilter,)

    def get(self, request, *args, **kwargs):
        # pylint:disable=unused-argument
        dates = convert_dates_to_utc(month_periods(12, self.ends_at, tz=self.timezone))
        unit = settings.DEFAULT_UNIT
        # Total/new/churned sales series derived from Receivable
        # transactions. Throwaway results get explicit names (instead of
        # ``_``) so they do not shadow the module-level
        # ``ugettext_lazy as _`` import within this scope.
        account_table, _customer_table, _customer_extra, table_unit = (
            aggregate_transactions_change_by_period(
                self.provider,
                Transaction.RECEIVABLE,
                account_title="Sales",
                orig="orig",
                dest="dest",
                date_periods=dates,
            )
        )
        # Payments: Receivable transactions originating from the provider's
        # own Backlog account, i.e. cash actually collected.
        _payment_counts, payment_amounts, payments_unit = (
            aggregate_transactions_by_period(
                self.provider,
                Transaction.RECEIVABLE,
                orig="dest",
                dest="dest",
                orig_account=Transaction.BACKLOG,
                orig_organization=self.provider,
                date_periods=dates,
            )
        )
        _refund_counts, refund_amounts, refund_unit = (
            aggregate_transactions_by_period(
                self.provider,
                Transaction.REFUND,
                orig="dest",
                dest="dest",
                date_periods=dates,
            )
        )
        units = get_different_units(table_unit, payments_unit, refund_unit)
        if len(units) > 1:
            # Mixed units cannot be aggregated meaningfully; log the
            # inconsistency and fall back to the first unit reported.
            LOGGER.error("different units in RevenueMetricAPIView.get: %s", units)
        if units:
            unit = units[0]
        account_table += [
            {"key": "Payments", "values": payment_amounts},
            {"key": "Refunds", "values": refund_amounts},
        ]
        resp = {"title": "Amount", "unit": unit, "scale": 0.01, "table": account_table}
        if not self.provider.has_bank_account:
            # Hint for the UI that the provider still needs to connect a
            # deposit account with the payment processor.
            resp.update({"processor_hint": "connect_provider"})
        return Response(resp)
class CouponUsesQuerysetMixin(object):
    """Provides the queryset of cart items redeemed with ``self.coupon``."""

    def get_queryset(self):
        # Only cart items flagged as recorded count as actual uses
        # of the coupon.
        redeemed_items = CartItem.objects.filter(
            recorded=True, coupon=self.coupon)
        return redeemed_items
class CouponUsesAPIView(
    CartItemSmartListMixin, CouponUsesQuerysetMixin, CouponMixin, ListAPIView
):
    """
    Retrieves performance of a discount code

    Returns a list of {{PAGE_SIZE}} cart items on which coupon with
    code {coupon} was used. Coupon {coupon} must have been created by
    provider {organization}.

    The queryset can be further refined to match a search filter (``q``)
    and/or a range of dates ([``start_at``, ``ends_at``]),
    and sorted on specific fields (``o``).

    **Tags**: metrics, provider, couponmodel

    **Examples**

    .. code-block:: http

        GET /api/metrics/cowork/coupons/DIS100/ HTTP/1.1

    responds

    .. code-block:: json

        {
            "count": 1,
            "next": null,
            "previous": null,
            "results": [
                {
                    "user": {
                        "slug": "xia",
                        "created_at": "2012-09-14T23:16:55Z",
                        "email": "xia@localhost.localdomain",
                        "full_name": "Xia Doe",
                        "printable_name": "Xia Doe",
                        "username": "xia"
                    },
                    "plan": "basic",
                    "created_at": "2014-01-01T09:00:00Z"
                }
            ]
        }
    """

    # NOTE(review): presumably prevents the smart-list mixin from imposing a
    # default date range, so all-time coupon uses are listed by default —
    # confirm against ``CartItemSmartListMixin``.
    forced_date_range = False
    serializer_class = CartItemSerializer
class CustomerMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
    """
    Retrieves 12-month trailing customer counts

    The API is typically used within an HTML
    `revenue page </docs/themes/#dashboard_metrics_revenue>`_
    as present in the default theme.

    **Tags**: metrics, provider, profilemodel

    **Examples**

    .. code-block:: http

        GET /api/metrics/cowork/customers/ HTTP/1.1

    responds

    .. code-block:: json

        {
            "title": "Customers",
            "table": [
                {
                    "key": "Total # of Customers",
                    "values": [
                        ["2014-10-01T00:00:00Z", 15],
                        ["2014-11-01T00:00:00Z", 17],
                        ["2014-12-01T00:00:00Z", 19],
                        ["2015-01-01T00:00:00Z", 19],
                        ["2015-02-01T00:00:00Z", 25],
                        ["2015-03-01T00:00:00Z", 29],
                        ["2015-04-01T00:00:00Z", 37],
                        ["2015-05-01T00:00:00Z", 43],
                        ["2015-06-01T00:00:00Z", 46],
                        ["2015-07-01T00:00:00Z", 48],
                        ["2015-08-01T00:00:00Z", 54],
                        ["2015-08-06T05:20:24.537Z", 60]
                    ]
                },
                {
                    "key": "# of new Customers",
                    "values": [
                        ["2014-10-01T00:00:00Z", 2],
                        ["2014-11-01T00:00:00Z", 2],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 6],
                        ["2015-02-01T00:00:00Z", 4],
                        ["2015-03-01T00:00:00Z", 8],
                        ["2015-04-01T00:00:00Z", 6],
                        ["2015-05-01T00:00:00Z", 3],
                        ["2015-06-01T00:00:00Z", 2],
                        ["2015-07-01T00:00:00Z", 6],
                        ["2015-08-01T00:00:00Z", 7],
                        ["2015-08-06T05:20:24.537Z", 0]
                    ]
                },
                {
                    "key": "# of churned Customers",
                    "values": [
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 0],
                        ["2015-02-01T00:00:00Z", 0],
                        ["2015-03-01T00:00:00Z", 0],
                        ["2015-04-01T00:00:00Z", 0],
                        ["2015-05-01T00:00:00Z", 0],
                        ["2015-06-01T00:00:00Z", 0],
                        ["2015-07-01T00:00:00Z", 0],
                        ["2015-08-01T00:00:00Z", 1],
                        ["2015-08-06T05:20:24.537Z", 60]
                    ]
                },
                {
                    "key": "Net New Customers",
                    "values": [
                        ["2014-10-01T00:00:00Z", 2],
                        ["2014-11-01T00:00:00Z", 2],
                        ["2014-12-01T00:00:00Z", 0],
                        ["2015-01-01T00:00:00Z", 6],
                        ["2015-02-01T00:00:00Z", 4],
                        ["2015-03-01T00:00:00Z", 8],
                        ["2015-04-01T00:00:00Z", 6],
                        ["2015-05-01T00:00:00Z", 3],
                        ["2015-06-01T00:00:00Z", 2],
                        ["2015-07-01T00:00:00Z", 6],
                        ["2015-08-01T00:00:00Z", 6],
                        ["2015-08-06T05:20:24.537Z", -60]
                    ]
                }
            ],
            "extra": [
                {
                    "key": "% Customer Churn",
                    "values": [
                        ["2014-10-01T00:00:00Z", 0],
                        ["2014-11-01T00:00:00Z", 0.0],
                        ["2014-12-01T00:00:00Z", 0.0],
                        ["2015-01-01T00:00:00Z", 0.0],
                        ["2015-02-01T00:00:00Z", 0.0],
                        ["2015-03-01T00:00:00Z", 0.0],
                        ["2015-04-01T00:00:00Z", 0.0],
                        ["2015-05-01T00:00:00Z", 0.0],
                        ["2015-06-01T00:00:00Z", 0.0],
                        ["2015-07-01T00:00:00Z", 0.0],
                        ["2015-08-01T00:00:00Z", 2.08],
                        ["2015-08-06T05:20:24.537Z", 111.11]
                    ]
                }
            ]
        }
    """

    serializer_class = MetricsSerializer
    filter_backends = (DateRangeFilter,)

    def get(self, request, *args, **kwargs):
        # pylint:disable=unused-argument
        account_title = "Payments"
        # We use ``Transaction.RECEIVABLE`` which technically counts the
        # number of orders, not the number of payments.
        account = Transaction.RECEIVABLE
        dates = convert_dates_to_utc(month_periods(12, self.ends_at, tz=self.timezone))
        # Only the per-customer breakdown and churn percentages are used;
        # the amounts table and its unit are discarded. Explicit throwaway
        # names (instead of ``_``) avoid shadowing the module-level
        # ``ugettext_lazy as _`` import within this scope.
        _account_table, customer_table, customer_extra, _unit = (
            aggregate_transactions_change_by_period(
                self.provider, account, account_title=account_title, date_periods=dates
            )
        )
        return Response(
            {"title": "Customers", "table": customer_table, "extra": customer_extra}
        )
class LifetimeValueMetricMixin(DateRangeContextMixin, ProviderMixin):
"""
Decorates profiles with subscriber age and lifetime value
"""
filter_backends = (DateRangeFilter,)
def get_queryset(self):
organization_model = get_organization_model()
if self.provider:
queryset = organization_model.objects.filter(
subscribes_to__organization=self.provider
).distinct()
else:
queryset = organization_model.objects.all()
queryset = queryset.filter(
outgoing__orig_account=Transaction.PAYABLE
).distinct()
return queryset.order_by("full_name")
def decorate_queryset(self, queryset):
decorated_queryset = list(queryset)
subscriber_ages = {
subscriber["slug"]: subscriber
for subscriber in subscribers_age(provider=self.provider)
}
customer_values = lifetime_value(provider=self.provider)
for organization in decorated_queryset:
subscriber = subscriber_ages.get(organization.slug)
if subscriber:
organization.created_at = subscriber["created_at"]
organization.ends_at = subscriber["ends_at"]
else:
organization.ends_at = None
customer = customer_values.get(organization.slug)
if customer:
for unit, val in six.iteritems(customer):
# XXX Only supports one currency unit.
organization.unit = unit
organization.contract_value = val["contract_value"]
organization.cash_payments = val["payments"]
organization.deferred_revenue = val["deferred_revenue"]
else:
organization.unit = settings.DEFAULT_UNIT
organization.contract_value = 0
organization.cash_payments = 0
organization.deferred_revenue = 0
return decorated_queryset
class LifetimeValueMetricAPIView(LifetimeValueMetricMixin, ListAPIView):
"""
Retrieves customers lifetime value
**Tags**: metrics, provider, profilemodel
**Examples**
.. code-block:: http
GET /api/metrics/cowork/lifetimevalue/ HTTP/1.1
responds
.. code-block:: json
{
"count": 1,
"next": null,
"previous": null,
"results": [
{
"slug": "xia",
"email": "xia@localhost.localdomain",
"full_name": "Xia Doe",
"created_at": "2014-01-01T09:00:00Z",
"ends_at": "2014-01-01T09:00:00Z",
"unit": "usd",
"contract_value": 10000,
"cash_payments": 10000,
"deferred_revenue": 10000
}
]
}
"""
serializer_class = LifetimeSerializer
def paginate_queryset(self, queryset):
page = super(LifetimeValueMetricAPIView, self).paginate_queryset(queryset)
return self.decorate_queryset(page if page else queryset)
class PlanMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
"""
Retrieves 12-month trailing plans performance
The API is typically used within an HTML
`plans metrics page </docs/themes/#dashboard_metrics_plans>`_
as present in the default theme.
**Tags**: metrics, provider, planmodel
**Examples**
.. code-block:: http
GET /api/metrics/cowork/plans/ HTTP/1.1
responds
.. code-block:: json
{
"title": "Active Subscribers",
"table": [
{
"is_active": true,
"key": "open-space",
"location": "/profile/plan/open-space/",
"values": [
["2014-09-01T00:00:00Z", 4],
["2014-10-01T00:00:00Z", 5],
["2014-11-01T00:00:00Z", 6],
["2014-12-01T00:00:00Z", 6],
["2015-01-01T00:00:00Z", 6],
["2015-02-01T00:00:00Z", 9],
["2015-03-01T00:00:00Z", 9],
["2015-04-01T00:00:00Z", 9],
["2015-05-01T00:00:00Z", 11],
["2015-06-01T00:00:00Z", 11],
["2015-07-01T00:00:00Z", 14],
["2015-08-01T00:00:00Z", 16],
["2015-08-06T05:37:50.004Z", 16]
]
},
{
"is_active": true,
"key": "open-plus",
"location": "/profile/plan/open-plus/",
"values": [
["2014-09-01T00:00:00Z", 7],
["2014-10-01T00:00:00Z", 8],
["2014-11-01T00:00:00Z", 9],
["2014-12-01T00:00:00Z", 9],
["2015-01-01T00:00:00Z", 12],
["2015-02-01T00:00:00Z", 13],
["2015-03-01T00:00:00Z", 18],
["2015-04-01T00:00:00Z", 19],
["2015-05-01T00:00:00Z", 19],
["2015-06-01T00:00:00Z", 20],
["2015-07-01T00:00:00Z", 23],
["2015-08-01T00:00:00Z", 25],
["2015-08-06T05:37:50.014Z", 25]
]
},
{
"is_active": true,
"key": "private",
"location": "/profile/plan/private/",
"values": [
["2014-09-01T00:00:00Z", 3],
["2014-10-01T00:00:00Z", 3],
["2014-11-01T00:00:00Z", 3],
["2014-12-01T00:00:00Z", 3],
["2015-01-01T00:00:00Z", 6],
["2015-02-01T00:00:00Z", 7],
["2015-03-01T00:00:00Z", 10],
["2015-04-01T00:00:00Z", 15],
["2015-05-01T00:00:00Z", 16],
["2015-06-01T00:00:00Z", 17],
["2015-07-01T00:00:00Z", 17],
["2015-08-01T00:00:00Z", 18],
["2015-08-06T05:37:50.023Z", 18]
]
}
],
"extra": [
{
"key": "churn",
"values": [
["2014-09-01T00:00:00Z", 0],
["2014-10-01T00:00:00Z", 0],
["2014-11-01T00:00:00Z", 0],
["2014-12-01T00:00:00Z", 0],
["2015-01-01T00:00:00Z", 0],
["2015-02-01T00:00:00Z", 0],
["2015-03-01T00:00:00Z", 0],
["2015-04-01T00:00:00Z", 0],
["2015-05-01T00:00:00Z", 0],
["2015-06-01T00:00:00Z", 0],
["2015-07-01T00:00:00Z", 0],
["2015-08-01T00:00:00Z", 1],
["2015-08-06T05:37:50.031Z", 1]
]
}
]
}
"""
serializer_class = MetricsSerializer
filter_backends = (DateRangeFilter,)
def get(self, request, *args, **kwargs):
# pylint:disable=unused-argument
table = []
for plan in Plan.objects.filter(organization=self.provider).order_by("title"):
values = active_subscribers(plan, from_date=self.ends_at, tz=self.timezone)
table.append(
{
"key": plan.slug,
"title": plan.title,
"values": values,
"location": reverse("saas_plan_edit", args=(self.provider, plan)),
"is_active": plan.is_active,
}
)
extra = [
{
"key": "churn",
"values": churn_subscribers(from_date=self.ends_at, tz=self.timezone),
}
]
return Response(
{"title": _("Active subscribers"), "table": table, "extra": extra}
)
| 37.041114 | 87 | 0.45766 |
import logging
from django.utils.translation import ugettext_lazy as _
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework.response import Response
from .serializers import CartItemSerializer, LifetimeSerializer, MetricsSerializer
from .. import settings
from ..compat import reverse, six
from ..filters import DateRangeFilter
from ..metrics.base import (
abs_monthly_balances,
aggregate_transactions_by_period,
month_periods,
aggregate_transactions_change_by_period,
get_different_units,
)
from ..metrics.subscriptions import (
active_subscribers,
churn_subscribers,
subscribers_age,
)
from ..metrics.transactions import lifetime_value
from ..mixins import (
CartItemSmartListMixin,
CouponMixin,
ProviderMixin,
DateRangeContextMixin,
)
from ..models import CartItem, Plan, Transaction
from ..utils import convert_dates_to_utc, get_organization_model
LOGGER = logging.getLogger(__name__)
class BalancesAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
serializer_class = MetricsSerializer
filter_backends = (DateRangeFilter,)
def get(self, request, *args, **kwargs):
result = []
unit = settings.DEFAULT_UNIT
for key in [Transaction.INCOME, Transaction.BACKLOG, Transaction.RECEIVABLE]:
values, _unit = abs_monthly_balances(
organization=self.provider,
account=key,
until=self.ends_at,
tz=self.timezone,
)
if _unit:
unit = _unit
result += [{"key": key, "values": values}]
return Response(
{"title": "Balances", "unit": unit, "scale": 0.01, "table": result}
)
class RevenueMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
serializer_class = MetricsSerializer
filter_backends = (DateRangeFilter,)
def get(self, request, *args, **kwargs):
dates = convert_dates_to_utc(month_periods(12, self.ends_at, tz=self.timezone))
unit = settings.DEFAULT_UNIT
account_table, _, _, table_unit = aggregate_transactions_change_by_period(
self.provider,
Transaction.RECEIVABLE,
account_title="Sales",
orig="orig",
dest="dest",
date_periods=dates,
)
_, payment_amounts, payments_unit = aggregate_transactions_by_period(
self.provider,
Transaction.RECEIVABLE,
orig="dest",
dest="dest",
orig_account=Transaction.BACKLOG,
orig_organization=self.provider,
date_periods=dates,
)
_, refund_amounts, refund_unit = aggregate_transactions_by_period(
self.provider,
Transaction.REFUND,
orig="dest",
dest="dest",
date_periods=dates,
)
units = get_different_units(table_unit, payments_unit, refund_unit)
if len(units) > 1:
LOGGER.error("different units in RevenueMetricAPIView.get: %s", units)
if units:
unit = units[0]
account_table += [
{"key": "Payments", "values": payment_amounts},
{"key": "Refunds", "values": refund_amounts},
]
resp = {"title": "Amount", "unit": unit, "scale": 0.01, "table": account_table}
if not self.provider.has_bank_account:
resp.update({"processor_hint": "connect_provider"})
return Response(resp)
class CouponUsesQuerysetMixin(object):
def get_queryset(self):
return CartItem.objects.filter(coupon=self.coupon, recorded=True)
class CouponUsesAPIView(
CartItemSmartListMixin, CouponUsesQuerysetMixin, CouponMixin, ListAPIView
):
forced_date_range = False
serializer_class = CartItemSerializer
class CustomerMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
serializer_class = MetricsSerializer
filter_backends = (DateRangeFilter,)
def get(self, request, *args, **kwargs):
account_title = "Payments"
account = Transaction.RECEIVABLE
dates = convert_dates_to_utc(month_periods(12, self.ends_at, tz=self.timezone))
_, customer_table, customer_extra, _ = aggregate_transactions_change_by_period(
self.provider, account, account_title=account_title, date_periods=dates
)
return Response(
{"title": "Customers", "table": customer_table, "extra": customer_extra}
)
class LifetimeValueMetricMixin(DateRangeContextMixin, ProviderMixin):
filter_backends = (DateRangeFilter,)
def get_queryset(self):
organization_model = get_organization_model()
if self.provider:
queryset = organization_model.objects.filter(
subscribes_to__organization=self.provider
).distinct()
else:
queryset = organization_model.objects.all()
queryset = queryset.filter(
outgoing__orig_account=Transaction.PAYABLE
).distinct()
return queryset.order_by("full_name")
def decorate_queryset(self, queryset):
decorated_queryset = list(queryset)
subscriber_ages = {
subscriber["slug"]: subscriber
for subscriber in subscribers_age(provider=self.provider)
}
customer_values = lifetime_value(provider=self.provider)
for organization in decorated_queryset:
subscriber = subscriber_ages.get(organization.slug)
if subscriber:
organization.created_at = subscriber["created_at"]
organization.ends_at = subscriber["ends_at"]
else:
organization.ends_at = None
customer = customer_values.get(organization.slug)
if customer:
for unit, val in six.iteritems(customer):
organization.unit = unit
organization.contract_value = val["contract_value"]
organization.cash_payments = val["payments"]
organization.deferred_revenue = val["deferred_revenue"]
else:
organization.unit = settings.DEFAULT_UNIT
organization.contract_value = 0
organization.cash_payments = 0
organization.deferred_revenue = 0
return decorated_queryset
class LifetimeValueMetricAPIView(LifetimeValueMetricMixin, ListAPIView):
serializer_class = LifetimeSerializer
def paginate_queryset(self, queryset):
page = super(LifetimeValueMetricAPIView, self).paginate_queryset(queryset)
return self.decorate_queryset(page if page else queryset)
class PlanMetricAPIView(DateRangeContextMixin, ProviderMixin, GenericAPIView):
serializer_class = MetricsSerializer
filter_backends = (DateRangeFilter,)
def get(self, request, *args, **kwargs):
table = []
for plan in Plan.objects.filter(organization=self.provider).order_by("title"):
values = active_subscribers(plan, from_date=self.ends_at, tz=self.timezone)
table.append(
{
"key": plan.slug,
"title": plan.title,
"values": values,
"location": reverse("saas_plan_edit", args=(self.provider, plan)),
"is_active": plan.is_active,
}
)
extra = [
{
"key": "churn",
"values": churn_subscribers(from_date=self.ends_at, tz=self.timezone),
}
]
return Response(
{"title": _("Active subscribers"), "table": table, "extra": extra}
)
| true | true |
f73ad38bc8446f1b20b10c876e10ac9ac4a364a9 | 2,700 | py | Python | old/PhasedHaplotypeParser.py | orionzhou/biolib | 940fb66f1b2608d34a2d00ebdf41dc84c6381f42 | [
"BSD-2-Clause"
] | 3 | 2019-02-22T20:35:23.000Z | 2021-11-25T10:01:50.000Z | old/PhasedHaplotypeParser.py | orionzhou/biolib | 940fb66f1b2608d34a2d00ebdf41dc84c6381f42 | [
"BSD-2-Clause"
] | null | null | null | old/PhasedHaplotypeParser.py | orionzhou/biolib | 940fb66f1b2608d34a2d00ebdf41dc84c6381f42 | [
"BSD-2-Clause"
] | 1 | 2021-02-19T03:10:14.000Z | 2021-02-19T03:10:14.000Z | '''
@author: Roman Briskine, University of Minnesota
'''
import os.path;
import re;
F_VARIANT = 1;
F_CLASS = 2;
F_POS = 3;
F_REF_ALLELE = 4;
F_VAR_ALLELE = 5;
F_EXON = 9;
F_ACC_OFFSET = 13;
class PhasedHaplotypeParser():
def __init__(self, accessionN = 3, accessionColN = 7, delim = '\t'):
self.accessionN = accessionN;
self.accessionColN = accessionColN;
self.delim = delim;
self.markers = [];
self.haplotypes = [];
for k in range(self.accessionN + 1):
famId = "F%03d" % k;
self.haplotypes.append([famId]);
self.nucleotides = { "A":1, "C":2, "G":3, "T":4 };
def parse(self, fPathIn, freqTheshold, fPathPhased = None, fPathMarker = None):
print("Parsing...");
if fPathPhased == None:
fPathPhased = fPathIn + ".haps";
if fPathMarker == None:
fPathMarker = fPathIn + ".info";
with open(fPathIn, 'r') as fIn:
line = fIn.readline();
hdr = line.split(self.delim);
self.haplotypes[0].append("REF");
for k in range(self.accessionN):
accNameIdx = F_ACC_OFFSET + k * self.accessionColN;
self.haplotypes[k + 1].append(hdr[accNameIdx]);
prevPos = 0;
line = fIn.readline();
while line != "":
fields = line.split(self.delim);
if fields[F_CLASS] == "S" and fields[F_EXON] != '' and fields[F_REF_ALLELE] in self.nucleotides:
if fields[F_POS] != prevPos:
self.markers.append([fields[F_VARIANT] + ":" + fields[F_EXON], fields[F_POS]]);
nId = self.nucleotides[fields[F_REF_ALLELE]];
self.haplotypes[0].append(nId);
for k in range(self.accessionN):
freqIdx = F_ACC_OFFSET + k * self.accessionColN + 3;
if float(fields[freqIdx]) > freqThreshold:
nId = self.nucleotides[fields[F_VAR_ALLELE].upper()];
self.haplotypes[k + 1].append(nId);
else:
nId = self.nucleotides[fields[F_REF_ALLELE]];
self.haplotypes[k + 1].append(nId);
# else:
# for k in range(self.accessionN):
# freqIdx = F_ACC_OFFSET + k * self.accessionColN + 3;
# if float(fields[freqIdx]) > freqThreshold:
# self.haplotypes[k + 1][-1] = self.nucleotides[fields[F_VAR_ALLELE].upper()];
prevPos = fields[F_POS];
line = fIn.readline();
with open(fPathMarker, 'w') as fMarker:
for marker in self.markers:
fMarker.write(self.delim.join(marker));
fMarker.write('\n');
with open(fPathPhased, 'w') as fPhased:
for accession in self.haplotypes:
fPhased.write(self.delim.join(map(str, accession)) + '\n');
fPhased.write(self.delim.join(map(str, accession)) + '\n');
if __name__ == '__main__':
fPathIn = "variant_table.10_30.txt";
freqThreshold = 0.85;
phParser = PhasedHaplotypeParser();
phParser.parse(fPathIn, freqThreshold);
| 31.395349 | 100 | 0.646296 |
import os.path;
import re;
F_VARIANT = 1;
F_CLASS = 2;
F_POS = 3;
F_REF_ALLELE = 4;
F_VAR_ALLELE = 5;
F_EXON = 9;
F_ACC_OFFSET = 13;
class PhasedHaplotypeParser():
def __init__(self, accessionN = 3, accessionColN = 7, delim = '\t'):
self.accessionN = accessionN;
self.accessionColN = accessionColN;
self.delim = delim;
self.markers = [];
self.haplotypes = [];
for k in range(self.accessionN + 1):
famId = "F%03d" % k;
self.haplotypes.append([famId]);
self.nucleotides = { "A":1, "C":2, "G":3, "T":4 };
def parse(self, fPathIn, freqTheshold, fPathPhased = None, fPathMarker = None):
print("Parsing...");
if fPathPhased == None:
fPathPhased = fPathIn + ".haps";
if fPathMarker == None:
fPathMarker = fPathIn + ".info";
with open(fPathIn, 'r') as fIn:
line = fIn.readline();
hdr = line.split(self.delim);
self.haplotypes[0].append("REF");
for k in range(self.accessionN):
accNameIdx = F_ACC_OFFSET + k * self.accessionColN;
self.haplotypes[k + 1].append(hdr[accNameIdx]);
prevPos = 0;
line = fIn.readline();
while line != "":
fields = line.split(self.delim);
if fields[F_CLASS] == "S" and fields[F_EXON] != '' and fields[F_REF_ALLELE] in self.nucleotides:
if fields[F_POS] != prevPos:
self.markers.append([fields[F_VARIANT] + ":" + fields[F_EXON], fields[F_POS]]);
nId = self.nucleotides[fields[F_REF_ALLELE]];
self.haplotypes[0].append(nId);
for k in range(self.accessionN):
freqIdx = F_ACC_OFFSET + k * self.accessionColN + 3;
if float(fields[freqIdx]) > freqThreshold:
nId = self.nucleotides[fields[F_VAR_ALLELE].upper()];
self.haplotypes[k + 1].append(nId);
else:
nId = self.nucleotides[fields[F_REF_ALLELE]];
self.haplotypes[k + 1].append(nId);
prevPos = fields[F_POS];
line = fIn.readline();
with open(fPathMarker, 'w') as fMarker:
for marker in self.markers:
fMarker.write(self.delim.join(marker));
fMarker.write('\n');
with open(fPathPhased, 'w') as fPhased:
for accession in self.haplotypes:
fPhased.write(self.delim.join(map(str, accession)) + '\n');
fPhased.write(self.delim.join(map(str, accession)) + '\n');
if __name__ == '__main__':
fPathIn = "variant_table.10_30.txt";
freqThreshold = 0.85;
phParser = PhasedHaplotypeParser();
phParser.parse(fPathIn, freqThreshold);
| true | true |
f73ad42ed25fbd1c28e831c19a3c55a7c358296f | 487 | py | Python | fatf/metrics/fairness.py | mattclifford1/fat-forensics | bc838d0a9005935095e0a3d060404b917301f98e | [
"BSD-3-Clause"
] | null | null | null | fatf/metrics/fairness.py | mattclifford1/fat-forensics | bc838d0a9005935095e0a3d060404b917301f98e | [
"BSD-3-Clause"
] | null | null | null | fatf/metrics/fairness.py | mattclifford1/fat-forensics | bc838d0a9005935095e0a3d060404b917301f98e | [
"BSD-3-Clause"
] | null | null | null | """Fairness metrics.
This module gathers various metrics to assess fairness of a machine learning
pipeline.
TODO: Implement:
* duplicated rows with different protected attributes and classes,
* sample size disparity (for data and features),
* disparate impact, and
* disparate treatment.
"""
# Author: Kacper Sokol <k.sokol@bristol.ac.uk>
# License: new BSD
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| 25.631579 | 76 | 0.767967 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
| true | true |
f73ad4dd5bb0da402a5bb5bd8ee893dd4dab918f | 2,765 | py | Python | app/core/tests/test_models.py | eitan-lukin/recipe-app-api | 13a2781c66bd329748786ba4cc7b380ed8a505c0 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | eitan-lukin/recipe-app-api | 13a2781c66bd329748786ba4cc7b380ed8a505c0 | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | eitan-lukin/recipe-app-api | 13a2781c66bd329748786ba4cc7b380ed8a505c0 | [
"MIT"
] | null | null | null | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@stuffedpenguinstudio.com', password='testpass'):
"""Create sample user"""
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
"""Test creating a new iser with an email is successful"""
email = 'test@stuffedpenguinstudios.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
"""Test that the new user email is normalized."""
email = 'test@STUFFEDPENGUINSTUDIOS.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
"""Test creating user with no email raises error."""
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
"""Test creating a new superuser."""
user = get_user_model().objects.create_superuser(
'test@stuffedpenguinstudios.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
"""Test the tag string representation"""
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
"""Test the ingredient string representation"""
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
"""Test the recipe string representation"""
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
"""Test that image is saved in the correct location"""
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'my_image.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| 32.151163 | 76 | 0.645931 | from unittest.mock import patch
from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
def sample_user(email='test@stuffedpenguinstudio.com', password='testpass'):
return get_user_model().objects.create_user(email, password)
class ModelTests(TestCase):
def test_create_user_with_email_successful(self):
email = 'test@stuffedpenguinstudios.com'
password = 'Testpass123'
user = get_user_model().objects.create_user(
email=email,
password=password
)
self.assertEqual(user.email, email)
self.assertTrue(user.check_password(password))
def test_new_user_email_normalized(self):
email = 'test@STUFFEDPENGUINSTUDIOS.COM'
user = get_user_model().objects.create_user(email, 'test123')
self.assertEqual(user.email, email.lower())
def test_new_user_invalid_email(self):
with self.assertRaises(ValueError):
get_user_model().objects.create_user(None, 'test123')
def test_create_new_superuser(self):
user = get_user_model().objects.create_superuser(
'test@stuffedpenguinstudios.com',
'test123'
)
self.assertTrue(user.is_superuser)
self.assertTrue(user.is_staff)
def test_tag_str(self):
tag = models.Tag.objects.create(
user=sample_user(),
name='Vegan'
)
self.assertEqual(str(tag), tag.name)
def test_ingredient_str(self):
ingredient = models.Ingredient.objects.create(
user=sample_user(),
name='Cucumber'
)
self.assertEqual(str(ingredient), ingredient.name)
def test_recipe_str(self):
recipe = models.Recipe.objects.create(
user=sample_user(),
title='Steak and mushroom sauce',
time_minutes=5,
price=5.00
)
self.assertEqual(str(recipe), recipe.title)
@patch('uuid.uuid4')
def test_recipe_file_name_uuid(self, mock_uuid):
uuid = 'test-uuid'
mock_uuid.return_value = uuid
file_path = models.recipe_image_file_path(None, 'my_image.jpg')
exp_path = f'uploads/recipe/{uuid}.jpg'
self.assertEqual(file_path, exp_path)
| true | true |
f73ad5f54fd5bc781903da9deb8b4774a8785e56 | 2,271 | py | Python | vel/openai/baselines/common/vec_env/dummy_vec_env.py | tigerwlin/vel | 00e4fbb7b612e888e2cbb5d8455146664638cd0b | [
"MIT"
] | null | null | null | vel/openai/baselines/common/vec_env/dummy_vec_env.py | tigerwlin/vel | 00e4fbb7b612e888e2cbb5d8455146664638cd0b | [
"MIT"
] | null | null | null | vel/openai/baselines/common/vec_env/dummy_vec_env.py | tigerwlin/vel | 00e4fbb7b612e888e2cbb5d8455146664638cd0b | [
"MIT"
] | null | null | null | import numpy as np
# from gym import spaces
from bc_gym_planning_env.envs.base import spaces
from collections import OrderedDict
from . import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
| 32.913043 | 111 | 0.582563 | import numpy as np
from bc_gym_planning_env.envs.base import spaces
from collections import OrderedDict
from . import VecEnv
class DummyVecEnv(VecEnv):
def __init__(self, env_fns):
self.envs = [fn() for fn in env_fns]
env = self.envs[0]
VecEnv.__init__(self, len(env_fns), env.observation_space, env.action_space)
shapes, dtypes = {}, {}
self.keys = []
obs_space = env.observation_space
if isinstance(obs_space, spaces.Dict):
assert isinstance(obs_space.spaces, OrderedDict)
subspaces = obs_space.spaces
else:
subspaces = {None: obs_space}
for key, box in subspaces.items():
shapes[key] = box.shape
dtypes[key] = box.dtype
self.keys.append(key)
self.buf_obs = { k: np.zeros((self.num_envs,) + tuple(shapes[k]), dtype=dtypes[k]) for k in self.keys }
self.buf_dones = np.zeros((self.num_envs,), dtype=np.bool)
self.buf_rews = np.zeros((self.num_envs,), dtype=np.float32)
self.buf_infos = [{} for _ in range(self.num_envs)]
self.actions = None
def step_async(self, actions):
self.actions = actions
def step_wait(self):
for e in range(self.num_envs):
obs, self.buf_rews[e], self.buf_dones[e], self.buf_infos[e] = self.envs[e].step(self.actions)
if self.buf_dones[e]:
obs = self.envs[e].reset()
self._save_obs(e, obs)
return (self._obs_from_buf(), np.copy(self.buf_rews), np.copy(self.buf_dones),
self.buf_infos.copy())
def reset(self):
for e in range(self.num_envs):
obs = self.envs[e].reset()
self._save_obs(e, obs)
return self._obs_from_buf()
def close(self):
return
def render(self, mode='human'):
return [e.render(mode=mode) for e in self.envs]
def _save_obs(self, e, obs):
for k in self.keys:
if k is None:
self.buf_obs[k][e] = obs
else:
self.buf_obs[k][e] = obs[k]
def _obs_from_buf(self):
if self.keys==[None]:
return self.buf_obs[None]
else:
return self.buf_obs
| true | true |
f73ad6c195724da595cd5568175cf2d6d8a2d383 | 11,443 | py | Python | handlers/STHandler/__init__.py | rickithadi/sneakpeek | 9219ae2f7e059aff3028ad3d361a41675baf11dd | [
"MIT"
] | null | null | null | handlers/STHandler/__init__.py | rickithadi/sneakpeek | 9219ae2f7e059aff3028ad3d361a41675baf11dd | [
"MIT"
] | null | null | null | handlers/STHandler/__init__.py | rickithadi/sneakpeek | 9219ae2f7e059aff3028ad3d361a41675baf11dd | [
"MIT"
] | null | null | null | import dateutil.parser
import datetime
import time
import re
import requests
from bs4 import BeautifulSoup
from comment import Comment
from difflib import SequenceMatcher
from handlers.AbstractBaseHandler import AbstractBaseHandler, HandlerError
from newspaper import Article
from nltk.util import ngrams
import codecs
class STHandler(AbstractBaseHandler):
    """Handler that resolves a Straits Times article URL into a Comment.

    Free articles are scraped directly; premium (paywalled) articles are
    matched against the pressreader.com edition of the paper by title and
    body similarity (see handlePremium).
    """
    # Per-submission state, populated by handle()/makeSoup()/handlePremium().
    soup = None
    url = None
    title = None
    # Number of daily pressreader indexes searched after the publish date
    # (print edition may lag the online publish date).
    MAX_DAYS_OFFSET = 2
    # Cap on candidate-article downloads per index, to respect rate limits.
    MAX_CURLS_ALLOWED = 5
    # Threshold used by articleBodiesMatch() to accept a candidate.
    MIN_PASSING_SCORE = 0.5
    # Seconds to pause between candidate downloads (rate limiting).
    SLEEP_BETWEEN_CURLS = 1
    # Articles published after this hour appear only in the next day's index.
    ST_PUBLISH_CUTOFF_HOUR = 5
    MODERATED_MAX = 0.8 # we don't want perfect scores overwhelming
@classmethod
def handle(cls, url):
cls.url = url
cls.soup = cls.makeSoup()
return cls.handlePremium() if cls.isPremiumArticle() else cls.handleNonPremium()
@classmethod
def makeSoup(cls):
html = requests.get(cls.url).text
soup = BeautifulSoup(html, "html.parser")
cls.soup = soup
return soup
@classmethod
def isPremiumArticle(cls):
html = requests.get(cls.url).text
elem = cls.soup.find(name="div", class_="paid-premium st-flag-1")
return elem is not None
@classmethod
def handleNonPremium(cls):
article = Article(cls.url)
article.download()
article.parse()
title = article.title
body = article.text
return Comment(title, body)
@classmethod
def handlePremium(cls):
cls.title = cls.soup.find(name="meta", property="og:title")['content']
print(f"article title: {cls.title}")
# An article may run for multiple days or be published a day or two later
for days_offset in range(0, cls.MAX_DAYS_OFFSET):
# Trying to find a scraped article with the closest title/body to the submission
possibleMatchingArticles = cls.generateTodaysArticles(days_offset)
closestArticle = cls.getMatchingArticle(possibleMatchingArticles)
if closestArticle is not None:
return closestArticle
print(f"unable to find a suitable article that matches {cls.title}, skipping submission")
return None
@classmethod
def generateTodaysArticles(cls, days_offset):
articlesList = BeautifulSoup(cls.getArticlesIndex(days_offset), "html.parser")
articles = articlesList.findAll(name="a")
scoredArticles = [( article, cls.similar(article.text, cls.title)) for article in articles]
# sorted such that scoredArticles[0] has the best chance of being the article we want
scoredArticles = sorted(scoredArticles, key=lambda x: x[1], reverse=True)
return scoredArticles
@classmethod
def getMatchingArticle(cls, scoredArticles):
# every article in scoredArticles has a chance of being the article we want
# with scoredArticles[0] being the most likely and the last element being the least
# due to rate limits we cannot check all of the articles
articlesCheckedSoFar = 0
while articlesCheckedSoFar < cls.MAX_CURLS_ALLOWED and len(scoredArticles) > 0:
currArticle = scoredArticles.pop(0)
currComment = cls.makeComment(currArticle[0]['href'])
previewComment = cls.handleNonPremium()
if cls.articleBodiesMatch(previewComment.body, currComment.body):
return currComment
articlesCheckedSoFar = articlesCheckedSoFar + 1
time.sleep(cls.SLEEP_BETWEEN_CURLS)
@classmethod
def articleBodiesMatch(cls, previewBody, articleBody):
# the higher the score, the better confidence that previewBody is a subset of articleBody
score = 0
for sentence in cls.split_into_sentences(previewBody):
weight = len(sentence) / float(len(previewBody)) #longer sentences carry more weight
score = score + cls.isNeedleInHay(needle=sentence, hay=articleBody) * weight
return score > cls.MIN_PASSING_SCORE
@classmethod
def makeComment(cls, bestCandidate):
url = f"https://www.pressreader.com{bestCandidate}"
article = Article(url, browser_user_agent = "Googlebot-News", keep_article_html=True)
article.download()
try:
article.parse()
except:
return Comment('','')
title = article.title.replace("\xad", "") # clean the text
body = article.text.replace("\xad", "") # clean the text
print(f"checking the article in this url: {url} with title {title}")
return Comment(title, body)
@classmethod
def getArticlesIndex(cls, days_offset):
publishedDate = cls.getDate(days_offset)
userAgent = "Googlebot-News"
url = f"https://www.pressreader.com/singapore/the-straits-times/{publishedDate}"
headers = { "User-Agent": userAgent }
articlesList = requests.get(url, headers=headers).text
articlesList = articlesList.replace("­", "") # clean the text
return articlesList
@classmethod
def getDate(cls, days_offset):
elem = cls.soup.find(name="meta", property="article:published_time")
rawDateTime = elem['content']
dateTime = dateutil.parser.parse(rawDateTime) + datetime.timedelta(days=days_offset)
# articles published after the cutoff hour will only appear in the next days index
if dateTime.hour > cls.ST_PUBLISH_CUTOFF_HOUR:
dateTime = dateTime + datetime.timedelta(days=1)
return dateTime.strftime('%Y%m%d')
# is candidate title "similar" to title?
# some fuzzy matching is used
# returns 0 <= score <= 1
# higher score is more similar
@classmethod
def similar(cls, candidate, title):
title = title.lower()
candidate = candidate.lower()
articles = ["a", "an", "the"]
pronouns = ["all", "another", "any", "anybody", "anyone", "anything", "as", "aught", "both", "each", "each", "other", "either", "enough", "everybody", "everyone", "everything", "few", "he", "her", "hers", "herself", "him", "himself", "his", "idem", "it", "its", "itself", "many", "me", "mine", "most", "my", "myself", "naught", "neither", "no", "one", "nobody", "none", "nothing", "nought", "one", "one", "another", "other", "others", "ought", "our", "ours", "ourself", "ourselves", "several", "she", "some", "somebody", "someone", "something", "somewhat", "such", "suchlike", "that", "thee", "their", "theirs", "theirself", "theirselves", "them", "themself", "themselves", "there", "these", "they", "thine", "this", "those", "thou", "thy", "thyself", "us", "we", "what", "whatever", "whatnot", "whatsoever", "whence", "where", "whereby", "wherefrom", "wherein", "whereinto", "whereof", "whereon", "wherever", "wheresoever", "whereto", "whereunto", "wherewith", "wherewithal", "whether", "which", "whichever", "whichsoever", "who", "whoever", "whom", "whomever", "whomso", "whomsoever", "whose", "whosever", "whosesoever", "whoso", "whosoever", "ye", "yon", "yonder", "you", "your", "yours", "yourself", "yourselves"]
prepositions = ["of", "with", "at", "from", "into", "during", "including", "until", "against", "among", "throughout", "despite", "towards", "upon", "concerning", "to", "in", "for", "on", "by", "about", "like", "through", "over", "before", "between", "after", "since", "without", "under", "within", "along", "following", "across", "behind", "beyond", "plus", "except", "but", "up", "out", "around", "down", "off", "above", "near"]
conjunctions = ["for", "and", "nor", "but", "or", "yet", "so", "after", "although", "as", "as", "if", "as", "long", "as", "as", "much", "as", "as", "soon", "as", "as", "though", "because", "before", "by", "the", "time", "even", "if", "even", "though", "if", "in", "order", "that", "in", "case", "lest", "once", "only", "if", "provided", "that", "since", "so", "that", "than", "that", "though", "till", "unless", "until", "when", "whenever", "where", "wherever", "while", "both", "and", "either", "or", "neither", "nor", "not", "only", "but", "also", "whether", "or"]
redherrings = ["singapore", "singaporeans", "s'pore", "says", "is", "has", "are", "am", "were", "been", "have", "had", "having"]
blacklist = set(articles + pronouns + prepositions + conjunctions + redherrings)
score = 0
wordsScored = 0
for word in re.compile("[ '.:\;,.!&\"]").split(candidate):
if word in blacklist:
continue
currScore = cls.isNeedleInHay(needle=word, hay=title)
currScore = (currScore - 0.5) * 2 # ranges 0.5-1, so normalise to 0-1
if currScore < 0.5:
continue
wordsScored = wordsScored + 1
score = score + currScore
if wordsScored > 0:
finalScore = (score / wordsScored)
else:
finalScore = 0
return cls.MODERATED_MAX if finalScore == 1 else finalScore
#https://stackoverflow.com/a/31433394
# fuzzily searches for a needle in a haystack and returns the confidence that needle was found
@classmethod
def isNeedleInHay(cls, needle, hay):
needle_length = len(needle.split())
max_sim_val = 0
max_sim_string = u""
for ngram in ngrams(hay.split(), needle_length + int(.2*needle_length)):
hay_ngram = u" ".join(ngram)
similarity = SequenceMatcher(None, hay_ngram, needle).ratio()
if similarity > max_sim_val:
max_sim_val = similarity
max_sim_string = hay_ngram
return max_sim_val # how confident are we that needle was found in hay
    #https://stackoverflow.com/a/31505798
    # given a string paragraph, return a list of sentences
    @classmethod
    def split_into_sentences(cls, text):
        """Split *text* into a list of stripped sentences.

        Works by masking sentence-internal periods (titles, acronyms,
        initials, websites) as ``<prd>``, inserting ``<stop>`` markers at true
        sentence boundaries, then splitting on the markers.  The order of the
        substitutions below is significant — do not reorder.
        """
        # Regex fragments for the masking passes.
        alphabets= "([A-Za-z])"
        prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
        suffixes = "(Inc|Ltd|Jr|Sr|Co)"
        starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
        acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
        websites = "[.](com|net|org|io|gov)"
        # Pad so boundary patterns with leading/trailing spaces match at the edges.
        text = " " + text + " "
        text = text.replace("\n"," ")
        # Mask periods that do not end a sentence.
        text = re.sub(prefixes,"\\1<prd>",text)
        text = re.sub(websites,"<prd>\\1",text)
        if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
        text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
        text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
        text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
        text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
        text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
        text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
        text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
        # Move terminal punctuation outside closing quotes so the stop marker
        # lands after the quotation mark.
        if "”" in text: text = text.replace(".”","”.")
        if "\"" in text: text = text.replace(".\"","\".")
        if "!" in text: text = text.replace("!\"","\"!")
        if "?" in text: text = text.replace("?\"","\"?")
        # Insert stop markers at the remaining (real) sentence terminators,
        # then restore the masked periods.
        text = text.replace(".",".<stop>")
        text = text.replace("?","?<stop>")
        text = text.replace("!","!<stop>")
        text = text.replace("<prd>",".")
        sentences = text.split("<stop>")
        # The final fragment after the last terminator is a remainder, not a
        # sentence, so it is dropped.
        sentences = sentences[:-1]
        sentences = [s.strip() for s in sentences]
        return sentences
| 48.901709 | 1,225 | 0.606397 | import dateutil.parser
import datetime
import time
import re
import requests
from bs4 import BeautifulSoup
from comment import Comment
from difflib import SequenceMatcher
from handlers.AbstractBaseHandler import AbstractBaseHandler, HandlerError
from newspaper import Article
from nltk.util import ngrams
import codecs
class STHandler(AbstractBaseHandler):
    """Handler for Straits Times submissions.

    Non-premium articles are scraped directly from the submitted URL.
    Premium (paywalled) articles are recovered by fuzzily matching the
    submission's title and preview text against the pressreader.com
    re-publication of the same (or a nearby) day's paper.
    """
    # Per-submission mutable state, populated by handle()/handlePremium().
    soup = None
    url = None
    title = None
    # How many extra days of pressreader indexes to try after publication.
    MAX_DAYS_OFFSET = 2
    # Cap on pressreader downloads per submission (rate limiting).
    MAX_CURLS_ALLOWED = 5
    # Minimum weighted fuzzy score for a preview/body match.
    MIN_PASSING_SCORE = 0.5
    # Seconds to sleep between consecutive pressreader downloads.
    SLEEP_BETWEEN_CURLS = 1
    # Articles published after this hour land in the next day's index.
    ST_PUBLISH_CUTOFF_HOUR = 5
    # Ceiling applied to "perfect" title-similarity scores.
    MODERATED_MAX = 0.8
    @classmethod
    def handle(cls, url):
        """Entry point: return a Comment for the article at *url*."""
        cls.url = url
        cls.soup = cls.makeSoup()
        return cls.handlePremium() if cls.isPremiumArticle() else cls.handleNonPremium()
    @classmethod
    def makeSoup(cls):
        """Download cls.url and cache/return its parsed BeautifulSoup tree."""
        html = requests.get(cls.url).text
        soup = BeautifulSoup(html, "html.parser")
        cls.soup = soup
        return soup
    @classmethod
    def isPremiumArticle(cls):
        """True when the page carries the ST premium (paywall) flag element."""
        # NOTE(review): this requests.get result is unused; the cached
        # cls.soup from makeSoup() is what is actually searched.
        html = requests.get(cls.url).text
        elem = cls.soup.find(name="div", class_="paid-premium st-flag-1")
        return elem is not None
    @classmethod
    def handleNonPremium(cls):
        """Scrape a non-paywalled article directly from the submitted URL."""
        article = Article(cls.url)
        article.download()
        article.parse()
        title = article.title
        body = article.text
        return Comment(title, body)
    @classmethod
    def handlePremium(cls):
        """Recover a paywalled article from pressreader; None when not found."""
        cls.title = cls.soup.find(name="meta", property="og:title")['content']
        print(f"article title: {cls.title}")
        # An article may run for multiple days or be published a day or two later
        for days_offset in range(0, cls.MAX_DAYS_OFFSET):
            # Trying to find a scraped article with the closest title/body to the submission
            possibleMatchingArticles = cls.generateTodaysArticles(days_offset)
            closestArticle = cls.getMatchingArticle(possibleMatchingArticles)
            if closestArticle is not None:
                return closestArticle
        print(f"unable to find a suitable article that matches {cls.title}, skipping submission")
        return None
    @classmethod
    def generateTodaysArticles(cls, days_offset):
        """Return (anchor, title_score) pairs from the day's index, best first."""
        articlesList = BeautifulSoup(cls.getArticlesIndex(days_offset), "html.parser")
        articles = articlesList.findAll(name="a")
        scoredArticles = [( article, cls.similar(article.text, cls.title)) for article in articles]
        # sorted such that scoredArticles[0] has the best chance of being the article we want
        scoredArticles = sorted(scoredArticles, key=lambda x: x[1], reverse=True)
        return scoredArticles
    @classmethod
    def getMatchingArticle(cls, scoredArticles):
        """Download candidates in score order; return the first body match."""
        # every article in scoredArticles has a chance of being the article we want
        # with scoredArticles[0] being the most likely and the last element being the least
        # due to rate limits we cannot check all of the articles
        articlesCheckedSoFar = 0
        while articlesCheckedSoFar < cls.MAX_CURLS_ALLOWED and len(scoredArticles) > 0:
            currArticle = scoredArticles.pop(0)
            currComment = cls.makeComment(currArticle[0]['href'])
            # NOTE(review): the preview is invariant across iterations and
            # could be fetched once before the loop.
            previewComment = cls.handleNonPremium()
            if cls.articleBodiesMatch(previewComment.body, currComment.body):
                return currComment
            articlesCheckedSoFar = articlesCheckedSoFar + 1
            time.sleep(cls.SLEEP_BETWEEN_CURLS)
    @classmethod
    def articleBodiesMatch(cls, previewBody, articleBody):
        """True when previewBody appears to be contained in articleBody."""
        # the higher the score, the better confidence that previewBody is a subset of articleBody
        score = 0
        for sentence in cls.split_into_sentences(previewBody):
            weight = len(sentence) / float(len(previewBody)) #longer sentences carry more weight
            score = score + cls.isNeedleInHay(needle=sentence, hay=articleBody) * weight
        return score > cls.MIN_PASSING_SCORE
    @classmethod
    def makeComment(cls, bestCandidate):
        """Download the pressreader article at *bestCandidate* as a Comment."""
        url = f"https://www.pressreader.com{bestCandidate}"
        article = Article(url, browser_user_agent = "Googlebot-News", keep_article_html=True)
        article.download()
        try:
            article.parse()
        # NOTE(review): bare except also catches KeyboardInterrupt/SystemExit.
        except:
            return Comment('','')
        title = article.title.replace("\xad", "") # clean the text
        body = article.text.replace("\xad", "") # clean the text
        print(f"checking the article in this url: {url} with title {title}")
        return Comment(title, body)
    @classmethod
    def getArticlesIndex(cls, days_offset):
        """Fetch the raw HTML of the pressreader index for the offset day."""
        publishedDate = cls.getDate(days_offset)
        userAgent = "Googlebot-News"
        url = f"https://www.pressreader.com/singapore/the-straits-times/{publishedDate}"
        headers = { "User-Agent": userAgent }
        articlesList = requests.get(url, headers=headers).text
        articlesList = articlesList.replace("­", "") # clean the text
        return articlesList
    @classmethod
    def getDate(cls, days_offset):
        """Return the index date (YYYYMMDD) shifted by *days_offset* days."""
        elem = cls.soup.find(name="meta", property="article:published_time")
        rawDateTime = elem['content']
        dateTime = dateutil.parser.parse(rawDateTime) + datetime.timedelta(days=days_offset)
        # articles published after the cutoff hour will only appear in the next days index
        if dateTime.hour > cls.ST_PUBLISH_CUTOFF_HOUR:
            dateTime = dateTime + datetime.timedelta(days=1)
        return dateTime.strftime('%Y%m%d')
    # is candidate title "similar" to title?
    # some fuzzy matching is used
    # returns 0 <= score <= 1
    # higher score is more similar
    @classmethod
    def similar(cls, candidate, title):
        """Average the fuzzy per-word match of *candidate* against *title*,
        ignoring stop words; perfect scores are capped at MODERATED_MAX."""
        title = title.lower()
        candidate = candidate.lower()
        # Stop words that would otherwise match almost any headline.
        articles = ["a", "an", "the"]
        pronouns = ["all", "another", "any", "anybody", "anyone", "anything", "as", "aught", "both", "each", "each", "other", "either", "enough", "everybody", "everyone", "everything", "few", "he", "her", "hers", "herself", "him", "himself", "his", "idem", "it", "its", "itself", "many", "me", "mine", "most", "my", "myself", "naught", "neither", "no", "one", "nobody", "none", "nothing", "nought", "one", "one", "another", "other", "others", "ought", "our", "ours", "ourself", "ourselves", "several", "she", "some", "somebody", "someone", "something", "somewhat", "such", "suchlike", "that", "thee", "their", "theirs", "theirself", "theirselves", "them", "themself", "themselves", "there", "these", "they", "thine", "this", "those", "thou", "thy", "thyself", "us", "we", "what", "whatever", "whatnot", "whatsoever", "whence", "where", "whereby", "wherefrom", "wherein", "whereinto", "whereof", "whereon", "wherever", "wheresoever", "whereto", "whereunto", "wherewith", "wherewithal", "whether", "which", "whichever", "whichsoever", "who", "whoever", "whom", "whomever", "whomso", "whomsoever", "whose", "whosever", "whosesoever", "whoso", "whosoever", "ye", "yon", "yonder", "you", "your", "yours", "yourself", "yourselves"]
        prepositions = ["of", "with", "at", "from", "into", "during", "including", "until", "against", "among", "throughout", "despite", "towards", "upon", "concerning", "to", "in", "for", "on", "by", "about", "like", "through", "over", "before", "between", "after", "since", "without", "under", "within", "along", "following", "across", "behind", "beyond", "plus", "except", "but", "up", "out", "around", "down", "off", "above", "near"]
        conjunctions = ["for", "and", "nor", "but", "or", "yet", "so", "after", "although", "as", "as", "if", "as", "long", "as", "as", "much", "as", "as", "soon", "as", "as", "though", "because", "before", "by", "the", "time", "even", "if", "even", "though", "if", "in", "order", "that", "in", "case", "lest", "once", "only", "if", "provided", "that", "since", "so", "that", "than", "that", "though", "till", "unless", "until", "when", "whenever", "where", "wherever", "while", "both", "and", "either", "or", "neither", "nor", "not", "only", "but", "also", "whether", "or"]
        redherrings = ["singapore", "singaporeans", "s'pore", "says", "is", "has", "are", "am", "were", "been", "have", "had", "having"]
        blacklist = set(articles + pronouns + prepositions + conjunctions + redherrings)
        score = 0
        wordsScored = 0
        for word in re.compile("[ '.:\;,.!&\"]").split(candidate):
            if word in blacklist:
                continue
            currScore = cls.isNeedleInHay(needle=word, hay=title)
            currScore = (currScore - 0.5) * 2 # ranges 0.5-1, so normalise to 0-1
            if currScore < 0.5:
                continue
            wordsScored = wordsScored + 1
            score = score + currScore
        if wordsScored > 0:
            finalScore = (score / wordsScored)
        else:
            finalScore = 0
        return cls.MODERATED_MAX if finalScore == 1 else finalScore
    #https://stackoverflow.com/a/31433394
    # fuzzily searches for a needle in a haystack and returns the confidence that needle was found
    @classmethod
    def isNeedleInHay(cls, needle, hay):
        """Best SequenceMatcher ratio of *needle* against hay word n-grams."""
        needle_length = len(needle.split())
        max_sim_val = 0
        max_sim_string = u""
        for ngram in ngrams(hay.split(), needle_length + int(.2*needle_length)):
            hay_ngram = u" ".join(ngram)
            similarity = SequenceMatcher(None, hay_ngram, needle).ratio()
            if similarity > max_sim_val:
                max_sim_val = similarity
                max_sim_string = hay_ngram
        return max_sim_val # how confident are we that needle was found in hay
    #https://stackoverflow.com/a/31505798
    # given a string paragraph, return a list of sentences
    @classmethod
    def split_into_sentences(cls, text):
        """Split *text* into sentences by masking sentence-internal periods
        as <prd> and marking true boundaries with <stop>; substitution order
        below is significant."""
        alphabets= "([A-Za-z])"
        prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
        suffixes = "(Inc|Ltd|Jr|Sr|Co)"
        starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
        acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
        websites = "[.](com|net|org|io|gov)"
        text = " " + text + " "
        text = text.replace("\n"," ")
        text = re.sub(prefixes,"\\1<prd>",text)
        text = re.sub(websites,"<prd>\\1",text)
        if "Ph.D" in text: text = text.replace("Ph.D.","Ph<prd>D<prd>")
        text = re.sub("\s" + alphabets + "[.] "," \\1<prd> ",text)
        text = re.sub(acronyms+" "+starters,"\\1<stop> \\2",text)
        text = re.sub(alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>\\3<prd>",text)
        text = re.sub(alphabets + "[.]" + alphabets + "[.]","\\1<prd>\\2<prd>",text)
        text = re.sub(" "+suffixes+"[.] "+starters," \\1<stop> \\2",text)
        text = re.sub(" "+suffixes+"[.]"," \\1<prd>",text)
        text = re.sub(" " + alphabets + "[.]"," \\1<prd>",text)
        # Move terminal punctuation outside closing quotes before marking stops.
        if "”" in text: text = text.replace(".”","”.")
        if "\"" in text: text = text.replace(".\"","\".")
        if "!" in text: text = text.replace("!\"","\"!")
        if "?" in text: text = text.replace("?\"","\"?")
        text = text.replace(".",".<stop>")
        text = text.replace("?","?<stop>")
        text = text.replace("!","!<stop>")
        text = text.replace("<prd>",".")
        sentences = text.split("<stop>")
        sentences = sentences[:-1]
        sentences = [s.strip() for s in sentences]
        return sentences
| true | true |
f73ad72a01bae2b413234ceb961487abb75f4fc6 | 5,393 | py | Python | dexp/cli/video_commands/overlay.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 16 | 2021-04-21T14:09:19.000Z | 2022-03-22T02:30:59.000Z | dexp/cli/video_commands/overlay.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 28 | 2021-04-15T17:43:08.000Z | 2022-03-29T16:08:35.000Z | dexp/cli/video_commands/overlay.py | haesleinhuepf/dexp | 2ea84f3db323724588fac565fae56f0d522bc5ca | [
"BSD-3-Clause"
] | 3 | 2022-02-08T17:41:30.000Z | 2022-03-18T15:32:27.000Z | import click
from dexp.cli.defaults import _default_workers_backend
from dexp.video.overlay import add_overlays_image_sequence
@click.command()
@click.argument("input_path", type=str)
@click.option("--output_path", "-o", type=str, default=None, help="Output folder for overlayed frames.")
@click.option("--scalebar/--no-scalebar", "-sb/-nsb", default=True, help="True to insert scale bar.", show_default=True)
@click.option(
"--barlength", "-bl", type=float, default=1, help="Length of scale bar in the provided unit.", show_default=True
)
@click.option(
"--barscale",
"-bs",
type=float,
default=1,
help="Conversion factor from pixels to units -- what is the side length of a pixel/voxel in units.",
show_default=True,
)
@click.option("--barheight", "-bh", type=int, default=4, help="Height of th scale bar in pixels", show_default=True)
@click.option(
"--barpos",
"-bp",
type=str,
default="bottom_right",
help="Positions of the scale bar in pixels in natural order: (x, y)."
" Can also be a string: bottom_left, bottom_right, top_left, top_right.",
show_default=True,
)
@click.option("--barunit", "-bu", type=str, default="μm", help="Scale bar unit name.", show_default=True)
@click.option(
"--timestamp/--no-timestamp", "-ts/-nts", default=True, help="True to insert time stamp.", show_default=True
)
@click.option("--timestart", "-ts", type=float, default=0, help="Start time for time stamp", show_default=True)
@click.option(
"--timeinterval",
"-ti",
type=float,
default=1,
help="Time interval inn units of time between consecutive images.",
show_default=True,
)
@click.option(
"--timepos",
"-tp",
type=str,
default="top_right",
help="Positions of the time stamp in pixels in natural order: (x, y)."
" Can also be a string: bottom_left, bottom_right, top_left, top_right.",
show_default=True,
)
@click.option("--timeunit", "-tu", type=str, default="s", help="Time stamp time unit name.", show_default=True)
@click.option(
"--margin",
"-mg",
type=float,
default=1,
help="Margin around bar expressed in units relative to the text height",
show_default=True,
)
@click.option(
"--color",
"-c",
type=str,
default=None,
help="Color of the bar and text as tuple of 4 values: (R, G, B, A)",
show_default=True,
)
@click.option(
"--numberformat",
"-nf",
type=str,
default="{:.1f}",
help="Format string to represent the start and end values.",
show_default=True,
)
@click.option("--fontname", "-fn", type=str, default="Helvetica", help="Font name.", show_default=True)
@click.option("--fontsize", "-fs", type=int, default=32, help="Font size in pixels.", show_default=True)
@click.option(
"--mode",
"-md",
type=str,
default="max",
help="Blending modes. Either one for all images, or one per image in the form of a sequence."
" Blending modes are: mean, add, satadd, max, alpha.",
show_default=True,
)
@click.option("--overwrite", "-w", is_flag=True, help="Force overwrite of output images.", show_default=True)
@click.option(
"--workers",
"-k",
type=int,
default=-1,
help="Number of worker threads to spawn, set to -1 for maximum number of workers",
show_default=True,
) #
@click.option(
"--workersbackend",
"-wkb",
type=str,
default=_default_workers_backend,
help="What backend to spawn workers with, can be ‘loky’ (multi-process) or ‘threading’ (multi-thread) ",
show_default=True,
) #
@click.option("--device", "-d", type=int, default=0, help="Sets the CUDA devices id, e.g. 0,1,2", show_default=True) #
def overlay(
input_path: str,
output_path: str,
scalebar,
barlength,
barscale,
barheight,
barpos,
barunit,
timestamp,
timestart,
timeinterval,
timepos,
timeunit,
margin,
color,
numberformat,
fontname,
fontsize,
mode,
overwrite,
workers,
workersbackend,
device,
):
"""Adds scale bars and timestamps to videos."""
# Default output path:
if output_path is None:
output_path = input_path + "_overlay"
elif output_path.startswith("_"):
output_path = input_path + output_path
# Parse bar position:
if "," in barpos:
barpos = tuple(float(v) for v in barpos.split(","))
# Parse time position:
if "," in timepos:
timepos = tuple(float(v) for v in timepos.split(","))
# Parse color:
if color is not None and "," in color:
color = tuple(float(v) for v in color.split(","))
add_overlays_image_sequence(
input_path=input_path,
output_path=output_path,
scale_bar=scalebar,
scale_bar_length_in_unit=barlength,
scale_bar_pixel_scale=barscale,
scale_bar_bar_height=barheight,
scale_bar_translation=barpos,
scale_bar_unit=barunit,
time_stamp=timestamp,
time_stamp_start_time=timestart,
time_stamp_time_interval=timeinterval,
time_stamp_translation=timepos,
time_stamp_unit=timeunit,
margin=margin,
color=color,
number_format=numberformat,
font_name=fontname,
font_size=fontsize,
mode=mode,
overwrite=overwrite,
workers=workers,
workersbackend=workersbackend,
device=device,
)
| 30.297753 | 120 | 0.649546 | import click
from dexp.cli.defaults import _default_workers_backend
from dexp.video.overlay import add_overlays_image_sequence
@click.command()
@click.argument("input_path", type=str)
@click.option("--output_path", "-o", type=str, default=None, help="Output folder for overlayed frames.")
@click.option("--scalebar/--no-scalebar", "-sb/-nsb", default=True, help="True to insert scale bar.", show_default=True)
@click.option(
"--barlength", "-bl", type=float, default=1, help="Length of scale bar in the provided unit.", show_default=True
)
@click.option(
"--barscale",
"-bs",
type=float,
default=1,
help="Conversion factor from pixels to units -- what is the side length of a pixel/voxel in units.",
show_default=True,
)
@click.option("--barheight", "-bh", type=int, default=4, help="Height of th scale bar in pixels", show_default=True)
@click.option(
"--barpos",
"-bp",
type=str,
default="bottom_right",
help="Positions of the scale bar in pixels in natural order: (x, y)."
" Can also be a string: bottom_left, bottom_right, top_left, top_right.",
show_default=True,
)
@click.option("--barunit", "-bu", type=str, default="μm", help="Scale bar unit name.", show_default=True)
@click.option(
"--timestamp/--no-timestamp", "-ts/-nts", default=True, help="True to insert time stamp.", show_default=True
)
@click.option("--timestart", "-ts", type=float, default=0, help="Start time for time stamp", show_default=True)
@click.option(
"--timeinterval",
"-ti",
type=float,
default=1,
help="Time interval inn units of time between consecutive images.",
show_default=True,
)
@click.option(
"--timepos",
"-tp",
type=str,
default="top_right",
help="Positions of the time stamp in pixels in natural order: (x, y)."
" Can also be a string: bottom_left, bottom_right, top_left, top_right.",
show_default=True,
)
@click.option("--timeunit", "-tu", type=str, default="s", help="Time stamp time unit name.", show_default=True)
@click.option(
"--margin",
"-mg",
type=float,
default=1,
help="Margin around bar expressed in units relative to the text height",
show_default=True,
)
@click.option(
"--color",
"-c",
type=str,
default=None,
help="Color of the bar and text as tuple of 4 values: (R, G, B, A)",
show_default=True,
)
@click.option(
"--numberformat",
"-nf",
type=str,
default="{:.1f}",
help="Format string to represent the start and end values.",
show_default=True,
)
@click.option("--fontname", "-fn", type=str, default="Helvetica", help="Font name.", show_default=True)
@click.option("--fontsize", "-fs", type=int, default=32, help="Font size in pixels.", show_default=True)
@click.option(
"--mode",
"-md",
type=str,
default="max",
help="Blending modes. Either one for all images, or one per image in the form of a sequence."
" Blending modes are: mean, add, satadd, max, alpha.",
show_default=True,
)
@click.option("--overwrite", "-w", is_flag=True, help="Force overwrite of output images.", show_default=True)
@click.option(
"--workers",
"-k",
type=int,
default=-1,
help="Number of worker threads to spawn, set to -1 for maximum number of workers",
show_default=True,
)
@click.option(
"--workersbackend",
"-wkb",
type=str,
default=_default_workers_backend,
help="What backend to spawn workers with, can be ‘loky’ (multi-process) or ‘threading’ (multi-thread) ",
show_default=True,
)
@click.option("--device", "-d", type=int, default=0, help="Sets the CUDA devices id, e.g. 0,1,2", show_default=True)
def overlay(
input_path: str,
output_path: str,
scalebar,
barlength,
barscale,
barheight,
barpos,
barunit,
timestamp,
timestart,
timeinterval,
timepos,
timeunit,
margin,
color,
numberformat,
fontname,
fontsize,
mode,
overwrite,
workers,
workersbackend,
device,
):
if output_path is None:
output_path = input_path + "_overlay"
elif output_path.startswith("_"):
output_path = input_path + output_path
if "," in barpos:
barpos = tuple(float(v) for v in barpos.split(","))
if "," in timepos:
timepos = tuple(float(v) for v in timepos.split(","))
if color is not None and "," in color:
color = tuple(float(v) for v in color.split(","))
add_overlays_image_sequence(
input_path=input_path,
output_path=output_path,
scale_bar=scalebar,
scale_bar_length_in_unit=barlength,
scale_bar_pixel_scale=barscale,
scale_bar_bar_height=barheight,
scale_bar_translation=barpos,
scale_bar_unit=barunit,
time_stamp=timestamp,
time_stamp_start_time=timestart,
time_stamp_time_interval=timeinterval,
time_stamp_translation=timepos,
time_stamp_unit=timeunit,
margin=margin,
color=color,
number_format=numberformat,
font_name=fontname,
font_size=fontsize,
mode=mode,
overwrite=overwrite,
workers=workers,
workersbackend=workersbackend,
device=device,
)
| true | true |
f73ad750dfbd94912907169da6f40a4583e0b43a | 26,254 | py | Python | old/shiny/match_localities/match_SI_GBIF.py | Smithsonian/Mass-Georeferencing | bb7d81cd82684900003d3049764cd2d243325248 | [
"Apache-2.0"
] | 5 | 2020-06-24T16:12:48.000Z | 2021-11-08T09:46:02.000Z | old/shiny/match_localities/match_SI_GBIF.py | Smithsonian/Mass-Georeferencing | bb7d81cd82684900003d3049764cd2d243325248 | [
"Apache-2.0"
] | 8 | 2020-07-06T21:11:58.000Z | 2020-07-22T13:10:48.000Z | old/shiny/match_localities/match_SI_GBIF.py | Smithsonian/Mass-Georeferencing | bb7d81cd82684900003d3049764cd2d243325248 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
#
# Match SI GBIF records without coordinates to other GBIF records for the species/genus
#
import psycopg2, os, logging, sys, locale, psycopg2.extras
import pandas as pd
from time import localtime, strftime
from fuzzywuzzy import fuzz
import pycountry
#Import settings
import settings
#Set locale for number format (thousands separators in log output)
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
#Get current time, used to name the per-run log file
current_time = strftime("%Y%m%d_%H%M%S", localtime())
# Set Logging: full DEBUG detail goes to a timestamped file, INFO and above
# is mirrored to the console.
if not os.path.exists('logs'):
    os.makedirs('logs')
logfile_name = 'logs/{}.log'.format(current_time)
# from http://stackoverflow.com/a/9321890
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
                    datefmt='%m-%d %H:%M:%S',
                    filename=logfile_name,
                    filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
# Attach the console handler to the root logger so all loggers inherit it.
logging.getLogger('').addHandler(console)
# Named logger used throughout the rest of the script.
logger1 = logging.getLogger("si_georef")
# Example call:
# search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
def search_fuzzy(locality, stateprovince, data, filter_stateprovince = True, method = 'partial', threshold = 80):
    """Search localities in the databases for matches using fuzzywuzzy.

    Parameters:
        locality: locality string to match.
        stateprovince: state/province string, compared when
            *filter_stateprovince* is True (the two scores are averaged).
        data: pandas DataFrame of candidates with 'name' and 'stateprovince'
            columns.  NOTE: mutated in place — a 'score' column is added.
        method: 'partial' (fuzz.partial_ratio) or 'set' (fuzz.token_set_ratio).
        threshold: minimum score (0-100) a row must exceed to be returned.

    Returns the rows of *data* scoring above *threshold*.  Invalid
    *threshold* or *method* values abort the script (this is a CLI tool).
    """
    try:
        int(threshold)
    except (TypeError, ValueError):
        # Narrowed from a bare except; keep the original abort-on-bad-input
        # behaviour of this command-line script.
        print('invalid threshold value')
        sys.exit(1)
    # Map method name to scorer once.  Previously an unrecognised method fell
    # through both branches and crashed later with an UnboundLocalError.
    scorers = {'partial': fuzz.partial_ratio, 'set': fuzz.token_set_ratio}
    if method not in scorers:
        print('invalid method value')
        sys.exit(1)
    scorer = scorers[method]
    data['score1'] = data.apply(lambda row: scorer(locality, row['name']), axis=1)
    if filter_stateprovince:
        data['score2'] = data.apply(lambda row: scorer(stateprovince, row['stateprovince']), axis=1)
        data['score'] = (data['score1'] + data['score2']) / 2
        results = data.drop(columns=['score1', 'score2'])
    else:
        data['score'] = data['score1']
        results = data.drop(columns=['score1'])
    # Keep only candidates above the confidence threshold.
    results = results[results.score > threshold]
    return results
#Connect to the dpogis database; credentials come from the local settings module.
try:
    logger1.info("Connecting to the database.")
    conn = psycopg2.connect(host = settings.pg_host, database = settings.pg_db, user = settings.pg_user, connect_timeout = 60)
# NOTE(review): bare except hides the real connection error (and would also
# catch KeyboardInterrupt); consider `except psycopg2.Error as e` and logging e.
except:
    print(" ERROR: Could not connect to server.")
    sys.exit(1)
# Autocommit so every statement (including the DELETEs below) is applied immediately.
conn.autocommit = True
# RealDictCursor returns rows as dicts keyed by column name.
cur = conn.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg == "plants":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND phylum = 'Tracheophyta' GROUP BY species, kingdom, phylum, class, _order, family, genus"
#sel_species = "SELECT DISTINCT species FROM gbif_si WHERE species != '' AND ((decimallatitude is null and decimallongitude is null) OR (georeferenceprotocol LIKE '%%unknown%%') OR (locality != '')) AND phylum = 'Tracheophyta'"
elif arg == "birds":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Aves' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "mammals":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Mammalia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "reptiles":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Reptilia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "amphibians":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Amphibia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "bivalves":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Bivalvia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "gastropods":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Gastropoda' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "crabs":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Malacostraca' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "echinoids":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Echinoidea' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "iz":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND family = 'Unionidae' GROUP BY species, kingdom, phylum, class, _order, family, genus"
else:
print("Invalid argument")
sys.exit(1)
#Select species
# Run the taxon-group query chosen above and fetch every distinct species
# (with its full higher taxonomy) that still needs georeferencing.
cur.execute(sel_species)
logger1.debug(cur.query)
scinames = cur.fetchall()
# Make the run idempotent: delete any match/summary rows left over from a
# previous run for these species before regenerating them below.
for sciname in scinames:
    cur.execute("DELETE FROM gbif_si_matches WHERE species = %s", (sciname['species'],))
    logger1.debug(cur.query)
    # The summary delete keys on the full taxonomy, not just the species epithet.
    cur.execute("DELETE FROM gbif_si_summary WHERE species = %(species)s AND kingdom = %(kingdom)s AND phylum = %(phylum)s AND class = %(class)s AND _order = %(_order)s AND family = %(family)s AND genus = %(genus)s", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})
    logger1.debug(cur.query)
#search_fuzzy(locality, scientificname, countrycode, db, cur, rank = 'species', method = 'partial', threshold = 80):
# NOTE(review): the signature in the comment above looks stale; the search_fuzzy()
# defined earlier in this file takes
# (locality, stateprovince, data, filter_stateprovince, method, threshold).
#Loop the species
# Main matching pass. For each species and each country it occurs in, pull the
# records that have a locality string but no coordinates, then fuzzy-match the
# locality against several candidate gazetteers (GBIF localities for the same
# species and genus, WDPA protected areas, GADM admin levels 1-5, GNIS for US
# records, and a global lakes table). Every candidate scoring above the
# threshold is inserted into gbif_si_matches.
for sciname in scinames:
    logger1.info("sciname: {}".format(sciname['species']))
    #Get countries
    # Distinct country codes in which this species has locality-only records.
    cur.execute("SELECT countrycode FROM gbif_si WHERE species = %s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode", (sciname['species'],))
    logger1.debug(cur.query)
    countries = cur.fetchall()
    for country in countries:
        #Get records for the country
        # One row per distinct locality/taxonomy combination; MAX(gbifid) picks a
        # single representative record id for the group.
        cur.execute("SELECT MAX(gbifid::bigint)::text as gbifid, countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species = %(species)s AND countrycode = %(countrycode)s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus", {'species': sciname['species'], 'countrycode': country['countrycode']})
        logger1.debug(cur.query)
        records = pd.DataFrame(cur.fetchall())
        ################
        #Get candidates
        ################
        #GBIF - species
        # Candidates: localities of other GBIF records of the same species in the
        # same country (these carry coordinates/context the SI records lack).
        logger1.info("GBIF: {}".format(country['countrycode']))
        # NOTE(review): this template interpolates values with str.format() and does
        # not escape quotes (unlike the GADM queries below, which use
        # country.replace("'", "''")); a species name containing an apostrophe would
        # break the SQL. Consider parameterized queries here.
        query_template = "SELECT MAX(gbifid::bigint)::text as uid, locality as name, count(*) as no_records, countrycode, trim(leading ', ' from replace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy"
        cur.execute(query_template.format(rank = 'species', scientificname = sciname['species'], countrycode = country['countrycode']))
        logger1.debug(cur.query)
        candidates = pd.DataFrame(cur.fetchall())
        logger1.info("No. of GBIF candidates: {}".format(len(candidates)))
        if len(candidates) > 0:
            #Iterate each record
            for index, record in records.iterrows():
                logger1.info("record gbifid: {}".format(record['gbifid']))
                logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                # Only constrain on stateprovince when the record actually has one.
                if record['stateprovince'] == '':
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)
                else:
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)
                logger1.info("No. of possible matches: {}".format(len(data)))
                if len(data) > 0:
                    # Persist every candidate above the threshold, tagged with its source.
                    for index, row in data.iterrows():
                        cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES
                            (%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())""", {'gbifid': record['gbifid'], 'source': 'gbif.species', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})
                        logger1.debug(cur.query)
        #GBIF - genus
        # Same as above, but widen to congeners (same genus, different species).
        logger1.info("GBIF genus: {}".format(country['countrycode']))
        query_template = "SELECT MAX(gbifid::bigint)::text as uid, locality as name, count(*) as no_records, countrycode, trim(leading ', ' from replace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{genus}' AND species != '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy"
        cur.execute(query_template.format(rank = 'genus', genus = sciname['genus'], scientificname = sciname['species'], countrycode = country['countrycode']))
        logger1.debug(cur.query)
        candidates = pd.DataFrame(cur.fetchall())
        logger1.info("No. of GBIF candidates: {}".format(len(candidates)))
        if len(candidates) > 0:
            #Iterate each record
            for index, record in records.iterrows():
                logger1.info("record gbifid: {}".format(record['gbifid']))
                logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                if record['stateprovince'] == '':
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)
                else:
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)
                logger1.info("No. of possible matches: {}".format(len(data)))
                if len(data) > 0:
                    for index, row in data.iterrows():
                        cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES
                            (%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())""", {'gbifid': record['gbifid'], 'source': 'gbif.genus', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})
                        logger1.debug(cur.query)
        ######################
        #WDPA
        # WDPA protected areas (polygon and point layers) for this country.
        logger1.info("WDPA: {}".format(country['countrycode']))
        # NOTE(review): `record` below is whatever the last iteration of the GBIF
        # candidate loops left bound -- if neither GBIF query produced candidates it
        # is undefined (NameError). All records in this group share the same country,
        # so `country['countrycode']` looks like the intended value; confirm and fix.
        if pycountry.countries.get(alpha_2 = record['countrycode']) != None:
            # WDPA keys on ISO-3166 alpha-3; convert from the GBIF alpha-2 code.
            iso = pycountry.countries.get(alpha_2 = record['countrycode']).alpha_3
            # Match both the current and the original (orig_name) protected-area names.
            query_template = """
                SELECT uid, name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
                UNION
                SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
                UNION
                SELECT uid, name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
                UNION
                SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
                """
            cur.execute(query_template.format(iso = iso))
            logger1.debug(cur.query)
            candidates = pd.DataFrame(cur.fetchall())
            logger1.info("No. of WDPA candidates: {}".format(len(candidates)))
            if len(candidates) > 0:
                #Iterate each record
                for index, record in records.iterrows():
                    logger1.info("record gbifid: {}".format(record['gbifid']))
                    logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
                    logger1.info("No. of possible matches: {}".format(len(data)))
                    if len(data) > 0:
                        for index, row in data.iterrows():
                            # Note: located_at is filled with the candidate's stateprovince here.
                            cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
                                (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
                            logger1.debug(cur.query)
        ######################
        #GADM
        # GADM administrative boundaries, levels 1 (state) down to 5, matched by
        # both the primary and the variant name columns.
        logger1.info("GADM: {}".format(country['countrycode']))
        if pycountry.countries.get(alpha_2 = record['countrycode']) != None:
            # NOTE(review): this rebinds the loop variable `country` (a dict with
            # 'countrycode') to a plain country-name string for the rest of this
            # iteration; later sections (e.g. Lakes) depend on that. Fragile -- a
            # differently named variable would be safer. Confirm before changing.
            country = pycountry.countries.get(alpha_2 = record['countrycode']).name
            #GADM1
            # Quotes in country names (e.g. "Cote d'Ivoire") are escaped for SQL.
            query_template = "SELECT uid, name_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' UNION SELECT uid, varname_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' AND varname_1 IS NOT NULL"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            data = pd.DataFrame(cur.fetchall())
            #GADM2
            query_template = "SELECT uid, name_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' UNION SELECT uid, varname_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' AND varname_2 IS NOT NULL"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            data1 = pd.DataFrame(cur.fetchall())
            data = pd.concat([data, data1], ignore_index=True)
            #GADM3
            query_template = "SELECT uid, name_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' UNION SELECT uid, varname_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' AND varname_3 IS NOT NULL"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            data1 = pd.DataFrame(cur.fetchall())
            data = pd.concat([data, data1], ignore_index=True)
            #GADM4
            query_template = "SELECT uid, name_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' UNION SELECT uid, varname_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' AND varname_4 IS NOT NULL"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            data1 = pd.DataFrame(cur.fetchall())
            data = pd.concat([data, data1], ignore_index=True)
            #GADM5
            # GADM5 has no variant-name column, hence no UNION here.
            query_template = "SELECT uid, name_5 as name, name_4 || ', ' || name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm5' as source FROM gadm5 WHERE name_0 = '{country}'"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            data1 = pd.DataFrame(cur.fetchall())
            candidates = pd.concat([data, data1], ignore_index=True)
            logger1.info("No. of GADM candidates: {}".format(len(candidates)))
            if len(candidates) > 0:
                #Iterate each record
                for index, record in records.iterrows():
                    logger1.info("record gbifid: {}".format(record['gbifid']))
                    logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
                    logger1.info("No. of possible matches: {}".format(len(data)))
                    if len(data) > 0:
                        for index, row in data.iterrows():
                            cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
                                (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
                            logger1.debug(cur.query)
        ######################
        #Geonames
        # Disabled Geonames matching pass, kept for reference.
        # if record['countrycode'] != None:
        #     query_template = """
        #             SELECT uid, name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'
        #             UNION
        #             SELECT uid, unnest(string_to_array(alternatenames, ',')) as name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'
        #         """
        #     cur.execute(query_template.format(countrycode = record['countrycode']))
        #     logger1.debug(cur.query)
        #     candidates = pd.DataFrame(cur.fetchall())
        #     logger1.info("No. of candidates: {}".format(len(candidates)))
        #     if len(candidates) > 0:
        #         #Iterate each record
        #         for index, record in records.iterrows():
        #             logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
        #             data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
        #             for index, row in data.iterrows():
        #                 cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
        #                     (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
        #                 logger1.debug(cur.query)
        ######################
        #GNIS
        # GNIS place names, US records only, restricted to the record's state.
        if record['countrycode'] == 'US':
            logger1.info("GNIS: {}, US".format(record['stateprovince']))
            query_template = "SELECT uid, feature_name as name, gadm2 as stateprovince, 'gnis' as source FROM gnis WHERE state_alpha ILIKE '%{stateprovince}%'"
            cur.execute(query_template.format(stateprovince = record['stateprovince']))
            logger1.debug(cur.query)
            candidates = pd.DataFrame(cur.fetchall())
            logger1.info("No. of GNIS candidates: {}".format(len(candidates)))
            if len(candidates) > 0:
                #Iterate each record
                for index, record in records.iterrows():
                    logger1.info("record gbifid: {}".format(record['gbifid']))
                    logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                    data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
                    logger1.info("No. of possible matches: {}".format(len(data)))
                    if len(data) > 0:
                        for index, row in data.iterrows():
                            cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
                                (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
                            logger1.debug(cur.query)
        #############
        #Lakes
        # Global lakes table: restrict to the country when it can be resolved,
        # otherwise fall back to matching against all lakes worldwide.
        if pycountry.countries.get(alpha_2 = record['countrycode']) != None:
            country = pycountry.countries.get(alpha_2 = record['countrycode']).name
            logger1.info("Lakes: {}".format(country.replace("'", "''")))
            query_template = "SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes WHERE country ILIKE '%{country}%'"
            cur.execute(query_template.format(country = country.replace("'", "''")))
            logger1.debug(cur.query)
        else:
            query_template = "SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes"
            cur.execute(query_template)
        candidates = pd.DataFrame(cur.fetchall())
        logger1.info("No. of global_lakes candidates: {}".format(len(candidates)))
        if len(candidates) > 0:
            #Iterate each record
            for index, record in records.iterrows():
                logger1.info("record gbifid: {}".format(record['gbifid']))
                logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
                data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
                logger1.info("No. of possible matches: {}".format(len(data)))
                if len(data) > 0:
                    for index, row in data.iterrows():
                        cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
                            (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
                        logger1.debug(cur.query)
    #Save summary of results
    # Record one summary row per species with the total number of candidate
    # matches found across all sources and countries.
    cur.execute("SELECT count(*) as no_records FROM gbif_si_matches WHERE species = %s", (sciname['species'],))
    logger1.debug(cur.query)
    no_records = cur.fetchone()
    if no_records['no_records'] > 0:
        cur.execute("""INSERT INTO gbif_si_summary (species, kingdom, phylum, class, _order, family, genus, no_records)
            (SELECT %(species)s, %(kingdom)s, %(phylum)s, %(class)s, %(_order)s, %(family)s, %(genus)s, count(*) FROM gbif_si_matches where species = %(species)s);""", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})
        logger1.debug(cur.query)
# Prune summary rows whose match count is zero so gbif_si_summary only lists
# species for which at least one candidate locality was found.
cur.execute("DELETE FROM gbif_si_summary WHERE no_records = 0")
# Log the executed query, consistent with every other cur.execute in this script
# (the original omitted this one debug line).
logger1.debug(cur.query)
# Explicit success exit code for the calling shell / scheduler.
sys.exit(0)
import psycopg2, os, logging, sys, locale, psycopg2.extras
import pandas as pd
from time import localtime, strftime
from fuzzywuzzy import fuzz
import pycountry
import settings
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
current_time = strftime("%Y%m%d_%H%M%S", localtime())
if not os.path.exists('logs'):
os.makedirs('logs')
logfile_name = 'logs/{}.log'.format(current_time)
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt='%m-%d %H:%M:%S',
filename=logfile_name,
filemode='a')
console = logging.StreamHandler()
console.setLevel(logging.INFO)
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
console.setFormatter(formatter)
logging.getLogger('').addHandler(console)
logger1 = logging.getLogger("si_georef")
def search_fuzzy(locality, stateprovince, data, filter_stateprovince = True, method = 'partial', threshold = 80):
try:
int(threshold)
except:
print('invalid threshold value')
sys.exit(1)
if method == 'partial':
data['score1'] = data.apply(lambda row : fuzz.partial_ratio(locality, row['name']), axis = 1)
if filter_stateprovince == True:
data['score2'] = data.apply(lambda row : fuzz.partial_ratio(stateprovince, row['stateprovince']), axis = 1)
data['score'] = (data['score1'] + data['score2'])/2
results = data.drop(columns = ['score1', 'score2'])
else:
data['score'] = data['score1']
results = data.drop(columns = ['score1'])
elif method == 'set':
data['score1'] = data.apply(lambda row : fuzz.token_set_ratio(locality, row['name']), axis = 1)
if filter_stateprovince == True:
data['score2'] = data.apply(lambda row : fuzz.token_set_ratio(stateprovince, row['stateprovince']), axis = 1)
data['score'] = (data['score1'] + data['score2'])/2
results = data.drop(columns = ['score1', 'score2'])
else:
data['score'] = data['score1']
results = data.drop(columns = ['score1'])
results = results[results.score > threshold]
return results
try:
logger1.info("Connecting to the database.")
conn = psycopg2.connect(host = settings.pg_host, database = settings.pg_db, user = settings.pg_user, connect_timeout = 60)
except:
print(" ERROR: Could not connect to server.")
sys.exit(1)
conn.autocommit = True
cur = conn.cursor(cursor_factory = psycopg2.extras.RealDictCursor)
if len(sys.argv) > 1:
arg = sys.argv[1]
if arg == "plants":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND phylum = 'Tracheophyta' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "birds":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Aves' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "mammals":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Mammalia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "reptiles":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Reptilia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "amphibians":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND class = 'Amphibia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "bivalves":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Bivalvia' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "gastropods":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Gastropoda' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "crabs":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Malacostraca' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "echinoids":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND basisofrecord = 'FOSSIL_SPECIMEN' AND class = 'Echinoidea' GROUP BY species, kingdom, phylum, class, _order, family, genus"
elif arg == "iz":
sel_species = "SELECT species, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species != '' AND decimallatitude is null and decimallongitude is null AND locality != '' AND family = 'Unionidae' GROUP BY species, kingdom, phylum, class, _order, family, genus"
else:
print("Invalid argument")
sys.exit(1)
cur.execute(sel_species)
logger1.debug(cur.query)
scinames = cur.fetchall()
for sciname in scinames:
cur.execute("DELETE FROM gbif_si_matches WHERE species = %s", (sciname['species'],))
logger1.debug(cur.query)
cur.execute("DELETE FROM gbif_si_summary WHERE species = %(species)s AND kingdom = %(kingdom)s AND phylum = %(phylum)s AND class = %(class)s AND _order = %(_order)s AND family = %(family)s AND genus = %(genus)s", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})
logger1.debug(cur.query)
for sciname in scinames:
logger1.info("sciname: {}".format(sciname['species']))
cur.execute("SELECT countrycode FROM gbif_si WHERE species = %s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode", (sciname['species'],))
logger1.debug(cur.query)
countries = cur.fetchall()
for country in countries:
cur.execute("SELECT MAX(gbifid::bigint)::text as gbifid, countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus FROM gbif_si WHERE species = %(species)s AND countrycode = %(countrycode)s AND decimallatitude is null and decimallongitude is null AND lower(locality) != 'unknown' AND locality != '' GROUP BY countrycode, stateprovince, locality, kingdom, phylum, class, _order, family, genus", {'species': sciname['species'], 'countrycode': country['countrycode']})
logger1.debug(cur.query)
records = pd.DataFrame(cur.fetchall())
lace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy"
cur.execute(query_template.format(rank = 'species', scientificname = sciname['species'], countrycode = country['countrycode']))
logger1.debug(cur.query)
candidates = pd.DataFrame(cur.fetchall())
logger1.info("No. of GBIF candidates: {}".format(len(candidates)))
if len(candidates) > 0:
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
if record['stateprovince'] == '':
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)
else:
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())""", {'gbifid': record['gbifid'], 'source': 'gbif.species', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})
logger1.debug(cur.query)
logger1.info("GBIF genus: {}".format(country['countrycode']))
query_template = "SELECT MAX(gbifid::bigint)::text as uid, locality as name, count(*) as no_records, countrycode, trim(leading ', ' from replace(municipality || ', ' || county || ', ' || stateprovince || ', ' || countrycode, ', , ', '')) as located_at, stateprovince, recordedBy FROM gbif WHERE {rank} = '{genus}' AND species != '{scientificname}' AND lower(locality) != 'unknown' AND countrycode = '{countrycode}' GROUP BY countrycode, locality, municipality, county, stateprovince, recordedBy"
cur.execute(query_template.format(rank = 'genus', genus = sciname['genus'], scientificname = sciname['species'], countrycode = country['countrycode']))
logger1.debug(cur.query)
candidates = pd.DataFrame(cur.fetchall())
logger1.info("No. of GBIF candidates: {}".format(len(candidates)))
if len(candidates) > 0:
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
if record['stateprovince'] == '':
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = False, method = 'set', threshold = 80)
else:
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, filter_stateprovince = True, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, no_records, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(no_records)s, %(species)s, %(match)s, %(score)s, %(located_at)s, NOW())""", {'gbifid': record['gbifid'], 'source': 'gbif.genus', 'no_records': str(row['no_records']), 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'located_at': row['located_at']})
logger1.debug(cur.query)
_3
query_template = """
SELECT uid, name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
UNION
SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_polygons' as source FROM wdpa_polygons WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
UNION
SELECT uid, name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
UNION
SELECT uid, orig_name AS name, gadm2 as stateprovince, 'wdpa_points' as source FROM wdpa_points WHERE parent_iso = '{iso}' AND lower(name) != 'unknown'
"""
cur.execute(query_template.format(iso = iso))
logger1.debug(cur.query)
candidates = pd.DataFrame(cur.fetchall())
logger1.info("No. of WDPA candidates: {}".format(len(candidates)))
if len(candidates) > 0:
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
logger1.debug(cur.query)
ame
query_template = "SELECT uid, name_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' UNION SELECT uid, varname_1 as name, name_0 as stateprovince, 'gadm1' as source FROM gadm1 WHERE name_0 = '{country}' AND varname_1 IS NOT NULL"
cur.execute(query_template.format(country = country.replace("'", "''")))
data = pd.DataFrame(cur.fetchall())
#GADM2
query_template = "SELECT uid, name_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' UNION SELECT uid, varname_2 as name, name_1 || ', ' || name_0 as stateprovince, 'gadm2' as source FROM gadm2 WHERE name_0 = '{country}' AND varname_2 IS NOT NULL"
cur.execute(query_template.format(country = country.replace("'", "''")))
data1 = pd.DataFrame(cur.fetchall())
data = pd.concat([data, data1], ignore_index=True)
query_template = "SELECT uid, name_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' UNION SELECT uid, varname_3 as name, name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm3' as source FROM gadm3 WHERE name_0 = '{country}' AND varname_3 IS NOT NULL"
cur.execute(query_template.format(country = country.replace("'", "''")))
data1 = pd.DataFrame(cur.fetchall())
data = pd.concat([data, data1], ignore_index=True)
#GADM4
query_template = "SELECT uid, name_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' UNION SELECT uid, varname_4 as name, name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm4' as source FROM gadm4 WHERE name_0 = '{country}' AND varname_4 IS NOT NULL"
cur.execute(query_template.format(country = country.replace("'", "''")))
data1 = pd.DataFrame(cur.fetchall())
data = pd.concat([data, data1], ignore_index=True)
query_template = "SELECT uid, name_5 as name, name_4 || ', ' || name_3 || ', ' || name_2 || ', ' || name_1 || ', ' || name_0 as stateprovince, 'gadm5' as source FROM gadm5 WHERE name_0 = '{country}'"
cur.execute(query_template.format(country = country.replace("'", "''")))
data1 = pd.DataFrame(cur.fetchall())
candidates = pd.concat([data, data1], ignore_index=True)
logger1.info("No. of GADM candidates: {}".format(len(candidates)))
if len(candidates) > 0:
#Iterate each record
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
logger1.debug(cur.query)
######################
#Geonames
# if record['countrycode'] != None:
# query_template = """
# SELECT uid, name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'
# UNION
# SELECT uid, unnest(string_to_array(alternatenames, ',')) as name, gadm2 as stateprovince, 'geonames' as source FROM geonames WHERE country_code = '{countrycode}'
# """
# cur.execute(query_template.format(countrycode = record['countrycode']))
# logger1.debug(cur.query)
# candidates = pd.DataFrame(cur.fetchall())
# logger1.info("No. of candidates: {}".format(len(candidates)))
# if len(candidates) > 0:
# #Iterate each record
# for index, record in records.iterrows():
# logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
# data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
# for index, row in data.iterrows():
# cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
# (%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
# logger1.debug(cur.query)
######################
#GNIS
if record['countrycode'] == 'US':
logger1.info("GNIS: {}, US".format(record['stateprovince']))
query_template = "SELECT uid, feature_name as name, gadm2 as stateprovince, 'gnis' as source FROM gnis WHERE state_alpha ILIKE '%{stateprovince}%'"
cur.execute(query_template.format(stateprovince = record['stateprovince']))
logger1.debug(cur.query)
candidates = pd.DataFrame(cur.fetchall())
logger1.info("No. of GNIS candidates: {}".format(len(candidates)))
if len(candidates) > 0:
#Iterate each record
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
logger1.debug(cur.query)
#############
#Lakes
if pycountry.countries.get(alpha_2 = record['countrycode']) != None:
country = pycountry.countries.get(alpha_2 = record['countrycode']).name
logger1.info("Lakes: {}".format(country.replace("'", "''")))
query_template = "SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes WHERE country ILIKE '%{country}%'"
cur.execute(query_template.format(country = country.replace("'", "''")))
logger1.debug(cur.query)
else:
query_template = "SELECT uid, lake_name as name, gadm2 as stateprovince, 'global_lakes' as source FROM global_lakes"
cur.execute(query_template)
candidates = pd.DataFrame(cur.fetchall())
logger1.info("No. of global_lakes candidates: {}".format(len(candidates)))
if len(candidates) > 0:
#Iterate each record
for index, record in records.iterrows():
logger1.info("record gbifid: {}".format(record['gbifid']))
logger1.info("locality: {}, {}, {}".format(record['locality'], record['stateprovince'], record['countrycode']))
data = search_fuzzy(record['locality'], record['stateprovince'], candidates, method = 'set', threshold = 80)
logger1.info("No. of possible matches: {}".format(len(data)))
if len(data) > 0:
for index, row in data.iterrows():
cur.execute("""INSERT INTO gbif_si_matches (gbifid, source, species, match, score, located_at, timestamp) VALUES
(%(gbifid)s, %(source)s, %(species)s, %(match)s, %(score)s, %(stateprovince)s, NOW())""", {'gbifid': record['gbifid'], 'source': row['source'], 'species': sciname['species'], 'match': str(row['uid']), 'score': row['score'], 'stateprovince': row['stateprovince']})
logger1.debug(cur.query)
#Save summary of results
cur.execute("SELECT count(*) as no_records FROM gbif_si_matches WHERE species = %s", (sciname['species'],))
logger1.debug(cur.query)
no_records = cur.fetchone()
if no_records['no_records'] > 0:
cur.execute("""INSERT INTO gbif_si_summary (species, kingdom, phylum, class, _order, family, genus, no_records)
(SELECT %(species)s, %(kingdom)s, %(phylum)s, %(class)s, %(_order)s, %(family)s, %(genus)s, count(*) FROM gbif_si_matches where species = %(species)s);""", {'species': sciname['species'], 'kingdom': sciname['kingdom'], 'phylum': sciname['phylum'], 'class': sciname['class'], '_order': sciname['_order'], 'family': sciname['family'], 'genus': sciname['genus']})
logger1.debug(cur.query)
cur.execute("DELETE FROM gbif_si_summary WHERE no_records = 0")
sys.exit(0) | true | true |
f73ad7ef473c28b31f934cb214c644f591567ddd | 32,591 | py | Python | inan/farmer/farmer.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | 1 | 2021-09-19T18:59:19.000Z | 2021-09-19T18:59:19.000Z | inan/farmer/farmer.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | null | null | null | inan/farmer/farmer.py | inan0812/chia-blockchain | 8de40989f56fb64d6ff1690ae0c2169cc11ad18b | [
"Apache-2.0"
] | 1 | 2022-02-08T19:58:12.000Z | 2022-02-08T19:58:12.000Z | import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import inan.server.ws_connection as ws # lgtm [py/import-and-import-from]
from inan.consensus.coinbase import create_puzzlehash_for_pk
from inan.consensus.constants import ConsensusConstants
from inan.pools.pool_config import PoolWalletConfig, load_pool_config
from inan.protocols import farmer_protocol, harvester_protocol
from inan.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from inan.protocols.protocol_message_types import ProtocolMessageTypes
from inan.server.outbound_message import NodeType, make_msg
from inan.server.ws_connection import WSInanConnection
from inan.types.blockchain_format.proof_of_space import ProofOfSpace
from inan.types.blockchain_format.sized_bytes import bytes32
from inan.util.bech32m import decode_puzzle_hash
from inan.util.config import load_config, save_config, config_path_for_filename
from inan.util.hash import std_hash
from inan.util.ints import uint8, uint16, uint32, uint64
from inan.util.keychain import Keychain
from inan.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from inan.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 60
"""
HARVESTER PROTOCOL (FARMER <-> HARVESTER)
"""
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
keychain: Keychain,
consensus_constants: ConsensusConstants,
):
self._root_path = root_path
self.config = farmer_config
# Keep track of all sps, keyed on challenge chain signage point hash
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
# Keep track of harvester plot identifier (str), target sp index, and PoSpace for each challenge
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
# Quality string to plot identifier and challenge_hash, for use with harvester.RequestSignatures
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
# number of responses to each signage point
self.number_of_responses: Dict[bytes32, int] = {}
# A dictionary of keys to time added. These keys refer to keys in the above 4 dictionaries. This is used
# to periodically clear the memory
self.cache_add_time: Dict[bytes32, uint64] = {}
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.keychain = keychain
self.state_changed_callback: Optional[Callable] = None
self.log = log
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# This is the farmer configuration
self.farmer_target_encoded = self.config["xgen_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
# This is the self pooling configuration, which is only used for original self-pooled plots
self.pool_target_encoded = pool_config["xgen_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
# The variables below are for use with an actual pool
# From p2_singleton_puzzle_hash to pool state dict
self.pool_state: Dict[bytes32, Dict] = {}
# From public key bytes to PrivateKey
self.authentication_keys: Dict[bytes, PrivateKey] = {}
# Last time we updated pool_state based on the config file
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, Tuple[Dict, float]]] = {}
async def _start(self):
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSInanConnection):
# Sends a handshake to the harvester
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSInanConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self.state_changed("close_connection", {})
async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/pool_info") as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /pool_info response: {response}")
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
)
return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/farmer", params=get_farmer_params) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
post_farmer_body = json.dumps(post_farmer_request.to_json_dict())
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer", data=post_farmer_body, headers=headers
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
put_farmer_body = json.dumps(put_farmer_request.to_json_dict())
try:
async with aiohttp.ClientSession() as session:
async with session.put(f"{pool_config.pool_url}/farmer", data=put_farmer_body) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
# Skip state update when self pooling
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
# TODO: Improve error handling below, inform about unexpected failures
if time.time() >= pool_state["next_pool_info_update"]:
# Makes a GET request to the pool to get the updated information
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
# Only update the first time from GET /pool_info, gets updated from GET /farmer later
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
# Run a GET /farmer to see if the farmer is already known by the pool
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response['error_code']}, {response['error_message']}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
# Make the farmer known on the pool with a POST /farmer
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
# Now we should be able to update the local farmer info
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
# Update the payout instructions on the pool if required
if (
farmer_info is not None
and pool_config.payout_instructions != farmer_info.payout_instructions
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
# put_farmer_response: PutFarmerResponse = PutFarmerResponse.from_json_dict(
# put_farmer_response_dict
# )
# if put_farmer_response.payout_instructions:
# self.log.info(
# f"Farmer information successfully updated on the pool {pool_config.pool_url}"
# )
# TODO: Fix Streamable implementation and recover the above.
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = self.keychain.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xgen_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xgen_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
if launcher_id == pool_state_dict["pool_config"].launcher_id:
config = load_config(self._root_path, "config.yaml")
new_list = []
for list_element in config["pool"]["pool_list"]:
if bytes.fromhex(list_element["launcher_id"]) == bytes(launcher_id):
list_element["payout_instructions"] = payout_instructions
new_list.append(list_element)
config["pool"]["pool_list"] = new_list
save_config(self._root_path, "config.yaml", config)
# Force a GET /farmer which triggers the PUT /farmer if it detects the changed instructions
pool_state_dict["next_farmer_update"] = 0
return
self.log.warning(f"Launcher id: {launcher_id} not found")
async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
for pool_state in self.pool_state.values():
pool_config: PoolWalletConfig = pool_state["pool_config"]
if pool_config.launcher_id == launcher_id:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token_timeout = pool_state["authentication_token_timeout"]
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
return (
pool_config.pool_url
+ f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
f"&signature={bytes(signature).hex()}"
)
return None
async def update_cached_harvesters(self):
# First remove outdated cache entries
remove_hosts = []
for host, host_cache in self.harvester_cache.items():
remove_peers = []
for peer_id, peer_cache in host_cache.items():
_, last_update = peer_cache
# If the peer cache hasn't been updated for 10x interval, drop it since the harvester doesn't respond
if time.time() - last_update > UPDATE_HARVESTER_CACHE_INTERVAL * 10:
remove_peers.append(peer_id)
for key in remove_peers:
del host_cache[key]
if len(host_cache) == 0:
remove_hosts.append(host)
for key in remove_hosts:
del self.harvester_cache[key]
# Now query each harvester and update caches
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is None or time.time() - cache_entry[1] > UPDATE_HARVESTER_CACHE_INTERVAL:
response = await connection.request_plots(harvester_protocol.RequestPlots(), timeout=5)
if response is not None:
if isinstance(response, harvester_protocol.RespondPlots):
if connection.peer_host not in self.harvester_cache:
self.harvester_cache[connection.peer_host] = {}
self.harvester_cache[connection.peer_host][connection.peer_node_id.hex()] = (
response.to_json_dict(),
time.time(),
)
else:
self.log.error(
f"Invalid response from harvester:"
f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
)
else:
self.log.error(
"Harvester did not respond. You might need to update harvester to the latest version"
)
async def get_cached_harvesters(self, connection: WSInanConnection) -> Optional[Tuple[Dict, float]]:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
return None
return host_cache.get(connection.peer_node_id.hex())
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is not None:
harvester_object: dict = dict(cache_entry[0])
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
return {"harvesters": harvesters}
async def _periodically_update_pool_state_task(self):
time_slept: uint64 = uint64(0)
config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
while not self._shut_down:
# Every time the config file changes, read it to check the pool state
stat_info = config_path.stat()
if stat_info.st_mtime > self.last_config_access_time:
# If we detect the config file changed, refresh private keys first just in case
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self.last_config_access_time = stat_info.st_mtime
await self.update_pool_state()
time_slept = uint64(0)
elif time_slept > 60:
await self.update_pool_state()
time_slept = uint64(0)
time_slept += 1
await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
# Periodically refresh GUI to show the correct download/upload rate.
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
# Handles harvester plots cache cleanup and updates
await self.update_cached_harvesters()
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.print_exc()}")
await asyncio.sleep(1)
| 49.986196 | 120 | 0.599184 | import asyncio
import json
import logging
import time
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Tuple
import traceback
import aiohttp
from blspy import AugSchemeMPL, G1Element, G2Element, PrivateKey
import inan.server.ws_connection as ws
from inan.consensus.coinbase import create_puzzlehash_for_pk
from inan.consensus.constants import ConsensusConstants
from inan.pools.pool_config import PoolWalletConfig, load_pool_config
from inan.protocols import farmer_protocol, harvester_protocol
from inan.protocols.pool_protocol import (
ErrorResponse,
get_current_authentication_token,
GetFarmerResponse,
PoolErrorCode,
PostFarmerPayload,
PostFarmerRequest,
PutFarmerPayload,
PutFarmerRequest,
AuthenticationPayload,
)
from inan.protocols.protocol_message_types import ProtocolMessageTypes
from inan.server.outbound_message import NodeType, make_msg
from inan.server.ws_connection import WSInanConnection
from inan.types.blockchain_format.proof_of_space import ProofOfSpace
from inan.types.blockchain_format.sized_bytes import bytes32
from inan.util.bech32m import decode_puzzle_hash
from inan.util.config import load_config, save_config, config_path_for_filename
from inan.util.hash import std_hash
from inan.util.ints import uint8, uint16, uint32, uint64
from inan.util.keychain import Keychain
from inan.wallet.derive_keys import (
master_sk_to_farmer_sk,
master_sk_to_pool_sk,
master_sk_to_wallet_sk,
find_authentication_sk,
find_owner_sk,
)
from inan.wallet.puzzles.singleton_top_layer import SINGLETON_MOD
singleton_mod_hash = SINGLETON_MOD.get_tree_hash()
log = logging.getLogger(__name__)
UPDATE_POOL_INFO_INTERVAL: int = 3600
UPDATE_POOL_FARMER_INFO_INTERVAL: int = 300
UPDATE_HARVESTER_CACHE_INTERVAL: int = 60
class Farmer:
def __init__(
self,
root_path: Path,
farmer_config: Dict,
pool_config: Dict,
keychain: Keychain,
consensus_constants: ConsensusConstants,
):
self._root_path = root_path
self.config = farmer_config
self.sps: Dict[bytes32, List[farmer_protocol.NewSignagePoint]] = {}
self.proofs_of_space: Dict[bytes32, List[Tuple[str, ProofOfSpace]]] = {}
self.quality_str_to_identifiers: Dict[bytes32, Tuple[str, bytes32, bytes32, bytes32]] = {}
self.number_of_responses: Dict[bytes32, int] = {}
self.cache_add_time: Dict[bytes32, uint64] = {}
self.cache_clear_task: asyncio.Task
self.update_pool_state_task: asyncio.Task
self.constants = consensus_constants
self._shut_down = False
self.server: Any = None
self.keychain = keychain
self.state_changed_callback: Optional[Callable] = None
self.log = log
self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
self._private_keys = [master_sk_to_farmer_sk(sk) for sk in self.all_root_sks] + [
master_sk_to_pool_sk(sk) for sk in self.all_root_sks
]
if len(self.get_public_keys()) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
self.farmer_target_encoded = self.config["xgen_target_address"]
self.farmer_target = decode_puzzle_hash(self.farmer_target_encoded)
self.pool_public_keys = [G1Element.from_bytes(bytes.fromhex(pk)) for pk in self.config["pool_public_keys"]]
self.pool_target_encoded = pool_config["xgen_target_address"]
self.pool_target = decode_puzzle_hash(self.pool_target_encoded)
self.pool_sks_map: Dict = {}
for key in self.get_private_keys():
self.pool_sks_map[bytes(key.get_g1())] = key
assert len(self.farmer_target) == 32
assert len(self.pool_target) == 32
if len(self.pool_sks_map) == 0:
error_str = "No keys exist. Please run 'inan keys generate' or open the UI."
raise RuntimeError(error_str)
self.pool_state: Dict[bytes32, Dict] = {}
self.authentication_keys: Dict[bytes, PrivateKey] = {}
self.last_config_access_time: uint64 = uint64(0)
self.harvester_cache: Dict[str, Dict[str, Tuple[Dict, float]]] = {}
async def _start(self):
self.update_pool_state_task = asyncio.create_task(self._periodically_update_pool_state_task())
self.cache_clear_task = asyncio.create_task(self._periodically_clear_cache_and_refresh_task())
def _close(self):
self._shut_down = True
async def _await_closed(self):
await self.cache_clear_task
await self.update_pool_state_task
def _set_state_changed_callback(self, callback: Callable):
self.state_changed_callback = callback
async def on_connect(self, peer: WSInanConnection):
self.state_changed("add_connection", {})
handshake = harvester_protocol.HarvesterHandshake(
self.get_public_keys(),
self.pool_public_keys,
)
if peer.connection_type is NodeType.HARVESTER:
msg = make_msg(ProtocolMessageTypes.harvester_handshake, handshake)
await peer.send_message(msg)
def set_server(self, server):
self.server = server
def state_changed(self, change: str, data: Dict[str, Any]):
if self.state_changed_callback is not None:
self.state_changed_callback(change, data)
def handle_failed_pool_response(self, p2_singleton_puzzle_hash: bytes32, error_message: str):
self.log.error(error_message)
self.pool_state[p2_singleton_puzzle_hash]["pool_errors_24h"].append(
ErrorResponse(uint16(PoolErrorCode.REQUEST_FAILED.value), error_message).to_json_dict()
)
def on_disconnect(self, connection: ws.WSInanConnection):
self.log.info(f"peer disconnected {connection.get_peer_info()}")
self.state_changed("close_connection", {})
    async def _pool_get_pool_info(self, pool_config: PoolWalletConfig) -> Optional[Dict]:
        """GET /pool_info from the configured pool.

        Returns the decoded JSON dict on HTTP success, or None on any HTTP
        error or exception; failures are logged into the pool's 24h error list
        via handle_failed_pool_response.
        """
        try:
            async with aiohttp.ClientSession(trust_env=True) as session:
                async with session.get(f"{pool_config.pool_url}/pool_info") as resp:
                    if resp.ok:
                        response: Dict = json.loads(await resp.text())
                        self.log.info(f"GET /pool_info response: {response}")
                        return response
                    else:
                        # Non-2xx/3xx status: record the failure and fall through to None.
                        self.handle_failed_pool_response(
                            pool_config.p2_singleton_puzzle_hash,
                            f"Error in GET /pool_info {pool_config.pool_url}, {resp.status}",
                        )
        except Exception as e:
            # Network / decode errors are recorded, never raised to the caller.
            self.handle_failed_pool_response(
                pool_config.p2_singleton_puzzle_hash, f"Exception in GET /pool_info {pool_config.pool_url}, {e}"
            )
        return None
async def _pool_get_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, authentication_sk: PrivateKey
) -> Optional[Dict]:
assert authentication_sk.get_g1() == pool_config.authentication_public_key
authentication_token = get_current_authentication_token(authentication_token_timeout)
message: bytes32 = std_hash(
AuthenticationPayload(
"get_farmer", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
)
)
signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
get_farmer_params = {
"launcher_id": pool_config.launcher_id.hex(),
"authentication_token": authentication_token,
"signature": bytes(signature).hex(),
}
try:
async with aiohttp.ClientSession(trust_env=True) as session:
async with session.get(f"{pool_config.pool_url}/farmer", params=get_farmer_params) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"GET /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in GET /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in GET /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_post_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
post_farmer_payload: PostFarmerPayload = PostFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, post_farmer_payload.get_hash())
post_farmer_request = PostFarmerRequest(post_farmer_payload, signature)
post_farmer_body = json.dumps(post_farmer_request.to_json_dict())
headers = {
"content-type": "application/json;",
}
try:
async with aiohttp.ClientSession() as session:
async with session.post(
f"{pool_config.pool_url}/farmer", data=post_farmer_body, headers=headers
) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"POST /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in POST /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in POST /farmer {pool_config.pool_url}, {e}"
)
return None
async def _pool_put_farmer(
self, pool_config: PoolWalletConfig, authentication_token_timeout: uint8, owner_sk: PrivateKey
) -> Optional[Dict]:
put_farmer_payload: PutFarmerPayload = PutFarmerPayload(
pool_config.launcher_id,
get_current_authentication_token(authentication_token_timeout),
pool_config.authentication_public_key,
pool_config.payout_instructions,
None,
)
assert owner_sk.get_g1() == pool_config.owner_public_key
signature: G2Element = AugSchemeMPL.sign(owner_sk, put_farmer_payload.get_hash())
put_farmer_request = PutFarmerRequest(put_farmer_payload, signature)
put_farmer_body = json.dumps(put_farmer_request.to_json_dict())
try:
async with aiohttp.ClientSession() as session:
async with session.put(f"{pool_config.pool_url}/farmer", data=put_farmer_body) as resp:
if resp.ok:
response: Dict = json.loads(await resp.text())
self.log.info(f"PUT /farmer response: {response}")
if "error_code" in response:
self.pool_state[pool_config.p2_singleton_puzzle_hash]["pool_errors_24h"].append(response)
return response
else:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash,
f"Error in PUT /farmer {pool_config.pool_url}, {resp.status}",
)
except Exception as e:
self.handle_failed_pool_response(
pool_config.p2_singleton_puzzle_hash, f"Exception in PUT /farmer {pool_config.pool_url}, {e}"
)
return None
async def update_pool_state(self):
config = load_config(self._root_path, "config.yaml")
pool_config_list: List[PoolWalletConfig] = load_pool_config(self._root_path)
for pool_config in pool_config_list:
p2_singleton_puzzle_hash = pool_config.p2_singleton_puzzle_hash
try:
authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
self.all_root_sks, pool_config.authentication_public_key
)
if authentication_sk is None:
self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
continue
if p2_singleton_puzzle_hash not in self.pool_state:
self.authentication_keys[bytes(pool_config.authentication_public_key)] = authentication_sk
self.pool_state[p2_singleton_puzzle_hash] = {
"points_found_since_start": 0,
"points_found_24h": [],
"points_acknowledged_since_start": 0,
"points_acknowledged_24h": [],
"next_farmer_update": 0,
"next_pool_info_update": 0,
"current_points": 0,
"current_difficulty": None,
"pool_errors_24h": [],
"authentication_token_timeout": None,
}
self.log.info(f"Added pool: {pool_config}")
pool_state = self.pool_state[p2_singleton_puzzle_hash]
pool_state["pool_config"] = pool_config
if pool_config.pool_url == "":
continue
enforce_https = config["full_node"]["selected_network"] == "mainnet"
if enforce_https and not pool_config.pool_url.startswith("https://"):
self.log.error(f"Pool URLs must be HTTPS on mainnet {pool_config.pool_url}")
continue
if time.time() >= pool_state["next_pool_info_update"]:
pool_info = await self._pool_get_pool_info(pool_config)
if pool_info is not None and "error_code" not in pool_info:
pool_state["authentication_token_timeout"] = pool_info["authentication_token_timeout"]
pool_state["next_pool_info_update"] = time.time() + UPDATE_POOL_INFO_INTERVAL
if pool_state["current_difficulty"] is None:
pool_state["current_difficulty"] = pool_info["minimum_difficulty"]
if time.time() >= pool_state["next_farmer_update"]:
authentication_token_timeout = pool_state["authentication_token_timeout"]
async def update_pool_farmer_info() -> Tuple[Optional[GetFarmerResponse], Optional[bool]]:
response = await self._pool_get_farmer(
pool_config, authentication_token_timeout, authentication_sk
)
farmer_response: Optional[GetFarmerResponse] = None
farmer_known: Optional[bool] = None
if response is not None:
if "error_code" not in response:
farmer_response = GetFarmerResponse.from_json_dict(response)
if farmer_response is not None:
pool_state["current_difficulty"] = farmer_response.current_difficulty
pool_state["current_points"] = farmer_response.current_points
pool_state["next_farmer_update"] = time.time() + UPDATE_POOL_FARMER_INFO_INTERVAL
else:
farmer_known = response["error_code"] != PoolErrorCode.FARMER_NOT_KNOWN.value
self.log.error(
"update_pool_farmer_info failed: "
f"{response['error_code']}, {response['error_message']}"
)
return farmer_response, farmer_known
if authentication_token_timeout is not None:
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and farmer_is_known is not None and not farmer_is_known:
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
post_response = await self._pool_post_farmer(
pool_config, authentication_token_timeout, owner_sk
)
if post_response is not None and "error_code" not in post_response:
self.log.info(
f"Welcome message from {pool_config.pool_url}: "
f"{post_response['welcome_message']}"
)
farmer_info, farmer_is_known = await update_pool_farmer_info()
if farmer_info is None and not farmer_is_known:
self.log.error("Failed to update farmer info after POST /farmer.")
if (
farmer_info is not None
and pool_config.payout_instructions != farmer_info.payout_instructions
):
owner_sk = await find_owner_sk(self.all_root_sks, pool_config.owner_public_key)
put_farmer_response_dict = await self._pool_put_farmer(
pool_config, authentication_token_timeout, owner_sk
)
try:
if put_farmer_response_dict["payout_instructions"]:
self.log.info(
f"Farmer information successfully updated on the pool {pool_config.pool_url}"
)
else:
raise Exception
except Exception:
self.log.error(
f"Failed to update farmer information on the pool {pool_config.pool_url}"
)
else:
self.log.warning(
f"No pool specific authentication_token_timeout has been set for {p2_singleton_puzzle_hash}"
f", check communication with the pool."
)
except Exception as e:
tb = traceback.format_exc()
self.log.error(f"Exception in update_pool_state for {pool_config.pool_url}, {e} {tb}")
def get_public_keys(self):
return [child_sk.get_g1() for child_sk in self._private_keys]
def get_private_keys(self):
return self._private_keys
def get_reward_targets(self, search_for_private_key: bool) -> Dict:
if search_for_private_key:
all_sks = self.keychain.get_all_private_keys()
stop_searching_for_farmer, stop_searching_for_pool = False, False
for i in range(500):
if stop_searching_for_farmer and stop_searching_for_pool and i > 0:
break
for sk, _ in all_sks:
ph = create_puzzlehash_for_pk(master_sk_to_wallet_sk(sk, uint32(i)).get_g1())
if ph == self.farmer_target:
stop_searching_for_farmer = True
if ph == self.pool_target:
stop_searching_for_pool = True
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
"have_farmer_sk": stop_searching_for_farmer,
"have_pool_sk": stop_searching_for_pool,
}
return {
"farmer_target": self.farmer_target_encoded,
"pool_target": self.pool_target_encoded,
}
def set_reward_targets(self, farmer_target_encoded: Optional[str], pool_target_encoded: Optional[str]):
config = load_config(self._root_path, "config.yaml")
if farmer_target_encoded is not None:
self.farmer_target_encoded = farmer_target_encoded
self.farmer_target = decode_puzzle_hash(farmer_target_encoded)
config["farmer"]["xgen_target_address"] = farmer_target_encoded
if pool_target_encoded is not None:
self.pool_target_encoded = pool_target_encoded
self.pool_target = decode_puzzle_hash(pool_target_encoded)
config["pool"]["xgen_target_address"] = pool_target_encoded
save_config(self._root_path, "config.yaml", config)
    async def set_payout_instructions(self, launcher_id: bytes32, payout_instructions: str):
        """Persist new payout instructions for the pool identified by ``launcher_id``.

        Rewrites the matching entry in config.yaml's pool list and forces the
        next pool/farmer sync by zeroing "next_farmer_update". Logs a warning
        if no tracked pool matches the launcher id.
        """
        for p2_singleton_puzzle_hash, pool_state_dict in self.pool_state.items():
            if launcher_id == pool_state_dict["pool_config"].launcher_id:
                config = load_config(self._root_path, "config.yaml")
                new_list = []
                for list_element in config["pool"]["pool_list"]:
                    # Only the matching entry is modified; all entries are kept.
                    if bytes.fromhex(list_element["launcher_id"]) == bytes(launcher_id):
                        list_element["payout_instructions"] = payout_instructions
                    new_list.append(list_element)
                config["pool"]["pool_list"] = new_list
                save_config(self._root_path, "config.yaml", config)
                # Force update_pool_state to re-sync with the pool immediately.
                pool_state_dict["next_farmer_update"] = 0
                return
        self.log.warning(f"Launcher id: {launcher_id} not found")
    async def generate_login_link(self, launcher_id: bytes32) -> Optional[str]:
        """Build a signed /login URL for the pool identified by ``launcher_id``.

        Returns None if no tracked pool matches or no authentication secret key
        can be found for the pool's authentication public key.
        """
        for pool_state in self.pool_state.values():
            pool_config: PoolWalletConfig = pool_state["pool_config"]
            if pool_config.launcher_id == launcher_id:
                authentication_sk: Optional[PrivateKey] = await find_authentication_sk(
                    self.all_root_sks, pool_config.authentication_public_key
                )
                if authentication_sk is None:
                    self.log.error(f"Could not find authentication sk for pk: {pool_config.authentication_public_key}")
                    continue
                assert authentication_sk.get_g1() == pool_config.authentication_public_key
                # NOTE(review): this can still be None if the pool never answered
                # GET /pool_info — confirm get_current_authentication_token
                # tolerates a None timeout.
                authentication_token_timeout = pool_state["authentication_token_timeout"]
                authentication_token = get_current_authentication_token(authentication_token_timeout)
                message: bytes32 = std_hash(
                    AuthenticationPayload(
                        "get_login", pool_config.launcher_id, pool_config.target_puzzle_hash, authentication_token
                    )
                )
                signature: G2Element = AugSchemeMPL.sign(authentication_sk, message)
                return (
                    pool_config.pool_url
                    + f"/login?launcher_id={launcher_id.hex()}&authentication_token={authentication_token}"
                    f"&signature={bytes(signature).hex()}"
                )
        return None
    async def update_cached_harvesters(self):
        """Expire stale harvester plot caches and refresh them from connected peers.

        Two phases:
        1. Drop per-peer cache entries not updated for 10x the refresh interval,
           then drop hosts that have no peers left.
        2. For every connected harvester whose cache entry is missing or older
           than the refresh interval, request its plot list and cache the
           response keyed by (peer_host, peer_node_id).
        """
        remove_hosts = []
        for host, host_cache in self.harvester_cache.items():
            remove_peers = []
            for peer_id, peer_cache in host_cache.items():
                _, last_update = peer_cache
                if time.time() - last_update > UPDATE_HARVESTER_CACHE_INTERVAL * 10:
                    remove_peers.append(peer_id)
            for key in remove_peers:
                del host_cache[key]
            if len(host_cache) == 0:
                remove_hosts.append(host)
        for key in remove_hosts:
            del self.harvester_cache[key]
        for connection in self.server.get_connections():
            if connection.connection_type != NodeType.HARVESTER:
                continue
            cache_entry = await self.get_cached_harvesters(connection)
            # cache_entry[1] is the timestamp recorded when the entry was stored.
            if cache_entry is None or time.time() - cache_entry[1] > UPDATE_HARVESTER_CACHE_INTERVAL:
                response = await connection.request_plots(harvester_protocol.RequestPlots(), timeout=5)
                if response is not None:
                    if isinstance(response, harvester_protocol.RespondPlots):
                        if connection.peer_host not in self.harvester_cache:
                            self.harvester_cache[connection.peer_host] = {}
                        self.harvester_cache[connection.peer_host][connection.peer_node_id.hex()] = (
                            response.to_json_dict(),
                            time.time(),
                        )
                    else:
                        self.log.error(
                            f"Invalid response from harvester:"
                            f"peer_host {connection.peer_host}, peer_node_id {connection.peer_node_id}"
                        )
                else:
                    self.log.error(
                        "Harvester did not respond. You might need to update harvester to the latest version"
                    )
async def get_cached_harvesters(self, connection: WSInanConnection) -> Optional[Tuple[Dict, float]]:
host_cache = self.harvester_cache.get(connection.peer_host)
if host_cache is None:
return None
return host_cache.get(connection.peer_node_id.hex())
async def get_harvesters(self) -> Dict:
harvesters: List = []
for connection in self.server.get_connections():
if connection.connection_type != NodeType.HARVESTER:
continue
cache_entry = await self.get_cached_harvesters(connection)
if cache_entry is not None:
harvester_object: dict = dict(cache_entry[0])
harvester_object["connection"] = {
"node_id": connection.peer_node_id.hex(),
"host": connection.peer_host,
"port": connection.peer_port,
}
harvesters.append(harvester_object)
return {"harvesters": harvesters}
    async def _periodically_update_pool_state_task(self):
        """Background loop: keep pool membership state in sync.

        Once per second (until ``_shut_down``):
        * if config.yaml's mtime advanced, reload the root keys and update the
          pool state immediately;
        * otherwise update the pool state at least every ~60 seconds.
        """
        time_slept: uint64 = uint64(0)
        config_path: Path = config_path_for_filename(self._root_path, "config.yaml")
        while not self._shut_down:
            # Poll config file mtime to pick up external edits (e.g. via CLI).
            stat_info = config_path.stat()
            if stat_info.st_mtime > self.last_config_access_time:
                # Refresh keys from keychain, as they might have changed.
                self.all_root_sks: List[PrivateKey] = [sk for sk, _ in self.keychain.get_all_private_keys()]
                self.last_config_access_time = stat_info.st_mtime
                await self.update_pool_state()
                time_slept = uint64(0)
            elif time_slept > 60:
                await self.update_pool_state()
                time_slept = uint64(0)
            time_slept += 1
            await asyncio.sleep(1)
async def _periodically_clear_cache_and_refresh_task(self):
time_slept: uint64 = uint64(0)
refresh_slept = 0
while not self._shut_down:
try:
if time_slept > self.constants.SUB_SLOT_TIME_TARGET:
now = time.time()
removed_keys: List[bytes32] = []
for key, add_time in self.cache_add_time.items():
if now - float(add_time) > self.constants.SUB_SLOT_TIME_TARGET * 3:
self.sps.pop(key, None)
self.proofs_of_space.pop(key, None)
self.quality_str_to_identifiers.pop(key, None)
self.number_of_responses.pop(key, None)
removed_keys.append(key)
for key in removed_keys:
self.cache_add_time.pop(key, None)
time_slept = uint64(0)
log.debug(
f"Cleared farmer cache. Num sps: {len(self.sps)} {len(self.proofs_of_space)} "
f"{len(self.quality_str_to_identifiers)} {len(self.number_of_responses)}"
)
time_slept += 1
refresh_slept += 1
if refresh_slept >= 30:
self.state_changed("add_connection", {})
refresh_slept = 0
await self.update_cached_harvesters()
except Exception:
log.error(f"_periodically_clear_cache_and_refresh_task failed: {traceback.print_exc()}")
await asyncio.sleep(1)
| true | true |
f73ad8326fe77a4db799abaf6eab17b6b2d8466b | 561 | py | Python | setup.py | mikemccabe210/amortizedassimilation | 25652884869904d7ec1e1a84856cc364c8420467 | [
"MIT"
] | 3 | 2021-12-07T02:06:15.000Z | 2022-03-01T16:56:23.000Z | setup.py | mikemccabe210/amortizedassimilation | 25652884869904d7ec1e1a84856cc364c8420467 | [
"MIT"
] | null | null | null | setup.py | mikemccabe210/amortizedassimilation | 25652884869904d7ec1e1a84856cc364c8420467 | [
"MIT"
] | null | null | null | import setuptools
# Packaging metadata for the amortized_assimilation package.
setuptools.setup(
    name="amortized_assimilation",
    version="0.0.1",
    author="Anonymized",
    author_email="Anonymized",
    description="Learned uncertainty-aware filters for assimilation noisy high dimensional observational data",
    url="Anonymized",
    packages=["amortized_assimilation"],
    install_requires=[
        "torch>=1.3.1",
        "matplotlib>=3.1.0",
        "torchdiffeq>=0.0.1",
        "numpy>=1.16.4",
    ],
    # BUG FIX: classifiers must be a list of strings. The original passed
    # ("Programming Language :: Python :: 3"), which is a plain parenthesized
    # str (not a tuple), so metadata tooling would iterate it char by char.
    classifiers=["Programming Language :: Python :: 3"],
)
# Packaging metadata for the amortized_assimilation package.
setuptools.setup(
    name="amortized_assimilation",
    version="0.0.1",
    author="Anonymized",
    author_email="Anonymized",
    description="Learned uncertainty-aware filters for assimilation noisy high dimensional observational data",
    url="Anonymized",
    packages=["amortized_assimilation"],
    install_requires=[
        "torch>=1.3.1",
        "matplotlib>=3.1.0",
        "torchdiffeq>=0.0.1",
        "numpy>=1.16.4",
    ],
    # BUG FIX: classifiers must be a list of strings; ("...") is just a str.
    classifiers=["Programming Language :: Python :: 3"],
)
f73ad8ae5cf3b32721cc59b89e9267f009100966 | 1,059 | py | Python | src/m1_run_this_on_robot.py | dandanT-T/99-CapstoneProject-201920 | c66efc204dbba94f136af22d48ada6ae346097f1 | [
"MIT"
] | null | null | null | src/m1_run_this_on_robot.py | dandanT-T/99-CapstoneProject-201920 | c66efc204dbba94f136af22d48ada6ae346097f1 | [
"MIT"
] | null | null | null | src/m1_run_this_on_robot.py | dandanT-T/99-CapstoneProject-201920 | c66efc204dbba94f136af22d48ada6ae346097f1 | [
"MIT"
] | null | null | null | """
Capstone Project. Code to run on the EV3 robot (NOT on a laptop).
Author: Your professors (for the framework)
and Zhicheng Kai.
Winter term, 2018-2019.
"""
import rosebot
import mqtt_remote_method_calls as com
import time
import shared_gui_delegate_on_robot
def main():
    """Entry point for the code that runs on the EV3 robot (not the laptop).

    All work is delegated to ``real_thing``: driving the robot and talking
    to the laptop GUI over MQTT.
    """
    real_thing()
def real_thing():
    """Wire the robot to the laptop GUI over MQTT and poll until told to stop."""
    robot = rosebot.RoseBot()
    handler = shared_gui_delegate_on_robot.Handler(robot)
    client = com.MqttClient(handler)
    robot.drive_system.mqtt_sender = client
    client.connect_to_pc()
    running = True
    while running:
        # Sleep briefly so MQTT callbacks get CPU time between checks.
        time.sleep(0.01)
        if handler.need_to_stop:
            print('quit')
            running = False
# -----------------------------------------------------------------------------
# Calls main to start the ball rolling.
# -----------------------------------------------------------------------------
main() | 27.868421 | 79 | 0.580737 |
import rosebot
import mqtt_remote_method_calls as com
import time
import shared_gui_delegate_on_robot
def main():
    """Robot-side entry point; all behavior lives in real_thing()."""
    real_thing()
def real_thing():
    """Connect the robot to the laptop GUI via MQTT and loop until asked to stop."""
    robot = rosebot.RoseBot()
    gui_handler = shared_gui_delegate_on_robot.Handler(robot)
    mqtt_client = com.MqttClient(gui_handler)
    robot.drive_system.mqtt_sender = mqtt_client
    mqtt_client.connect_to_pc()
    keep_running = True
    while keep_running:
        time.sleep(0.01)
        if gui_handler.need_to_stop:
            print('quit')
            keep_running = False
main() | true | true |
f73ad99f2c1b621669e07354f5a18fcdcd5fb2d0 | 701 | py | Python | predictor.py | omergunal/Attacker-Group-Predictor | 77475abb994b36a04e159d50af44f4ef9baf5da5 | [
"Apache-2.0"
] | 53 | 2020-05-31T18:39:57.000Z | 2022-02-15T07:49:42.000Z | predictor.py | omergunal/Attacker-Group-Predictor | 77475abb994b36a04e159d50af44f4ef9baf5da5 | [
"Apache-2.0"
] | 1 | 2021-08-03T22:02:54.000Z | 2021-08-09T20:02:23.000Z | predictor.py | omergunal/Attacker-Group-Predictor | 77475abb994b36a04e159d50af44f4ef9baf5da5 | [
"Apache-2.0"
] | 11 | 2020-06-03T06:55:14.000Z | 2020-06-21T14:20:47.000Z | import os
from ast import literal_eval
from collections import Counter
def find_groups(techniques, softwares):
    """Rank known attacker groups against observed techniques and software.

    Args:
        techniques: comma-separated technique names.
        softwares: comma-separated software/tool names.

    Returns:
        Up to five ``(group_name, score)`` tuples, best match first, where the
        score counts case-insensitive substring matches against each group's
        known techniques in groupInfo.txt.
    """
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open("groupInfo.txt", "r") as f:
        data = literal_eval(f.read())
    technique_terms = techniques.split(",")
    software_terms = softwares.split(",")
    rate = {}
    for group, info in data.items():
        known_techniques = info["techniques"]
        score = 0
        for term in technique_terms:
            for known in known_techniques:
                if term.lower() in known.lower():
                    score += 1
        # NOTE(review): software names are also matched against "techniques";
        # presumably the data file mixes both kinds of entries — confirm
        # before switching to a dedicated "software" key.
        for term in software_terms:
            for known in known_techniques:
                if term.lower() in known.lower():
                    score += 1
        rate[group] = score
    return Counter(rate).most_common(5)
| 23.366667 | 52 | 0.697575 | import os
from ast import literal_eval
from collections import Counter
def find_groups(techniques, softwares):
    """Rank known attacker groups for the observed indicators.

    Scores each group in groupInfo.txt by case-insensitive substring matches
    of the comma-separated ``techniques`` and ``softwares`` against the
    group's known techniques, returning the top five ``(group, score)`` pairs.
    """
    # BUG FIX: close the data file deterministically via a context manager.
    with open("groupInfo.txt", "r") as f:
        data = literal_eval(f.read())
    terms = techniques.split(",") + softwares.split(",")
    rate = {}
    for group, info in data.items():
        # Both technique and software terms are matched against "techniques",
        # mirroring the original behavior.
        rate[group] = sum(
            1
            for term in terms
            for known in info["techniques"]
            if term.lower() in known.lower()
        )
    return Counter(rate).most_common(5)
| true | true |
f73ad9e6b36c7bb4903f7d10a8322f4abeaafed4 | 16,754 | py | Python | openslides_backend/action/actions/meeting/import_.py | ostcar/openslides-backend | e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c | [
"MIT"
] | null | null | null | openslides_backend/action/actions/meeting/import_.py | ostcar/openslides-backend | e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c | [
"MIT"
] | null | null | null | openslides_backend/action/actions/meeting/import_.py | ostcar/openslides-backend | e6ceac497c37a1e3e7f408c6cfb29cf21d985b4c | [
"MIT"
] | null | null | null | import time
from collections import defaultdict
from typing import Any, Dict, Iterable, Optional, Tuple
from ....models.base import model_registry
from ....models.checker import Checker, CheckException
from ....models.fields import (
BaseGenericRelationField,
BaseRelationField,
BaseTemplateField,
GenericRelationField,
GenericRelationListField,
RelationField,
RelationListField,
)
from ....models.models import Meeting
from ....permissions.management_levels import CommitteeManagementLevel
from ....permissions.permission_helper import has_committee_management_level
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.filters import FilterOperator
from ....shared.interfaces.event import EventType
from ....shared.interfaces.write_request import WriteRequest
from ....shared.patterns import KEYSEPARATOR, Collection, FullQualifiedId
from ...action import Action
from ...mixins.singular_action_mixin import SingularActionMixin
from ...util.crypto import get_random_string
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData, ActionResultElement, ActionResults
from ..motion.update import RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN
from ..user.user_mixin import LimitOfUserMixin
@register_action("meeting.import")
class MeetingImport(SingularActionMixin, LimitOfUserMixin, Action):
"""
Action to import a meeting.
"""
model = Meeting()
schema = DefaultSchema(Meeting()).get_default_schema(
required_properties=["committee_id"],
additional_required_fields={"meeting": {"type": "object"}},
title="Import meeting",
description="Import a meeting into the committee.",
)
    def perform(
        self, action_data: ActionData, user_id: int, internal: bool = False
    ) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]:
        """
        Simplified entrypoint to perform the action.

        Handles exactly one instance (SingularActionMixin) and prefixes any
        MissingPermission message with the action name before re-raising, so
        the client sees which action was denied.
        """
        self.user_id = user_id
        self.index = 0
        action_data = self.get_updated_instances(action_data)
        # Exactly one instance is guaranteed by the singular-action handling.
        instance = next(iter(action_data))
        self.validate_instance(instance)
        try:
            self.check_permissions(instance)
        except MissingPermission as e:
            msg = f"You are not allowed to perform action {self.name}."
            e.message = msg + " " + e.message
            raise e
        instance = self.base_update_instance(instance)
        self.write_requests.extend(self.create_write_requests(instance))
        final_write_request = self.process_write_requests()
        result = [self.create_action_result_element(instance)]
        return (final_write_request, result)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
meeting_json = instance["meeting"]
# checks if the meeting is correct
if not len(meeting_json.get("meeting", {}).values()) == 1:
raise ActionException("Need exact one meeting in meeting collection.")
self.check_usernames_and_generate_new_ones(meeting_json)
active_user_in_json = len(
[
key
for key in meeting_json.get("user", [])
if meeting_json["user"][key]["is_active"]
]
)
self.check_limit_of_user(active_user_in_json)
# save blobs from mediafiles
self.mediadata = []
for entry in meeting_json.get("mediafile", {}).values():
if "blob" in entry:
self.mediadata.append(
(entry.pop("blob"), entry["id"], entry["mimetype"])
)
# check datavalidation
checker = Checker(data=meeting_json, mode="external")
try:
checker.run_check()
except CheckException as ce:
raise ActionException(str(ce))
self.allowed_collections = checker.allowed_collections
for entry in meeting_json.get("motion", {}).values():
if entry.get("all_origin_ids") or entry.get("all_derived_motion_ids"):
raise ActionException(
"Motion all_origin_ids and all_derived_motion_ids should be empty."
)
organization_id = self.check_limit_of_meetings(instance["committee_id"])
self.update_meeting_and_users(instance, organization_id)
# replace ids in the meeting_json
self.create_replace_map(meeting_json)
self.replace_fields(instance)
self.update_admin_group(meeting_json)
self.upload_mediadata()
return instance
    def check_usernames_and_generate_new_ones(self, json_data: Dict[str, Any]) -> None:
        """Make every imported username unique, both within the import and the datastore.

        Colliding names get a " <counter>" suffix appended to the original name,
        incrementing until the candidate is free. Mutates the user entries in
        ``json_data`` in place.
        """
        used_usernames = set()
        for entry in json_data.get("user", {}).values():
            is_username_unique = False
            template_username = entry["username"]
            count = 1
            while not is_username_unique:
                # Collision within this import batch.
                if entry["username"] in used_usernames:
                    entry["username"] = template_username + " " + str(count)
                    count += 1
                    continue
                # Collision with an existing user in the datastore.
                result = self.datastore.filter(
                    Collection("user"),
                    FilterOperator("username", "=", entry["username"]),
                    ["id"],
                )
                if result:
                    entry["username"] = template_username + " " + str(count)
                    count += 1
                    continue
                is_username_unique = True
            used_usernames.add(entry["username"])
def check_limit_of_meetings(self, committee_id: int, text: str = "import") -> int:
committee = self.datastore.get(
FullQualifiedId(Collection("committee"), committee_id), ["organization_id"]
)
organization_id = committee.get("organization_id", 0)
organization = self.datastore.get(
FullQualifiedId(Collection("organization"), organization_id),
["active_meeting_ids", "limit_of_meetings"],
)
if (
limit_of_meetings := organization.get("limit_of_meetings", 0)
) and limit_of_meetings == len(organization.get("active_meeting_ids", [])):
raise ActionException(
f"You cannot {text} an active meeting, because you reached your limit of {limit_of_meetings} active meetings."
)
return organization_id
    def update_meeting_and_users(
        self, instance: Dict[str, Any], organization_id: int
    ) -> None:
        """Adjust the imported meeting and its users for the target committee.

        Sets committee/organization links, assigns random hashed passwords to
        all imported users, disables anonymous access and stamps the import
        time. Mutates ``instance["meeting"]`` in place.
        """
        # update committee_id and is_active_in_organization_id
        json_data = instance["meeting"]
        self.get_meeting_from_json(json_data)["committee_id"] = instance["committee_id"]
        self.get_meeting_from_json(json_data)[
            "is_active_in_organization_id"
        ] = organization_id
        # generate passwords (imported credentials are never kept)
        for entry in json_data["user"].values():
            entry["password"] = self.auth.hash(get_random_string(10))
        # set enable_anonymous
        self.get_meeting_from_json(json_data)["enable_anonymous"] = False
        # set imported_at
        self.get_meeting_from_json(json_data)["imported_at"] = round(time.time())
def get_meeting_from_json(self, json_data: Any) -> Any:
"""
Small helper to retrieve the one and only meeting object from the import data.
"""
key = next(iter(json_data["meeting"]))
return json_data["meeting"][key]
def create_replace_map(self, json_data: Dict[str, Any]) -> None:
replace_map: Dict[str, Dict[int, int]] = defaultdict(dict)
for collection in json_data:
if not json_data[collection]:
continue
new_ids = self.datastore.reserve_ids(
Collection(collection), len(json_data[collection])
)
for entry, new_id in zip(json_data[collection].values(), new_ids):
replace_map[collection][entry["id"]] = new_id
self.replace_map = replace_map
def replace_fields(self, instance: Dict[str, Any]) -> None:
json_data = instance["meeting"]
new_json_data = {}
for collection in json_data:
new_collection = {}
for entry in json_data[collection].values():
for field in list(entry.keys()):
self.replace_field_ids(collection, entry, field)
new_collection[str(entry["id"])] = entry
new_json_data[collection] = new_collection
instance["meeting"] = new_json_data
def replace_field_ids(
self,
collection: str,
entry: Dict[str, Any],
field: str,
) -> None:
model_field = model_registry[Collection(collection)]().try_get_field(field)
if model_field is None:
raise ActionException(f"{collection}/{field} is not allowed.")
if isinstance(model_field, BaseRelationField):
if isinstance(model_field, BaseGenericRelationField):
content_list = (
content
if isinstance(content := entry.get(field), list)
else [content]
)
target_collections = [
item.split(KEYSEPARATOR)[0] for item in content_list if item
]
else:
target_collections = [k.collection for k in model_field.to.keys()]
if all(c not in self.allowed_collections for c in target_collections):
return
if field == "id":
entry["id"] = self.replace_map[collection][entry["id"]]
elif (
collection == "meeting"
and field == "user_ids"
and "user" in self.allowed_collections
):
entry[field] = [
self.replace_map["user"][id_] for id_ in entry.get(field) or []
]
elif collection == "user" and field == "meeting_ids":
entry[field] = list(self.replace_map["meeting"].values())
elif collection == "motion" and field == "recommendation_extension":
if entry[field]:
fqids_str = RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN.findall(
entry[field]
)
entry_str = entry[field]
entry_list = []
for fqid in fqids_str:
search_str = "[" + fqid + "]"
idx = entry_str.find(search_str)
entry_list.append(entry_str[:idx])
col, id_ = fqid.split(KEYSEPARATOR)
replace_str = (
"["
+ col
+ KEYSEPARATOR
+ str(self.replace_map[col][int(id_)])
+ "]"
)
entry_list.append(replace_str)
entry_str = entry_str[idx + len(replace_str) :]
entry_list.append(entry_str)
entry[field] = "".join(entry_list)
else:
if (
isinstance(model_field, BaseTemplateField)
and model_field.is_template_field(field)
and model_field.replacement_collection
):
entry[field] = [
str(
self.replace_map[model_field.replacement_collection.collection][
int(id_)
]
)
for id_ in entry[field]
]
elif (
isinstance(model_field, BaseTemplateField)
and model_field.is_template_field(field)
and not model_field.replacement_collection
):
pass
elif isinstance(model_field, RelationField):
target_collection = model_field.get_target_collection().collection
if entry[field]:
entry[field] = self.replace_map[target_collection][entry[field]]
elif isinstance(model_field, RelationListField):
target_collection = model_field.get_target_collection().collection
entry[field] = [
self.replace_map[target_collection][id_]
for id_ in entry.get(field) or []
]
elif isinstance(model_field, GenericRelationField):
if entry[field]:
name, id_ = entry[field].split(KEYSEPARATOR)
entry[field] = (
name + KEYSEPARATOR + str(self.replace_map[name][int(id_)])
)
elif isinstance(model_field, GenericRelationListField):
new_fqid_list = []
for fqid in entry[field]:
name, id_ = fqid.split(KEYSEPARATOR)
new_fqid_list.append(
name + KEYSEPARATOR + str(self.replace_map[name][int(id_)])
)
entry[field] = new_fqid_list
if (
isinstance(model_field, BaseTemplateField)
and model_field.replacement_collection
and not model_field.is_template_field(field)
):
replacement = model_field.get_replacement(field)
id_ = int(replacement)
new_id_ = self.replace_map[
model_field.replacement_collection.collection
][id_]
new_field = model_field.get_structured_field_name(new_id_)
tmp = entry[field]
del entry[field]
entry[new_field] = tmp
def update_admin_group(self, data_json: Dict[str, Any]) -> None:
admin_group_id = self.get_meeting_from_json(data_json)["admin_group_id"]
for entry in data_json["group"].values():
if entry["id"] == admin_group_id:
if entry["user_ids"]:
entry["user_ids"].insert(0, self.user_id)
else:
entry["user_ids"] = [self.user_id]
self.get_meeting_from_json(data_json)["user_ids"].insert(0, self.user_id)
def upload_mediadata(self) -> None:
for blob, id_, mimetype in self.mediadata:
replaced_id = self.replace_map["mediafile"][id_]
self.media.upload_mediafile(blob, replaced_id, mimetype)
def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]:
json_data = instance["meeting"]
meeting_id = self.get_meeting_from_json(json_data)["id"]
write_requests = []
for collection in json_data:
for entry in json_data[collection].values():
fqid = FullQualifiedId(Collection(collection), entry["id"])
write_requests.append(
self.build_write_request(
EventType.Create,
fqid,
f"import meeting {meeting_id}",
entry,
)
)
# add meeting to committee/meeting_ids
write_requests.append(
self.build_write_request(
EventType.Update,
FullQualifiedId(
Collection("committee"),
self.get_meeting_from_json(json_data)["committee_id"],
),
f"import meeting {meeting_id}",
None,
{"add": {"meeting_ids": [meeting_id]}, "remove": {}},
)
)
# add meeting to organization/active_meeting_ids if not archived
if self.get_meeting_from_json(json_data).get("is_active_in_organization_id"):
write_requests.append(
self.build_write_request(
EventType.Update,
FullQualifiedId(Collection("organization"), 1),
f"import meeting {meeting_id}",
None,
{"add": {"active_meeting_ids": [meeting_id]}, "remove": {}},
)
)
return write_requests
def create_action_result_element(
self, instance: Dict[str, Any]
) -> Optional[ActionResultElement]:
"""Returns the newly created id."""
return {"id": self.get_meeting_from_json(instance["meeting"])["id"]}
def check_permissions(self, instance: Dict[str, Any]) -> None:
if not has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["committee_id"],
):
raise MissingPermission(CommitteeManagementLevel.CAN_MANAGE)
| 41.885 | 126 | 0.580936 | import time
from collections import defaultdict
from typing import Any, Dict, Iterable, Optional, Tuple
from ....models.base import model_registry
from ....models.checker import Checker, CheckException
from ....models.fields import (
BaseGenericRelationField,
BaseRelationField,
BaseTemplateField,
GenericRelationField,
GenericRelationListField,
RelationField,
RelationListField,
)
from ....models.models import Meeting
from ....permissions.management_levels import CommitteeManagementLevel
from ....permissions.permission_helper import has_committee_management_level
from ....shared.exceptions import ActionException, MissingPermission
from ....shared.filters import FilterOperator
from ....shared.interfaces.event import EventType
from ....shared.interfaces.write_request import WriteRequest
from ....shared.patterns import KEYSEPARATOR, Collection, FullQualifiedId
from ...action import Action
from ...mixins.singular_action_mixin import SingularActionMixin
from ...util.crypto import get_random_string
from ...util.default_schema import DefaultSchema
from ...util.register import register_action
from ...util.typing import ActionData, ActionResultElement, ActionResults
from ..motion.update import RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN
from ..user.user_mixin import LimitOfUserMixin
@register_action("meeting.import")
class MeetingImport(SingularActionMixin, LimitOfUserMixin, Action):
model = Meeting()
schema = DefaultSchema(Meeting()).get_default_schema(
required_properties=["committee_id"],
additional_required_fields={"meeting": {"type": "object"}},
title="Import meeting",
description="Import a meeting into the committee.",
)
def perform(
self, action_data: ActionData, user_id: int, internal: bool = False
) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]:
self.user_id = user_id
self.index = 0
action_data = self.get_updated_instances(action_data)
instance = next(iter(action_data))
self.validate_instance(instance)
try:
self.check_permissions(instance)
except MissingPermission as e:
msg = f"You are not allowed to perform action {self.name}."
e.message = msg + " " + e.message
raise e
instance = self.base_update_instance(instance)
self.write_requests.extend(self.create_write_requests(instance))
final_write_request = self.process_write_requests()
result = [self.create_action_result_element(instance)]
return (final_write_request, result)
def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:
meeting_json = instance["meeting"]
if not len(meeting_json.get("meeting", {}).values()) == 1:
raise ActionException("Need exact one meeting in meeting collection.")
self.check_usernames_and_generate_new_ones(meeting_json)
active_user_in_json = len(
[
key
for key in meeting_json.get("user", [])
if meeting_json["user"][key]["is_active"]
]
)
self.check_limit_of_user(active_user_in_json)
self.mediadata = []
for entry in meeting_json.get("mediafile", {}).values():
if "blob" in entry:
self.mediadata.append(
(entry.pop("blob"), entry["id"], entry["mimetype"])
)
checker = Checker(data=meeting_json, mode="external")
try:
checker.run_check()
except CheckException as ce:
raise ActionException(str(ce))
self.allowed_collections = checker.allowed_collections
for entry in meeting_json.get("motion", {}).values():
if entry.get("all_origin_ids") or entry.get("all_derived_motion_ids"):
raise ActionException(
"Motion all_origin_ids and all_derived_motion_ids should be empty."
)
organization_id = self.check_limit_of_meetings(instance["committee_id"])
self.update_meeting_and_users(instance, organization_id)
self.create_replace_map(meeting_json)
self.replace_fields(instance)
self.update_admin_group(meeting_json)
self.upload_mediadata()
return instance
def check_usernames_and_generate_new_ones(self, json_data: Dict[str, Any]) -> None:
used_usernames = set()
for entry in json_data.get("user", {}).values():
is_username_unique = False
template_username = entry["username"]
count = 1
while not is_username_unique:
if entry["username"] in used_usernames:
entry["username"] = template_username + " " + str(count)
count += 1
continue
result = self.datastore.filter(
Collection("user"),
FilterOperator("username", "=", entry["username"]),
["id"],
)
if result:
entry["username"] = template_username + " " + str(count)
count += 1
continue
is_username_unique = True
used_usernames.add(entry["username"])
def check_limit_of_meetings(self, committee_id: int, text: str = "import") -> int:
committee = self.datastore.get(
FullQualifiedId(Collection("committee"), committee_id), ["organization_id"]
)
organization_id = committee.get("organization_id", 0)
organization = self.datastore.get(
FullQualifiedId(Collection("organization"), organization_id),
["active_meeting_ids", "limit_of_meetings"],
)
if (
limit_of_meetings := organization.get("limit_of_meetings", 0)
) and limit_of_meetings == len(organization.get("active_meeting_ids", [])):
raise ActionException(
f"You cannot {text} an active meeting, because you reached your limit of {limit_of_meetings} active meetings."
)
return organization_id
def update_meeting_and_users(
self, instance: Dict[str, Any], organization_id: int
) -> None:
json_data = instance["meeting"]
self.get_meeting_from_json(json_data)["committee_id"] = instance["committee_id"]
self.get_meeting_from_json(json_data)[
"is_active_in_organization_id"
] = organization_id
for entry in json_data["user"].values():
entry["password"] = self.auth.hash(get_random_string(10))
self.get_meeting_from_json(json_data)["enable_anonymous"] = False
self.get_meeting_from_json(json_data)["imported_at"] = round(time.time())
def get_meeting_from_json(self, json_data: Any) -> Any:
key = next(iter(json_data["meeting"]))
return json_data["meeting"][key]
def create_replace_map(self, json_data: Dict[str, Any]) -> None:
replace_map: Dict[str, Dict[int, int]] = defaultdict(dict)
for collection in json_data:
if not json_data[collection]:
continue
new_ids = self.datastore.reserve_ids(
Collection(collection), len(json_data[collection])
)
for entry, new_id in zip(json_data[collection].values(), new_ids):
replace_map[collection][entry["id"]] = new_id
self.replace_map = replace_map
def replace_fields(self, instance: Dict[str, Any]) -> None:
json_data = instance["meeting"]
new_json_data = {}
for collection in json_data:
new_collection = {}
for entry in json_data[collection].values():
for field in list(entry.keys()):
self.replace_field_ids(collection, entry, field)
new_collection[str(entry["id"])] = entry
new_json_data[collection] = new_collection
instance["meeting"] = new_json_data
def replace_field_ids(
self,
collection: str,
entry: Dict[str, Any],
field: str,
) -> None:
model_field = model_registry[Collection(collection)]().try_get_field(field)
if model_field is None:
raise ActionException(f"{collection}/{field} is not allowed.")
if isinstance(model_field, BaseRelationField):
if isinstance(model_field, BaseGenericRelationField):
content_list = (
content
if isinstance(content := entry.get(field), list)
else [content]
)
target_collections = [
item.split(KEYSEPARATOR)[0] for item in content_list if item
]
else:
target_collections = [k.collection for k in model_field.to.keys()]
if all(c not in self.allowed_collections for c in target_collections):
return
if field == "id":
entry["id"] = self.replace_map[collection][entry["id"]]
elif (
collection == "meeting"
and field == "user_ids"
and "user" in self.allowed_collections
):
entry[field] = [
self.replace_map["user"][id_] for id_ in entry.get(field) or []
]
elif collection == "user" and field == "meeting_ids":
entry[field] = list(self.replace_map["meeting"].values())
elif collection == "motion" and field == "recommendation_extension":
if entry[field]:
fqids_str = RECOMMENDATION_EXTENSION_REFERENCE_IDS_PATTERN.findall(
entry[field]
)
entry_str = entry[field]
entry_list = []
for fqid in fqids_str:
search_str = "[" + fqid + "]"
idx = entry_str.find(search_str)
entry_list.append(entry_str[:idx])
col, id_ = fqid.split(KEYSEPARATOR)
replace_str = (
"["
+ col
+ KEYSEPARATOR
+ str(self.replace_map[col][int(id_)])
+ "]"
)
entry_list.append(replace_str)
entry_str = entry_str[idx + len(replace_str) :]
entry_list.append(entry_str)
entry[field] = "".join(entry_list)
else:
if (
isinstance(model_field, BaseTemplateField)
and model_field.is_template_field(field)
and model_field.replacement_collection
):
entry[field] = [
str(
self.replace_map[model_field.replacement_collection.collection][
int(id_)
]
)
for id_ in entry[field]
]
elif (
isinstance(model_field, BaseTemplateField)
and model_field.is_template_field(field)
and not model_field.replacement_collection
):
pass
elif isinstance(model_field, RelationField):
target_collection = model_field.get_target_collection().collection
if entry[field]:
entry[field] = self.replace_map[target_collection][entry[field]]
elif isinstance(model_field, RelationListField):
target_collection = model_field.get_target_collection().collection
entry[field] = [
self.replace_map[target_collection][id_]
for id_ in entry.get(field) or []
]
elif isinstance(model_field, GenericRelationField):
if entry[field]:
name, id_ = entry[field].split(KEYSEPARATOR)
entry[field] = (
name + KEYSEPARATOR + str(self.replace_map[name][int(id_)])
)
elif isinstance(model_field, GenericRelationListField):
new_fqid_list = []
for fqid in entry[field]:
name, id_ = fqid.split(KEYSEPARATOR)
new_fqid_list.append(
name + KEYSEPARATOR + str(self.replace_map[name][int(id_)])
)
entry[field] = new_fqid_list
if (
isinstance(model_field, BaseTemplateField)
and model_field.replacement_collection
and not model_field.is_template_field(field)
):
replacement = model_field.get_replacement(field)
id_ = int(replacement)
new_id_ = self.replace_map[
model_field.replacement_collection.collection
][id_]
new_field = model_field.get_structured_field_name(new_id_)
tmp = entry[field]
del entry[field]
entry[new_field] = tmp
def update_admin_group(self, data_json: Dict[str, Any]) -> None:
admin_group_id = self.get_meeting_from_json(data_json)["admin_group_id"]
for entry in data_json["group"].values():
if entry["id"] == admin_group_id:
if entry["user_ids"]:
entry["user_ids"].insert(0, self.user_id)
else:
entry["user_ids"] = [self.user_id]
self.get_meeting_from_json(data_json)["user_ids"].insert(0, self.user_id)
def upload_mediadata(self) -> None:
for blob, id_, mimetype in self.mediadata:
replaced_id = self.replace_map["mediafile"][id_]
self.media.upload_mediafile(blob, replaced_id, mimetype)
def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]:
json_data = instance["meeting"]
meeting_id = self.get_meeting_from_json(json_data)["id"]
write_requests = []
for collection in json_data:
for entry in json_data[collection].values():
fqid = FullQualifiedId(Collection(collection), entry["id"])
write_requests.append(
self.build_write_request(
EventType.Create,
fqid,
f"import meeting {meeting_id}",
entry,
)
)
write_requests.append(
self.build_write_request(
EventType.Update,
FullQualifiedId(
Collection("committee"),
self.get_meeting_from_json(json_data)["committee_id"],
),
f"import meeting {meeting_id}",
None,
{"add": {"meeting_ids": [meeting_id]}, "remove": {}},
)
)
if self.get_meeting_from_json(json_data).get("is_active_in_organization_id"):
write_requests.append(
self.build_write_request(
EventType.Update,
FullQualifiedId(Collection("organization"), 1),
f"import meeting {meeting_id}",
None,
{"add": {"active_meeting_ids": [meeting_id]}, "remove": {}},
)
)
return write_requests
def create_action_result_element(
self, instance: Dict[str, Any]
) -> Optional[ActionResultElement]:
return {"id": self.get_meeting_from_json(instance["meeting"])["id"]}
def check_permissions(self, instance: Dict[str, Any]) -> None:
if not has_committee_management_level(
self.datastore,
self.user_id,
CommitteeManagementLevel.CAN_MANAGE,
instance["committee_id"],
):
raise MissingPermission(CommitteeManagementLevel.CAN_MANAGE)
| true | true |
f73adadd7703c2725598c49857ef6d2b4ce4bf23 | 2,789 | py | Python | tests/integration/test_compression_nested_columns/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 15,577 | 2019-09-23T11:57:53.000Z | 2022-03-31T18:21:48.000Z | tests/integration/test_compression_nested_columns/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 16,476 | 2019-09-23T11:47:00.000Z | 2022-03-31T23:06:01.000Z | tests/integration/test_compression_nested_columns/test.py | pdv-ru/ClickHouse | 0ff975bcf3008fa6c6373cbdfed16328e3863ec5 | [
"Apache-2.0"
] | 3,633 | 2019-09-23T12:18:28.000Z | 2022-03-31T15:55:48.000Z | import random
import string
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_compression_codec_byte(node, table_name, part_name, filename):
cmd = "tail -c +17 /var/lib/clickhouse/data/default/{}/{}/{}.bin | od -x -N 1 | head -n 1 | awk '{{print $2}}'".format(
table_name, part_name, filename)
return node.exec_in_container(["bash", "-c", cmd]).strip()
CODECS_MAPPING = {
'NONE' : '0002',
'LZ4': '0082',
'LZ4HC': '0082', # not an error, same byte
'ZSTD': '0090',
'Multiple': '0091',
'Delta': '0092',
'T64': '0093',
}
def test_nested_compression_codec(start_cluster):
for i, node in enumerate([node1, node2]):
node.query("""
CREATE TABLE compression_table (
key UInt64,
column_ok Nullable(UInt64) CODEC(Delta, LZ4),
column_array Array(Array(UInt64)) CODEC(T64, LZ4),
column_bad LowCardinality(Int64) CODEC(Delta)
) ENGINE = ReplicatedMergeTree('/t', '{}') ORDER BY tuple() PARTITION BY key
SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0;
""".format(i), settings={"allow_suspicious_codecs" : "1", "allow_suspicious_low_cardinality_types" : "1"})
node1.query("INSERT INTO compression_table VALUES (1, 1, [[77]], 32)")
node2.query("SYSTEM SYNC REPLICA compression_table", timeout=5)
node1.query("DETACH TABLE compression_table")
node2.query("DETACH TABLE compression_table")
node1.query("ATTACH TABLE compression_table")
node2.query("ATTACH TABLE compression_table")
for node in [node1, node2]:
assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok") == CODECS_MAPPING['Multiple']
assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok.null") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_array") == CODECS_MAPPING['Multiple']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_array.size0") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_array.size1") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_bad.dict") == CODECS_MAPPING['Delta']
assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_bad") == CODECS_MAPPING['NONE']
| 40.42029 | 127 | 0.687343 | import random
import string
import pytest
from helpers.cluster import ClickHouseCluster
cluster = ClickHouseCluster(__file__)
node1 = cluster.add_instance('node1', with_zookeeper=True)
node2 = cluster.add_instance('node2', with_zookeeper=True)
@pytest.fixture(scope="module")
def start_cluster():
try:
cluster.start()
yield cluster
finally:
cluster.shutdown()
def get_compression_codec_byte(node, table_name, part_name, filename):
cmd = "tail -c +17 /var/lib/clickhouse/data/default/{}/{}/{}.bin | od -x -N 1 | head -n 1 | awk '{{print $2}}'".format(
table_name, part_name, filename)
return node.exec_in_container(["bash", "-c", cmd]).strip()
CODECS_MAPPING = {
'NONE' : '0002',
'LZ4': '0082',
'LZ4HC': '0082',
'ZSTD': '0090',
'Multiple': '0091',
'Delta': '0092',
'T64': '0093',
}
def test_nested_compression_codec(start_cluster):
for i, node in enumerate([node1, node2]):
node.query("""
CREATE TABLE compression_table (
key UInt64,
column_ok Nullable(UInt64) CODEC(Delta, LZ4),
column_array Array(Array(UInt64)) CODEC(T64, LZ4),
column_bad LowCardinality(Int64) CODEC(Delta)
) ENGINE = ReplicatedMergeTree('/t', '{}') ORDER BY tuple() PARTITION BY key
SETTINGS min_rows_for_wide_part = 0, min_bytes_for_wide_part = 0;
""".format(i), settings={"allow_suspicious_codecs" : "1", "allow_suspicious_low_cardinality_types" : "1"})
node1.query("INSERT INTO compression_table VALUES (1, 1, [[77]], 32)")
node2.query("SYSTEM SYNC REPLICA compression_table", timeout=5)
node1.query("DETACH TABLE compression_table")
node2.query("DETACH TABLE compression_table")
node1.query("ATTACH TABLE compression_table")
node2.query("ATTACH TABLE compression_table")
for node in [node1, node2]:
assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok") == CODECS_MAPPING['Multiple']
assert get_compression_codec_byte(node, "compression_table", "1_0_0_0", "column_ok.null") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_array") == CODECS_MAPPING['Multiple']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_array.size0") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_array.size1") == CODECS_MAPPING['LZ4']
assert get_compression_codec_byte(node2, "compression_table", "1_0_0_0", "column_bad.dict") == CODECS_MAPPING['Delta']
assert get_compression_codec_byte(node1, "compression_table", "1_0_0_0", "column_bad") == CODECS_MAPPING['NONE']
| true | true |
f73adb34e962a85c6017136f3cd1d2bd6aea50aa | 145,859 | py | Python | src/pyOpenMS/tests/unittests/test000.py | emilpaulitz/OpenMS | bd19a8cfcfa3bc96f58a1d1e605e094e522c5a6e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | null | null | null | src/pyOpenMS/tests/unittests/test000.py | emilpaulitz/OpenMS | bd19a8cfcfa3bc96f58a1d1e605e094e522c5a6e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | 1 | 2018-10-29T17:15:55.000Z | 2018-11-11T19:29:01.000Z | src/pyOpenMS/tests/unittests/test000.py | emilpaulitz/OpenMS | bd19a8cfcfa3bc96f58a1d1e605e094e522c5a6e | [
"BSL-1.0",
"Zlib",
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import pyopenms
import copy
import os
from pyopenms import String as s
import numpy as np
# Report which pyopenms build was actually picked up (helps when several
# installations are on sys.path).
print("IMPORTED ", pyopenms.__file__)
# Python 2/3 compatibility: Python 3 removed the `long` type, so alias it
# to `int` when it is missing.
try:
    long
except NameError:
    long = int
from functools import wraps
import sys
def _testStrOutput(input_str):
if sys.version_info[0] < 3:
assert isinstance(input_str, unicode)
else:
assert isinstance( input_str, str)
def report(f):
    """Decorator that announces the wrapped test on stdout before running it."""
    def _announce_and_run(*args, **kwargs):
        print("run ", f.__name__)
        f(*args, **kwargs)
    # apply functools.wraps explicitly so the wrapper keeps f's metadata
    return wraps(f)(_announce_and_run)
@report
def _testMetaInfoInterface(what):
    """Exercises the MetaInfoInterface API on *what*:

    setMetaValue, getMetaValue, getKeys, metaValueExists,
    removeMetaValue and clearMetaInfo.
    """
    what.setMetaValue("key", 42)
    what.setMetaValue("key2", 42)

    stored_keys = []
    what.getKeys(stored_keys)
    # getKeys fills the list with byte strings
    assert stored_keys
    assert all(isinstance(k, bytes) for k in stored_keys)
    assert what.getMetaValue(stored_keys[0]) == 42
    assert what.metaValueExists("key")

    # after removing "key" the remaining entry still maps to 42
    what.removeMetaValue("key")
    stored_keys = []
    what.getKeys(stored_keys)
    assert what.getMetaValue(stored_keys[0]) == 42

    # clearing wipes all meta values
    what.clearMetaInfo()
    stored_keys = []
    what.getKeys(stored_keys)
    assert not stored_keys
@report
def _testUniqueIdInterface(what):
    """Exercises the UniqueIdInterface API on *what*:

    hasInvalidUniqueId, hasValidUniqueId, ensureUniqueId,
    getUniqueId, clearUniqueId and setUniqueId.
    """

    def _assert_valid_id(obj):
        # a freshly ensured id is a positive integer and flagged as valid
        assert isinstance(obj.getUniqueId(), (int, long))
        assert obj.getUniqueId() > 0
        assert not obj.hasInvalidUniqueId()
        assert obj.hasValidUniqueId()

    # starts out without a valid id
    assert what.hasInvalidUniqueId()
    assert not what.hasValidUniqueId()

    assert what.ensureUniqueId()
    _assert_valid_id(what)

    # clearing resets the id to 0 and invalidates it
    what.clearUniqueId()
    assert what.getUniqueId() == 0
    assert what.hasInvalidUniqueId()
    assert not what.hasValidUniqueId()

    assert what.ensureUniqueId()
    _assert_valid_id(what)

    # an explicitly set id is reported back verbatim
    what.setUniqueId(1234)
    assert what.getUniqueId() == 1234
def _testProgressLogger(ff):
    """Exercises the ProgressLogger API on *ff*:

    setLogType/getLogType, startProgress, setProgress and endProgress.
    """
    # silence the logger, then round-trip the log type
    ff.setLogType(pyopenms.LogType.NONE)
    assert ff.getLogType() == pyopenms.LogType.NONE

    # walk a 0..3 progress range from start to end
    ff.startProgress(0, 3, "label")
    for step in range(4):
        ff.setProgress(step)
    ff.endProgress()
@report
def testSpectrumAlignment():
    """
    @tests: SpectrumAlignment
     SpectrumAlignment.__init__
     SpectrumAlignment.getSpectrumAlignment
    """
    # the relevant attributes have to exist
    pyopenms.SpectrumAlignment
    pyopenms.SpectrumAlignment.__init__
    pyopenms.SpectrumAlignment.getDefaults
    pyopenms.SpectrumAlignment.getParameters
    pyopenms.SpectrumAlignment.setParameters

    def build_spectrum(mz_values):
        # all peaks share the same intensity; push_back copies the peak
        spectrum = pyopenms.MSSpectrum()
        peak = pyopenms.Peak1D()
        for mz in mz_values:
            peak.setMZ(mz)
            peak.setIntensity(200.0)
            spectrum.push_back(peak)
        return spectrum

    spec = build_spectrum([1000.0, 2000.0])
    rich_spec = build_spectrum([1000.001, 2000.001, 3000.001])

    # peaks within tolerance align pairwise, extra peaks stay unmatched
    aligner = pyopenms.SpectrumAlignment()
    result = []
    aligner.getSpectrumAlignment(result, spec, spec)
    assert result == [(0, 0), (1, 1)], result
    aligner.getSpectrumAlignment(result, rich_spec, spec)
    assert result == [(0, 0), (1, 1)], result
    aligner.getSpectrumAlignment(result, spec, rich_spec)
    assert result == [(0, 0), (1, 1)], result
    aligner.getSpectrumAlignment(result, rich_spec, rich_spec)
    assert result == [(0, 0), (1, 1), (2, 2)], result

    # the score functor accepts one or two spectra and yields a float
    scorer = pyopenms.SpectrumAlignmentScore()
    assert isinstance(scorer(spec), float)
    assert isinstance(scorer(rich_spec), float)
    assert isinstance(scorer(spec, rich_spec), float)
    assert isinstance(scorer(rich_spec, spec), float)
    assert isinstance(scorer(spec, spec), float)
    assert isinstance(scorer(rich_spec, rich_spec), float)
@report
def testAASequence():
    """
    @tests: AASequence
     AASequence.__init__
     AASequence.__add__
     AASequence.__radd__
     AASequence.__iadd__
     AASequence.getCTerminalModificationName
     AASequence.getNTerminalModificationName
     AASequence.setCTerminalModification
     AASequence.setModification
     AASequence.setNTerminalModification
     AASequence.toString
     AASequence.toUnmodifiedString
    """
    # concatenation operators must be usable on (empty) sequences
    aas = pyopenms.AASequence()
    aas + aas
    aas += aas
    aas.__doc__

    # an unmodified sequence reports no terminal modifications
    aas = pyopenms.AASequence.fromString("DFPIANGER")
    assert aas.getCTerminalModificationName() == ""
    assert aas.getNTerminalModificationName() == ""
    aas.setCTerminalModification("")
    aas.setNTerminalModification("")
    assert aas.toString() == "DFPIANGER"
    assert aas.toUnmodifiedString() == "DFPIANGER"

    aas = pyopenms.AASequence.fromStringPermissive("DFPIANGER", True)
    assert aas.toString() == "DFPIANGER"
    assert aas.toUnmodifiedString() == "DFPIANGER"

    # modified sequence: check the different string representations
    seq = pyopenms.AASequence.fromString("PEPTIDESEKUEM(Oxidation)CER")
    assert seq.toString() == "PEPTIDESEKUEM(Oxidation)CER"
    assert seq.toUnmodifiedString() == "PEPTIDESEKUEMCER"
    assert seq.toBracketString() == "PEPTIDESEKUEM[147]CER"
    assert seq.toBracketString(True) == "PEPTIDESEKUEM[147]CER"
    # the exact float representation differs between builds, accept both
    # (the original had this assertion duplicated verbatim; checked once here)
    assert seq.toBracketString(False) == "PEPTIDESEKUEM[147.03540001709996]CER" or \
           seq.toBracketString(False) == "PEPTIDESEKUEM[147.035400017100017]CER"
    assert seq.toUniModString() == "PEPTIDESEKUEM(UniMod:35)CER"
    assert seq.isModified()
    assert not seq.hasCTerminalModification()
    assert not seq.hasNTerminalModification()
    assert not seq.empty()

    # has selenocysteine (U), which must still resolve to a residue
    assert seq.getResidue(1) is not None
    assert seq.size() == 16

    # test exception forwarding from C++ to python:
    # classes derived from std::runtime_exception can be caught in python
    try:
        seq.getResidue(1000) # does not exist
    except RuntimeError:
        print("Exception successfully triggered.")
    else:
        print("Error: Exception not triggered.")
        assert False

    assert seq.getFormula(pyopenms.Residue.ResidueType.Full, 0) == pyopenms.EmpiricalFormula("C75H122N20O32S2Se1")
    assert abs(seq.getMonoWeight(pyopenms.Residue.ResidueType.Full, 0) - 1952.7200317517998) < 1e-5

    # assert seq.has(pyopenms.ResidueDB.getResidue("P"))
@report
def testElement():
    """
    @tests: Element
     Element.__init__
     Element.setAtomicNumber
     Element.getAtomicNumber
     Element.setAverageWeight
     Element.getAverageWeight
     Element.setMonoWeight
     Element.getMonoWeight
     Element.setIsotopeDistribution
     Element.getIsotopeDistribution
     Element.setName
     Element.getName
     Element.setSymbol
     Element.getSymbol
    """
    # smoke-test every setter/getter pair on a carbon-like element
    ins = pyopenms.Element()

    ins.setAtomicNumber(6)
    ins.getAtomicNumber()
    ins.setAverageWeight(12.011)
    ins.getAverageWeight()
    ins.setMonoWeight(12)
    ins.getMonoWeight()
    iso = pyopenms.IsotopeDistribution()
    ins.setIsotopeDistribution(iso)
    ins.getIsotopeDistribution()
    ins.setName("Carbon")
    ins.getName()
    ins.setSymbol("C")
    ins.getSymbol()

    # setSymbol must accept bytes, unicode and OpenMS String alike
    e = pyopenms.Element()
    e.setSymbol("blah")
    e.setSymbol("blah")
    e.setSymbol(u"blah")
    e.setSymbol(str("blah"))
    oms_string = s("blu")
    e.setSymbol(oms_string)
    # the OpenMS String is not consumed / invalidated by the call
    assert oms_string
    assert oms_string.toString() == "blu"

    # non-ASCII round-trips: the same text encoded as UTF-8 and Latin-1
    evil = u"blü"
    evil8 = evil.encode("utf8")
    evil1 = evil.encode("latin1")
    # UTF-8 bytes come back as the original text ...
    e.setSymbol(evil.encode("utf8"))
    assert e.getSymbol() == u"blü"
    # ... while Latin-1 bytes come back as bytes and need explicit decoding
    e.setSymbol(evil.encode("latin1"))
    assert e.getSymbol().decode("latin1") == u"blü"

    # If we get the raw symbols, we get bytes (which we would need to decode first)
    e.setSymbol(evil8.decode("utf8"))
    # assert e.getSymbol() == 'bl\xc3\xbc', e.getSymbol()
    assert e.getSymbol() == u"blü" #.encode("utf8")

    # OpenMS strings, however, understand the decoding
    assert s(e.getSymbol()) == s(u"blü")
    assert s(e.getSymbol()).toString() == u"blü"

    # What if you use the wrong decoding ?
    e.setSymbol(evil1)
    assert e.getSymbol().decode("latin1") == u"blü"
    e.setSymbol(evil8)
    assert e.getSymbol() == u"blü"
@report
def testResidue():
    """
    @tests: Residue
    Residue.__init__
    """
    # Default construction must succeed and every ResidueType member must exist.
    residue = pyopenms.Residue()
    for type_name in ("Full", "Internal", "NTerminal", "CTerminal",
                      "AIon", "BIon", "CIon", "XIon", "YIon", "ZIon",
                      "SizeOfResidueType"):
        getattr(pyopenms.Residue.ResidueType, type_name)
@report
def testIsotopeDistribution():
    """
    @tests: IsotopeDistribution
    IsotopeDistribution.__init__
    """
    dist = pyopenms.IsotopeDistribution()
    # Exercise the basic accessors and mutators on an empty distribution.
    dist.getMax()
    dist.getMin()
    dist.size()
    dist.clear()
    dist.renormalize()
    dist.trimLeft(6.0)
    dist.trimRight(8.0)
    # Two inserted entries must be reported by size() and be iterable.
    dist.clear()
    for position, abundance in ((1, 2), (6, 5)):
        dist.insert(position, abundance)
    assert dist.size() == 2
    for entry in dist:
        print(entry)
@report
def testFineIsotopePatternGenerator():
    """
    @tests: FineIsotopePatternGenerator
    """
    gen = pyopenms.FineIsotopePatternGenerator()
    gen.setThreshold(1e-5)
    gen.setAbsolute(True)
    assert gen.getAbsolute()
    methanol = pyopenms.EmpiricalFormula("CH3OH")
    hydrated = methanol + pyopenms.EmpiricalFormula("H2O")
    # Lowering the threshold reveals more (ever less abundant) isotopologues.
    for threshold, expected in ((1e-20, 56), (1e-200, 84)):
        dist = hydrated.getIsotopeDistribution(
            pyopenms.FineIsotopePatternGenerator(threshold, False, False))
        assert len(dist.getContainer()) == expected
    c100 = pyopenms.EmpiricalFormula("C100")
    dist = c100.getIsotopeDistribution(
        pyopenms.FineIsotopePatternGenerator(1e-200, False, False))
    assert len(dist.getContainer()) == 101
    # Expected pattern sizes for C100 over threshold / boolean-flag combinations
    # (flag semantics as wrapped by the constructor; values match the originals).
    expected_sizes = [
        (1e-2, False, False, 6),
        (1e-2, False, True, 5),
        (1e-2, True, False, 5),
        (1e-2, True, True, 5),
        (1e-10, False, False, 14),
        (1e-10, False, True, 13),
        (1e-10, True, False, 10),
        (1e-10, True, True, 10),
    ]
    for threshold, flag_a, flag_b, expected in expected_sizes:
        maker = pyopenms.FineIsotopePatternGenerator(threshold, flag_a, flag_b)
        assert c100.getIsotopeDistribution(maker).size() == expected
    runner = pyopenms.FineIsotopePatternGenerator(1e-5, False, False)
    container = runner.run(methanol).getContainer()
    assert len(container) == 6
    assert abs(container[0].getMZ() - 32.0262151276) < 1e-5
    assert container[0].getIntensity() - 0.986442089081 < 1e-5
@report
def testCoarseIsotopePatternGenerator():
    """
    @tests: CoarseIsotopePatternGenerator
    CoarseIsotopePatternGenerator.__init__
    CoarseIsotopePatternGenerator.getMaxIsotope()
    CoarseIsotopePatternGenerator.setMaxIsotope()
    CoarseIsotopePatternGenerator.estimateFromPeptideWeight()
    """
    gen = pyopenms.CoarseIsotopePatternGenerator()
    gen.setMaxIsotope(5)
    assert gen.getMaxIsotope() == 5
    gen.estimateFromPeptideWeight(500)
    methanol = pyopenms.EmpiricalFormula("CH3OH")
    hydrated = methanol + pyopenms.EmpiricalFormula("H2O")
    # A max-isotope of 3 caps the pattern at 3 entries; 0 yields 18 entries.
    for max_isotope, expected in ((3, 3), (0, 18)):
        dist = hydrated.getIsotopeDistribution(
            pyopenms.CoarseIsotopePatternGenerator(max_isotope))
        assert len(dist.getContainer()) == expected, len(dist.getContainer())
    runner = pyopenms.CoarseIsotopePatternGenerator(10)
    container = runner.run(methanol).getContainer()
    assert len(container) == 10, len(container)
    assert abs(container[0].getMZ() - 32.0262151276) < 1e-5
    assert container[0].getIntensity() - 0.986442089081 < 1e-5
@report
def testEmpiricalFormula():
    """
    @tests: EmpiricalFormula
    EmpiricalFormula.__init__
    EmpiricalFormula.getMonoWeight
    EmpiricalFormula.getAverageWeight
    EmpiricalFormula.getIsotopeDistribution
    EmpiricalFormula.getNumberOfAtoms
    EmpiricalFormula.setCharge
    EmpiricalFormula.getCharge
    EmpiricalFormula.toString
    EmpiricalFormula.isEmpty
    EmpiricalFormula.isCharged
    EmpiricalFormula.hasElement
    EmpiricalFormula.hasElement
    """
    # Exercise every accessor on the empty formula first.
    formula = pyopenms.EmpiricalFormula()
    formula.getMonoWeight()
    formula.getAverageWeight()
    formula.getIsotopeDistribution(pyopenms.CoarseIsotopePatternGenerator(0))
    # formula.getNumberOf(0)
    # formula.getNumberOf("test")
    formula.getNumberOfAtoms()
    formula.setCharge(2)
    formula.getCharge()
    formula.toString()
    formula.isEmpty()
    formula.isCharged()
    formula.hasElement(pyopenms.Element())
    # String round trip and elemental composition of C2H5
    # (composition keys are bytes).
    ethyl = pyopenms.EmpiricalFormula("C2H5")
    assert ethyl.toString() == "C2H5"
    composition = ethyl.getElementalComposition()
    assert composition[b"C"] == 2
    assert composition[b"H"] == 5
    assert ethyl.getNumberOfAtoms() == 7
@report
def testIdentificationHit():
    """
    @tests: IdentificationHit
    IdentificationHit.__init__
    """
    hit = pyopenms.IdentificationHit()
    _testMetaInfoInterface(hit)
    # Every setter/getter pair must be present on the wrapper.
    for suffix in ("Id", "Charge", "CalculatedMassToCharge",
                   "ExperimentalMassToCharge", "Name", "PassThreshold", "Rank"):
        assert getattr(pyopenms.IdentificationHit(), "set" + suffix) is not None
        assert getattr(pyopenms.IdentificationHit(), "get" + suffix) is not None
    # Setter/getter round trips (id is set twice, as in the original test).
    hit.setId("test_id")
    assert hit.getId() == "test_id"
    hit.setId("test_id")
    assert hit.getId() == "test_id"
    hit.setCharge(5)
    assert hit.getCharge() == 5
    hit.setCalculatedMassToCharge(5.0)
    assert hit.getCalculatedMassToCharge() == 5.0
    hit.setExperimentalMassToCharge(5.0)
    assert hit.getExperimentalMassToCharge() == 5.0
    hit.setName("test")
    assert hit.getName() == "test"
    hit.setPassThreshold(True)
    assert hit.getPassThreshold() == True
    hit.setRank(42)
    assert hit.getRank() == 42
@report
def testSpectrumIdentification():
    """
    @tests: SpectrumIdentification
    SpectrumIdentification.__init__
    """
    spec_id = pyopenms.SpectrumIdentification()
    _testMetaInfoInterface(spec_id)
    for method in ("setHits", "addHit", "getHits"):
        assert getattr(pyopenms.SpectrumIdentification(), method) is not None
    # Add two named hits and confirm both come back from getHits().
    for hit_name in ("test1", "test2"):
        hit = pyopenms.IdentificationHit()
        hit.setName(hit_name)
        spec_id.addHit(hit)
    all_hits = spec_id.getHits()
    assert len(all_hits) == 2
    names = [h.getName() for h in all_hits]
    assert "test1" in names
    assert "test2" in names
@report
def testIdentification():
    """
    @tests: Identification
    Identification.__init__
    """
    ident = pyopenms.Identification()
    _testMetaInfoInterface(ident)
    for method in ("setCreationDate", "getCreationDate",
                   "setSpectrumIdentifications", "addSpectrumIdentification",
                   "getSpectrumIdentifications"):
        assert getattr(pyopenms.Identification(), method) is not None
    # Each added SpectrumIdentification grows the stored list by one.
    for expected_count in (1, 2):
        ident.addSpectrumIdentification(pyopenms.SpectrumIdentification())
        assert len(ident.getSpectrumIdentifications()) == expected_count
@report
def testModificationDefinitionsSet():
    """
    @tests: ModificationDefinitionsSet
    ModificationDefinitionsSet.__init__
    """
    # Both the empty and the (fixed, variable) constructor must work;
    # modification names are passed as byte strings.
    empty = pyopenms.ModificationDefinitionsSet()
    fixed_mods = [b"Carbamidomethyl"]
    variable_mods = [b"Oxidation"]
    populated = pyopenms.ModificationDefinitionsSet(fixed_mods, variable_mods)
@report
def test_AcquisitionInfo():
    """
    @tests: AcquisitionInfo
    AcquisitionInfo.__init__
    AcquisitionInfo.__eq__
    AcquisitionInfo.__ge__
    AcquisitionInfo.__gt__
    AcquisitionInfo.__le__
    AcquisitionInfo.__lt__
    AcquisitionInfo.__ne__
    AcquisitionInfo.getMethodOfCombination
    AcquisitionInfo.setMethodOfCombination
    """
    info = pyopenms.AcquisitionInfo()
    info.__doc__
    # Equality must be reflexive.
    assert info == info
    assert not info != info
    # Method-of-combination round trip.
    info.setMethodOfCombination("ABC")
    assert info.getMethodOfCombination() == "ABC"
@report
def test_BaseFeature():
    """
    @tests: BaseFeature
    BaseFeature.__init__
    BaseFeature.clearUniqueId
    BaseFeature.ensureUniqueId
    BaseFeature.getCharge
    BaseFeature.getKeys
    BaseFeature.getMetaValue
    BaseFeature.getQuality
    BaseFeature.getUniqueId
    BaseFeature.getWidth
    BaseFeature.hasInvalidUniqueId
    BaseFeature.hasValidUniqueId
    BaseFeature.metaValueExists
    BaseFeature.removeMetaValue
    BaseFeature.setCharge
    BaseFeature.setMetaValue
    BaseFeature.setQuality
    BaseFeature.setWidth
    BaseFeature.clearMetaInfo
    BaseFeature.setUniqueId
    """
    bf = pyopenms.BaseFeature()
    _testMetaInfoInterface(bf)
    _testUniqueIdInterface(bf)
    # After clearing, ensureUniqueId() must create a valid id again.
    bf.clearUniqueId()
    assert bf.ensureUniqueId()
    assert bf.getCharge() == 0
    assert isinstance(bf.getQuality(), float)
    # NOTE(review): `long` is presumably aliased to int earlier in this file
    # for Python 2/3 compatibility — confirm before dropping Python 2 support.
    assert isinstance(bf.getUniqueId(), (long, int))
    assert isinstance(bf.getWidth(), float)
    assert not bf.hasInvalidUniqueId()
    assert bf.hasValidUniqueId()
    _testMetaInfoInterface(bf)
    bf.setCharge(1)
    bf.setQuality(0.0)
    bf.setWidth(1.0)
@report
def test_AnnotationState():
    """
    @tests: AnnotationState
    AnnotationState.__init__
    """
    state = pyopenms.AnnotationState()
    # Every enum member of the annotation state must be exposed.
    for member in ("FEATURE_ID_NONE", "FEATURE_ID_SINGLE",
                   "FEATURE_ID_MULTIPLE_SAME", "FEATURE_ID_MULTIPLE_DIVERGENT",
                   "SIZE_OF_ANNOTATIONSTATE"):
        assert getattr(state, member) is not None
@report
def testChecksumType():
    """
    @tests: ChecksumType
    ChecksumType.MD5
    ChecksumType.SHA1
    ChecksumType.SIZE_OF_CHECKSUMTYPE
    ChecksumType.UNKNOWN_CHECKSUM
    """
    # All checksum-type enum members are wrapped as plain ints.
    for member in ("MD5", "SHA1", "SIZE_OF_CHECKSUMTYPE", "UNKNOWN_CHECKSUM"):
        assert isinstance(getattr(pyopenms.ChecksumType, member), int)
@report
def testChromatogramPeak():
    """
    @tests: ChromatogramPeak
    ChromatogramPeak.__init__
    ChromatogramPeak.__eq__
    ChromatogramPeak.__ge__
    ChromatogramPeak.__gt__
    ChromatogramPeak.__le__
    ChromatogramPeak.__lt__
    ChromatogramPeak.__ne__
    ChromatogramPeak.getIntensity
    ChromatogramPeak.getRT
    ChromatogramPeak.setIntensity
    ChromatogramPeak.setRT
    """
    peak = pyopenms.ChromatogramPeak()
    # Equality must be reflexive.
    assert peak == peak
    assert not peak != peak
    # Intensity and retention-time round trips.
    peak.setIntensity(12.0)
    peak.setRT(34.0)
    assert peak.getIntensity() == 12.0
    assert peak.getRT() == 34.0
@report
def testChromatogramToosl():
    """
    @tests: ChromatogramTools
    ChromatogramTools.__init__
    ChromatogramTools.convertChromatogramsToSpectra
    ChromatogramTools.convertSpectraToChromatograms
    """
    # NOTE(review): the function name is misspelled ("Toosl" instead of
    # "Tools"); left unchanged because renaming could alter test collection.
    # Only construction and the presence of both conversion methods are checked.
    pyopenms.ChromatogramTools()
    pyopenms.ChromatogramTools.convertChromatogramsToSpectra
    pyopenms.ChromatogramTools.convertSpectraToChromatograms
@report
def testConsensusFeature():
    """
    @tests: ConsensusFeature
    ConsensusFeature.__eq__
    ConsensusFeature.__ge__
    ConsensusFeature.__gt__
    ConsensusFeature.__le__
    ConsensusFeature.__lt__
    ConsensusFeature.__ne__
    ConsensusFeature.__init__
    ConsensusFeature.clearUniqueId
    ConsensusFeature.computeConsensus
    ConsensusFeature.computeDechargeConsensus
    ConsensusFeature.computeMonoisotopicConsensus
    ConsensusFeature.ensureUniqueId
    ConsensusFeature.getCharge
    ConsensusFeature.getKeys
    ConsensusFeature.getMetaValue
    ConsensusFeature.getQuality
    ConsensusFeature.getUniqueId
    ConsensusFeature.getWidth
    ConsensusFeature.hasInvalidUniqueId
    ConsensusFeature.hasValidUniqueId
    ConsensusFeature.insert
    ConsensusFeature.metaValueExists
    ConsensusFeature.removeMetaValue
    ConsensusFeature.setCharge
    ConsensusFeature.setMetaValue
    ConsensusFeature.setQuality
    ConsensusFeature.setWidth
    ConsensusFeature.clearMetaInfo
    ConsensusFeature.setUniqueId
    ConsensusFeature.size
    ConsensusFeature.getPeptideIdentifications
    ConsensusFeature.setPeptideIdentifications
    """
    f = pyopenms.ConsensusFeature()
    # Shallow copy, deep copy and the copy constructor must all compare equal.
    f_ = copy.copy(f)
    assert f_ == f
    f_ = copy.deepcopy(f)
    assert f_ == f
    f_ = pyopenms.ConsensusFeature(f)
    assert f_ == f
    _testUniqueIdInterface(f)
    _testMetaInfoInterface(f)
    # Simple setter/getter round trips.
    f.setCharge(1)
    f.setQuality(2.0)
    f.setWidth(4.0)
    assert f.getCharge() == 1
    assert f.getQuality() == 2.0
    assert f.getWidth() == 4.0
    # insert() accepts Peak2D (with map index), BaseFeature and
    # ConsensusFeature overloads.
    f.insert(0, pyopenms.Peak2D(), 1)
    f.insert(1, pyopenms.BaseFeature())
    f.insert(2, pyopenms.ConsensusFeature())
    f.computeConsensus()
    f.computeDechargeConsensus
    f.computeMonoisotopicConsensus()
    assert f.size() >= 0
    # Peptide identifications round trip.
    p = f.getPeptideIdentifications()
    f.setPeptideIdentifications(p)
@report
def testConsensusMap():
    """
    @tests: ConsensusMap
    ConsensusMap.__eq__
    ConsensusMap.__ge__
    ConsensusMap.__gt__
    ConsensusMap.__init__
    ConsensusMap.__iter__
    ConsensusMap.__le__
    ConsensusMap.__lt__
    ConsensusMap.__ne__
    ConsensusMap.clear
    ConsensusMap.clearUniqueId
    ConsensusMap.ensureUniqueId
    ConsensusMap.getDataProcessing
    ConsensusMap.getColumnHeaders
    ConsensusMap.getProteinIdentifications
    ConsensusMap.getUnassignedPeptideIdentifications
    ConsensusMap.getUniqueId
    ConsensusMap.hasInvalidUniqueId
    ConsensusMap.hasValidUniqueId
    ConsensusMap.setDataProcessing
    ConsensusMap.setColumnHeaders
    ConsensusMap.setProteinIdentifications
    ConsensusMap.setUnassignedPeptideIdentifications
    ConsensusMap.setUniqueId
    ConsensusMap.setUniqueIds
    ConsensusMap.size
    ConsensusMap.sortByIntensity
    ConsensusMap.sortByMZ
    ConsensusMap.sortByMaps
    ConsensusMap.sortByPosition
    ConsensusMap.sortByQuality
    ConsensusMap.sortByRT
    ConsensusMap.sortBySize
    ConsensusMap.updateRanges
    """
    m = pyopenms.ConsensusMap()
    # Shallow copy, deep copy and the copy constructor must all compare equal.
    m_ = copy.copy(m)
    assert m_ == m
    m_ = copy.deepcopy(m)
    assert m_ == m
    m_ = pyopenms.ConsensusMap(m)
    assert m_ == m
    # Exercise the mutators/accessors on an empty map; setters are only
    # checked for presence (no call), getters are actually invoked.
    m.clear()
    m.clearUniqueId()
    m.ensureUniqueId()
    m.getDataProcessing()
    m.getColumnHeaders()
    m.getProteinIdentifications()
    m.getUnassignedPeptideIdentifications()
    m.getUniqueId()
    m.hasInvalidUniqueId()
    m.hasValidUniqueId()
    m.setDataProcessing
    m.setColumnHeaders
    m.setProteinIdentifications
    m.setUnassignedPeptideIdentifications
    m.setUniqueId
    m.setUniqueIds
    m.size()
    # All sort orders must be callable on the empty map.
    m.sortByIntensity()
    m.sortByMZ()
    m.sortByMaps()
    m.sortByPosition()
    m.sortByQuality()
    m.sortByRT()
    m.sortBySize()
    m.updateRanges()
    # Range accessors return floats after updateRanges().
    assert isinstance(m.getMin()[0], float)
    assert isinstance(m.getMin()[0], float)
    assert isinstance(m.getMax()[1], float)
    assert isinstance(m.getMax()[1], float)
    assert isinstance(m.getMinInt(), float)
    assert isinstance(m.getMaxInt(), float)
    m.getIdentifier()
    m.getLoadedFileType()
    m.getLoadedFilePath()
    # Equality must be reflexive.
    assert m == m
    assert not m != m
@report
def testConsensusXMLFile():
    """
    @tests: ConsensusXMLFile
    ConsensusXMLFile.__init__
    ConsensusXMLFile.getOptions
    ConsensusXMLFile.load
    ConsensusXMLFile.store
    """
    xml_file = pyopenms.ConsensusXMLFile()
    xml_file.getOptions()
    # load/store are only checked for presence (no fixture files here).
    for method in ("load", "store"):
        assert getattr(xml_file, method) is not None
@report
def testXTandemXMLFile():
    """
    @tests: XTandemXMLFile
    XTandemXMLFile.__init__
    XTandemXMLFile.load
    XTandemXMLFile.setModificationDefinitionsSet
    """
    # Construction plus presence of the load method.
    xml_file = pyopenms.XTandemXMLFile()
    assert xml_file.load is not None
@report
def testXTandemInfile():
    """
    Exercise the XTandemInfile wrapper: presence of all accessor pairs and a
    taxon setter/getter round trip.
    """
    f = pyopenms.XTandemInfile()
    # Bug fix: the original presence checks (setFragmentMassTolerance, etc.)
    # were bare expressions like `f.x is not None` whose boolean results were
    # silently discarded — they asserted nothing. They are real assertions now.
    for accessor in (
            "setFragmentMassTolerance", "getFragmentMassTolerance",
            "setPrecursorMassTolerancePlus", "getPrecursorMassTolerancePlus",
            "setPrecursorMassToleranceMinus", "getPrecursorMassToleranceMinus",
            "setPrecursorErrorType", "getPrecursorErrorType",
            "setFragmentMassErrorUnit", "getFragmentMassErrorUnit",
            "setPrecursorMassErrorUnit", "getPrecursorMassErrorUnit",
            "setNumberOfThreads", "getNumberOfThreads",
            "setModifications", "getModifications",
            "setOutputFilename", "getOutputFilename",
            "setInputFilename", "getInputFilename",
            "setTaxonomyFilename", "getTaxonomyFilename",
            "setDefaultParametersFilename", "getDefaultParametersFilename",
            "setMaxPrecursorCharge", "getMaxPrecursorCharge",
            "setNumberOfMissedCleavages", "getNumberOfMissedCleavages",
            "setMaxValidEValue", "getMaxValidEValue",
            "setSemiCleavage", "setAllowIsotopeError", "write",
            "setCleavageSite", "getCleavageSite"):
        assert getattr(f, accessor) is not None
    # Taxon setter/getter round trip.
    f.setTaxon("testTaxon")
    assert f.getTaxon() == "testTaxon"
@report
def testSignalToNoiseEstimatorMedian():
    """
    @tests: SignalToNoiseEstimatorMedian
    SignalToNoiseEstimatorMedian.__init__
    """
    estimator = pyopenms.SignalToNoiseEstimatorMedian()
    # Construction plus presence of the estimator interface.
    for method in ("init", "getSignalToNoise"):
        assert getattr(estimator, method) is not None
@report
def testSignalToNoiseEstimatorMedianChrom():
    """
    @tests: SignalToNoiseEstimatorMedianChrom
    SignalToNoiseEstimatorMedianChrom.__init__
    """
    estimator = pyopenms.SignalToNoiseEstimatorMedianChrom()
    # Construction plus presence of the estimator interface.
    for method in ("init", "getSignalToNoise"):
        assert getattr(estimator, method) is not None
@report
def testConvexHull2D():
    """
    @tests: ConvexHull2D
    ConvexHull2D.__eq__
    ConvexHull2D.__ge__
    ConvexHull2D.__gt__
    ConvexHull2D.__init__
    ConvexHull2D.__le__
    ConvexHull2D.__lt__
    ConvexHull2D.__ne__
    ConvexHull2D.clear
    """
    hull = pyopenms.ConvexHull2D()
    hull.clear()
    # Equality must be reflexive.
    assert hull == hull
@report
def testDataProcessing(dp=None):
    """
    @tests: DataProcessing
    DataProcessing.__init__
    DataProcessing.getKeys
    DataProcessing.getMetaValue
    DataProcessing.getProcessingActions
    DataProcessing.getSoftware
    DataProcessing.isMetaEmpty
    DataProcessing.metaValueExists
    DataProcessing.removeMetaValue
    DataProcessing.setCompletionTime
    DataProcessing.setMetaValue
    DataProcessing.setProcessingActions
    DataProcessing.setSoftware
    DataProcessing.__eq__
    DataProcessing.__ge__
    DataProcessing.__gt__
    DataProcessing.__le__
    DataProcessing.__lt__
    DataProcessing.__ne__
    DataProcessing.clearMetaInfo
    DataProcessing.getCompletionTime
    """
    # Bug fix: the original signature was `def testDataProcessing(dp=
    # pyopenms.DataProcessing())`, which builds the default instance ONCE at
    # import time and mutates that shared object on every call (mutable
    # default argument). Build a fresh default per call instead; explicit
    # callers are unaffected.
    if dp is None:
        dp = pyopenms.DataProcessing()
    _testMetaInfoInterface(dp)
    assert dp == dp
    assert not dp != dp
    # assert isinstance(dp.getCompletionTime().getDate(), bytes)
    # assert isinstance(dp.getCompletionTime().getTime(), bytes)
    dp.clearMetaInfo()
    # getKeys fills the passed list in place; empty after clearMetaInfo().
    k = []
    dp.getKeys(k)
    assert k == []
    dp.getMetaValue
    ac = dp.getProcessingActions()
    assert ac == set(())
    # Processing actions round trip through a Python set.
    ac = set([pyopenms.ProcessingAction.PEAK_PICKING,
              pyopenms.ProcessingAction.BASELINE_REDUCTION])
    dp.setProcessingActions(ac)
    assert len(dp.getProcessingActions()) == 2
    _testStrOutput(dp.getSoftware().getName())
    _testStrOutput(dp.getSoftware().getVersion())
    dp.isMetaEmpty()
    dp.metaValueExists
    dp.removeMetaValue
    # dp.setCompletionTime(pyopenms.DateTime.now())
    # Software setter/getter round trip.
    s = dp.getSoftware()
    s.setName("pyopenms")
    dp.setSoftware(s)
    assert dp.getSoftware().getName() == "pyopenms"
@report
def testDataType():
    """
    @tests: DataType
    DataType.DOUBLE_LIST
    DataType.DOUBLE_VALUE
    DataType.EMPTY_VALUE
    DataType.INT_LIST
    DataType.INT_VALUE
    DataType.STRING_LIST
    DataType.STRING_VALUE
    """
    # All data-type enum members are wrapped as plain ints.
    for member in ("DOUBLE_LIST", "DOUBLE_VALUE", "EMPTY_VALUE",
                   "INT_LIST", "INT_VALUE", "STRING_LIST", "STRING_VALUE"):
        assert isinstance(getattr(pyopenms.DataType, member), int)
@report
def testDataValue():
    """
    @tests: DataValue
    DataValue.__init__
    DataValue.isEmpty
    DataValue.toDoubleList
    DataValue.toDouble
    DataValue.toInt
    DataValue.toIntList
    DataValue.toString
    DataValue.toStringList
    DataValue.valueType
    """
    # A default-constructed DataValue is empty.
    empty = pyopenms.DataValue()
    assert empty.isEmpty()
    # Each supported input type maps to a converter and a valueType() tag.
    cases = [
        (1, "toInt", 1, pyopenms.DataType.INT_VALUE),
        (1.0, "toDouble", 1.0, pyopenms.DataType.DOUBLE_VALUE),
        ("1", "toString", "1", pyopenms.DataType.STRING_VALUE),
        ([1], "toIntList", [1], pyopenms.DataType.INT_LIST),
        ([1.0], "toDoubleList", [1.0], pyopenms.DataType.DOUBLE_LIST),
        ([b"1.0"], "toStringList", [b"1.0"], pyopenms.DataType.STRING_LIST),
    ]
    for raw, converter, expected, expected_type in cases:
        value = pyopenms.DataValue(raw)
        assert not value.isEmpty()
        assert getattr(value, converter)() == expected
        assert value.valueType() == expected_type
    # A missing meta value is reported as None.
    assert pyopenms.MSSpectrum().getMetaValue("nonexisingkey") is None
@report
def testAdduct():
    """
    @tests: Adduct
    Adduct.__init__
    """
    # Smoke test: default construction must not raise.
    adduct = pyopenms.Adduct()
@report
def testGaussFitter():
    """
    @tests: GaussFitter
    GaussFitter.__init__
    """
    # Smoke test: default construction must not raise.
    fitter = pyopenms.GaussFitter()
@report
def testGaussFitResult():
    """
    @tests: GaussFitResult
    GaussFitResult.__init__
    """
    # Construct with zeroed parameters, then overwrite each public field.
    result = pyopenms.GaussFitResult(0.0, 0.0, 0.0)
    for field_name in ("A", "x0", "sigma"):
        setattr(result, field_name, 5.0)
@report
def testChargePair():
    """
    @tests: ChargePair
    ChargePair.__init__
    """
    # Smoke test: default construction must not raise.
    pair = pyopenms.ChargePair()
@report
def testCompomer():
    """
    @tests: Compomer
    Compomer.__init__
    """
    # Smoke test: default construction must not raise.
    compomer = pyopenms.Compomer()
@report
def testCVMappings():
    """
    @tests: CVMappings
    CVMappings.__init__
    """
    # Smoke test: default construction must not raise.
    mappings = pyopenms.CVMappings()
@report
def testCVMappingFile():
    """
    @tests: CVMappingFile
    CVMappingFile.__init__
    """
    # Construction plus presence of the load method.
    mapping_file = pyopenms.CVMappingFile()
    assert pyopenms.CVMappingFile().load
@report
def testControlledVocabulary():
    """
    @tests: ControlledVocabulary
    ControlledVocabulary.__init__
    """
    # Construction plus presence of the OBO loader.
    vocabulary = pyopenms.ControlledVocabulary()
    assert pyopenms.ControlledVocabulary().loadFromOBO
@report
def testSemanticValidator():
    """
    @tests: SemanticValidator
    SemanticValidator.__init__
    """
    # Construction from mappings + vocabulary, plus method presence.
    mappings = pyopenms.CVMappings()
    vocabulary = pyopenms.ControlledVocabulary()
    validator = pyopenms.SemanticValidator(mappings, vocabulary)
    for method in ("validate", "setCheckTermValueTypes", "setCheckUnits"):
        assert getattr(validator, method) is not None
# @report
# def testDateTime():
# """
# @tests: DateTime
# DateTime.__init__
# DateTime.getDate
# DateTime.getTime
# DateTime.now
# """
# d = pyopenms.DateTime()
# assert isinstance( d.getDate(), bytes)
# assert isinstance( d.getTime(), bytes)
# d = pyopenms.DateTime.now()
# assert isinstance( d.getDate(), bytes)
# assert isinstance( d.getTime(), bytes)
#
# d.clear()
# d.set("01.01.2001 11:11:11")
# assert d.get() == "2001-01-01 11:11:11"
@report
def testFeature():
    """
    @tests: Feature
    Feature.__init__
    Feature.clearUniqueId
    Feature.ensureUniqueId
    Feature.getCharge
    Feature.getIntensity
    Feature.getKeys
    Feature.getMZ
    Feature.getMetaValue
    Feature.getQuality
    Feature.getRT
    Feature.getUniqueId
    Feature.getWidth
    Feature.hasInvalidUniqueId
    Feature.hasValidUniqueId
    Feature.metaValueExists
    Feature.removeMetaValue
    Feature.setCharge
    Feature.setIntensity
    Feature.setMZ
    Feature.setMetaValue
    Feature.setQuality
    Feature.setRT
    Feature.setWidth
    Feature.__eq__
    Feature.__ge__
    Feature.__gt__
    Feature.__le__
    Feature.__lt__
    Feature.__ne__
    Feature.clearMetaInfo
    Feature.getConvexHulls
    Feature.getSubordinates
    Feature.setConvexHulls
    Feature.setSubordinates
    Feature.setUniqueId
    Feature.getPeptideIdentifications
    Feature.setPeptideIdentifications
    """
    f = pyopenms.Feature()
    _testMetaInfoInterface(f)
    _testUniqueIdInterface(f)
    # Convex hulls and subordinates round trip through getter/setter pairs.
    f.setConvexHulls(f.getConvexHulls())
    f.setSubordinates(f.getSubordinates())
    f.setUniqueId(12345)
    # Equality must be reflexive.
    assert f == f
    assert not f != f
    # Simple setter/getter round trips; quality is indexed (dimension 0 here).
    f.setCharge(-1)
    assert f.getCharge() == -1
    f.setIntensity(10.0)
    assert f.getIntensity() == 10.0
    f.setQuality(0, 20.0)
    assert f.getQuality(0) == 20.0
    f.setRT(30.0)
    assert f.getRT() == 30.0
    f.setWidth(40.0)
    assert f.getWidth() == 40.0
    # Peptide identifications round trip.
    p = f.getPeptideIdentifications()
    f.setPeptideIdentifications(p)
@report
def testFeatureFinder():
    """
    @tests: FeatureFinder
    FeatureFinder.__init__
    FeatureFinder.endProgress
    FeatureFinder.getLogType
    FeatureFinder.getParameters
    FeatureFinder.run
    FeatureFinder.setLogType
    FeatureFinder.setProgress
    FeatureFinder.startProgress
    """
    ff = pyopenms.FeatureFinder()
    # Run the "centroided" algorithm on empty inputs: run(name, experiment,
    # output feature map, parameters, seed feature map) must not raise.
    name = pyopenms.FeatureFinderAlgorithmPicked.getProductName()
    ff.run(name, pyopenms.MSExperiment(), pyopenms.FeatureMap() ,
            pyopenms.Param(), pyopenms.FeatureMap())
    _testProgressLogger(ff)
    # The per-algorithm parameter set must satisfy the generic Param contract.
    p = ff.getParameters(name)
    _testParam(p)
@report
def testFeatureFileOptions():
    """
    @tests: FeatureFileOptions
    FeatureFileOptions.__init__
    FeatureFileOptions.getLoadConvexHull
    FeatureFileOptions.getLoadSubordinates
    FeatureFileOptions.getMetadataOnly
    FeatureFileOptions.getSizeOnly
    FeatureFileOptions.setLoadConvexHull
    FeatureFileOptions.setLoadSubordinates
    FeatureFileOptions.setMetadataOnly
    FeatureFileOptions.setSizeOnly
    """
    fo = pyopenms.FeatureFileOptions()
    fo.getLoadConvexHull()
    fo.getLoadSubordinates()
    # Fix: getMetadataOnly was listed in the docstring but never exercised.
    fo.getMetadataOnly()
    fo.getSizeOnly()
    # The setters are only checked for presence.
    assert fo.setLoadConvexHull is not None
    assert fo.setLoadSubordinates is not None
    assert fo.setMetadataOnly is not None
    assert fo.setSizeOnly is not None
@report
def _testParam(p):
    """
    Generic contract check for a pyopenms.Param instance: dict conversion,
    per-key get/set round trips, tags, section descriptions, copy/insert/update
    and XML (ini) persistence via ParamXMLFile.

    @tests: Param
    Param.__init__
    Param.addTag
    Param.addTags
    Param.asDict
    Param.clearTags
    Param.copy
    Param.exists
    Param.get
    Param.getDescription
    Param.getEntry
    Param.getSectionDescription
    Param.getTags
    Param.getValue
    Param.hasTag
    Param.insert
    Param.setMaxFloat
    Param.setMaxInt
    Param.setMinFloat
    Param.setMinInt
    Param.setSectionDescription
    Param.setValidStrings
    Param.setValue
    Param.size
    Param.update
    Param.__eq__
    Param.__ge__
    Param.__gt__
    Param.__le__
    Param.__lt__
    Param.__ne__
    ParamEntry.__init__
    ParamEntry.description
    ParamEntry.isValid
    ParamEntry.max_float
    ParamEntry.max_int
    ParamEntry.min_float
    ParamEntry.min_int
    ParamEntry.name
    ParamEntry.tags
    ParamEntry.valid_strings
    ParamEntry.value
    ParamEntry.__eq__
    ParamEntry.__ge__
    ParamEntry.__gt__
    ParamEntry.__le__
    ParamEntry.__lt__
    ParamEntry.__ne__
    """
    assert p == p
    # asDict() mirrors the Param: same number of entries, a real dict.
    dd = p.asDict()
    assert len(dd) == p.size()
    assert isinstance(dd, dict)
    for k in p.keys():
        #value = p.getValue(k)
        # __getitem__/__setitem__ round trip, then update() from Param and dict.
        value = p[k]
        p[k] = value
        p.update(p)
        p.update(p.asDict())
        assert p[k] == value
        desc  = p.getDescription(k)
        tags = p.getTags(k)
        p.setValue(k, value, desc, tags)
        p.setValue(k, value, desc)
        assert p.exists(k)
        # only set the section description if there are actually two or more sections
        if len(k.split(b":")) < 2: continue
        f = k.split(b":")[0]
        p.setSectionDescription(f, k)
        # TODO: keys inside maps are not yet properly decoded
        assert p.getSectionDescription(f) == k.decode()
        assert p.get(k) is not None
    # values()/items() must be consistent with keys()-based indexing.
    assert len(p.values()) == len([p[k] for k in p.keys()])
    assert sorted(p.items()) == sorted((k, p[k]) for k in p.keys())
    assert not p.exists("asdflkj01231321321v")
    # Tag management round trip (uses `k` from the last loop iteration).
    p.addTag(k, "a")
    p.addTags(k, [b"", b"c"])
    assert sorted(p.getTags(k)) == [b"", b"a", b"c"]
    p.clearTags(k)
    assert p.getTags(k) == []
    # insert() prefixes keys; copy() with prefix strips them back off.
    pn = pyopenms.Param()
    pn.insert("master:", p)
    assert pn.exists(b"master:"+k)
    p1 = pn.copy("master:", True)
    assert p1 == p
    p1.update(p)
    p1.update(p,0)
    p1.update(p,1)
    p1.update(dd)
    # Constraint setters are only checked for presence here.
    p.setValidStrings
    p.setMinFloat
    p.setMaxFloat
    p.setMinInt
    p.setMaxInt
    # Persistence: store to ini, reload, and compare for equality.
    ph = pyopenms.ParamXMLFile()
    ph.store("test.ini", p)
    p1 = pyopenms.Param()
    ph.load("test.ini", p1)
    assert p == p1
    # ParamEntry exposes all documented fields and is self-equal.
    e1 = p1.getEntry(k)
    for f in ["name", "description", "value", "tags", "valid_strings",
              "min_float", "max_float", "min_int", "max_int"]:
        assert getattr(e1, f) is not None
    assert e1 == e1
    # get() with a default falls back for missing keys.
    assert p1.get(b"abcde", 7) == 7
@report
def testFeatureFinderAlgorithmPicked():
    """
    @tests: FeatureFinderAlgorithmPicked
    FeatureFinderAlgorithmPicked.__init__
    FeatureFinderAlgorithmPicked.getDefaults
    FeatureFinderAlgorithmPicked.getName
    FeatureFinderAlgorithmPicked.getParameters
    FeatureFinderAlgorithmPicked.getProductName
    FeatureFinderAlgorithmPicked.setName
    FeatureFinderAlgorithmPicked.setParameters
    """
    algo = pyopenms.FeatureFinderAlgorithmPicked()
    # Defaults and current parameters satisfy the generic Param contract.
    _testParam(algo.getDefaults())
    _testParam(algo.getParameters())
    assert algo.getName() == "FeatureFinderAlgorithm"
    assert pyopenms.FeatureFinderAlgorithmPicked.getProductName() == "centroided"
    # Parameters and name are writable.
    algo.setParameters(pyopenms.Param())
    algo.setName("test")
    assert algo.getName() == "test"
@report
def testFeatureFinderAlgorithmIsotopeWavelet():
    """
    @tests: FeatureFinderAlgorithmIsotopeWavelet
    FeatureFinderAlgorithmIsotopeWavelet.__init__
    FeatureFinderAlgorithmIsotopeWavelet.getDefaults
    FeatureFinderAlgorithmIsotopeWavelet.getName
    FeatureFinderAlgorithmIsotopeWavelet.getParameters
    FeatureFinderAlgorithmIsotopeWavelet.getProductName
    FeatureFinderAlgorithmIsotopeWavelet.setName
    FeatureFinderAlgorithmIsotopeWavelet.setParameters
    """
    algo = pyopenms.FeatureFinderAlgorithmIsotopeWavelet()
    # Defaults satisfy the generic Param contract.
    _testParam(algo.getDefaults())
    # _testParam(ff.getParameters())
    assert algo.getName() == "FeatureFinderAlgorithm"
    assert pyopenms.FeatureFinderAlgorithmIsotopeWavelet.getProductName() == "isotope_wavelet"
    # Parameters and name are writable.
    algo.setParameters(pyopenms.Param())
    algo.setName("test")
    assert algo.getName() == "test"
@report
def testCompNovoIdentification():
    """
    @tests: CompNovoIdentification
    CompNovoIdentification.__init__
    """
    engine = pyopenms.CompNovoIdentification()
    _testParam(engine.getDefaults())
    for method in ("getIdentification", "getIdentifications"):
        assert getattr(pyopenms.CompNovoIdentification(), method) is not None
@report
def testCompNovoIdentificationCID():
    """
    @tests: CompNovoIdentificationCID
    CompNovoIdentificationCID.__init__
    """
    engine = pyopenms.CompNovoIdentificationCID()
    _testParam(engine.getDefaults())
    for method in ("getIdentification", "getIdentifications"):
        assert getattr(pyopenms.CompNovoIdentificationCID(), method) is not None
@report
def testExperimentalSettings():
    """
    @tests: ExperimentalSettings
    ExperimentalSettings.__init__
    """
    # Smoke test: default construction must not raise.
    settings = pyopenms.ExperimentalSettings()
@report
def testFeatureDeconvolution():
    """
    @tests: FeatureDeconvolution
    FeatureDeconvolution.__init__
    """
    deconv = pyopenms.FeatureDeconvolution()
    _testParam(deconv.getDefaults())
    assert pyopenms.FeatureDeconvolution().compute is not None
@report
def testInternalCalibration():
    """
    @tests: InternalCalibration
    InternalCalibration.__init__
    """
    calibration = pyopenms.InternalCalibration()
    _testParam(calibration.getDefaults())
    # TODO
    # assert pyopenms.InternalCalibration().compute is not None
@report
def testItraqConstants():
    """
    @tests: testItraqConstants
    """
    constants = pyopenms.ItraqConstants()
    # All supported iTRAQ/TMT plex enum members must be exposed.
    for plex in ("FOURPLEX", "EIGHTPLEX", "TMT_SIXPLEX"):
        assert getattr(pyopenms.ITRAQ_TYPES, plex) is not None
    for method in ("getIsotopeMatrixAsStringList",
                   "updateIsotopeMatrixFromStringList",
                   "translateIsotopeMatrix"):
        assert getattr(constants, method) is not None
@report
def testLinearResampler():
    """
    @tests: LinearResampler
    LinearResampler.__init__
    """
    resampler = pyopenms.LinearResampler()
    _testParam(resampler.getDefaults())
    for method in ("raster", "rasterExperiment"):
        assert getattr(pyopenms.LinearResampler(), method) is not None
@report
def testPeptideAndProteinQuant():
    """
    @tests: PeptideAndProteinQuant
    PeptideAndProteinQuant.__init__
    """
    quantifier = pyopenms.PeptideAndProteinQuant()
    _testParam(quantifier.getDefaults())
    for method in ("quantifyPeptides", "quantifyProteins"):
        assert getattr(pyopenms.PeptideAndProteinQuant(), method) is not None
@report
def testSeedListGenerator():
    """
    @tests: SeedListGenerator
    SeedListGenerator.__init__
    """
    # NOTE(review): a second function with this exact name is defined later in
    # this file, so this definition is shadowed and never runs as a test.
    ff = pyopenms.SeedListGenerator()
    p = ff.getDefaults()
    _testParam(p)
    # TODO
    # assert pyopenms.SeedListGenerator().compute is not None
@report
def testTOFCalibration():
    """
    @tests: TOFCalibration
    TOFCalibration.__init__
    """
    calibration = pyopenms.TOFCalibration()
    p = calibration.getDefaults()
    # _testParam(p)
    for method in ("calibrate", "pickAndCalibrate"):
        assert getattr(pyopenms.TOFCalibration(), method) is not None
# TODO: re-enable as soon as ConsensusIDAlgorithm classes are wrapped
# @report
# def testConsensusID():
# """
# @tests: ConsensusID
# ConsensusID.__init__
# """
# ff = pyopenms.ConsensusID()
# p = ff.getDefaults()
# _testParam(p)
# assert pyopenms.ConsensusID().apply is not None
@report
def testFalseDiscoveryRate():
    """
    @tests: FalseDiscoveryRate
    FalseDiscoveryRate.__init__
    """
    fdr = pyopenms.FalseDiscoveryRate()
    _testParam(fdr.getDefaults())
    assert pyopenms.FalseDiscoveryRate().apply is not None
@report
def testIDFilter():
    """
    @tests: IDFilter
    IDFilter.__init__
    """
    # Only construction is exercised here.
    id_filter = pyopenms.IDFilter()
    # assert pyopenms.IDFilter().apply is not None
@report
def testProteinResolver():
    """
    @tests: ProteinResolver
    ProteinResolver.__init__
    """
    resolver = pyopenms.ProteinResolver()
    for method in ("resolveConsensus", "resolveID",
                   "setProteinData", "getResults"):
        assert getattr(pyopenms.ProteinResolver(), method) is not None
@report
def testSvmTheoreticalSpectrumGeneratorTrainer():
    """
    @tests: SvmTheoreticalSpectrumGeneratorTrainer
    SvmTheoreticalSpectrumGeneratorTrainer.__init__
    """
    trainer = pyopenms.SvmTheoreticalSpectrumGeneratorTrainer()
    # Training and normalization entry points are exposed.
    for method in ("trainModel", "normalizeIntensity"):
        assert getattr(pyopenms.SvmTheoreticalSpectrumGeneratorTrainer(),
                       method) is not None
@report
def testPosteriorErrorProbabilityModel():
    """
    @tests: PosteriorErrorProbabilityModel
    PosteriorErrorProbabilityModel.__init__
    """
    # Construction and default parameter round trip.
    model = pyopenms.PosteriorErrorProbabilityModel()
    p = model.getDefaults()
    _testParam(p)
    assert pyopenms.PosteriorErrorProbabilityModel().fit is not None
    assert pyopenms.PosteriorErrorProbabilityModel().computeProbability is not None
    # Fit the model on a small synthetic score list; both the single-list
    # and the two-list overloads are exercised, then the log densities are
    # filled from the same scores.
    scores = [float(i) for i in range(10)]
    model.fit(scores, "none")
    model.fit(scores, scores, "none")
    model.fillLogDensities(scores, scores, scores)
    assert model.computeLogLikelihood is not None
    assert model.pos_neg_mean_weighted_posteriors is not None
    # Fetch both fit results; note the second call deliberately rebinds
    # GaussFitResult, so the gnuplot formulas below use the incorrectly-
    # assigned fit.
    GaussFitResult = model.getCorrectlyAssignedFitResult()
    GaussFitResult = model.getIncorrectlyAssignedFitResult()
    model.getNegativePrior()
    model.computeProbability(5.0)
    # model.InitPlots
    # Plot-formula helpers only need to be callable without raising.
    target = [float(i) for i in range(10)]
    model.getGumbelGnuplotFormula(GaussFitResult)
    model.getGaussGnuplotFormula(GaussFitResult)
    model.getBothGnuplotFormula(GaussFitResult, GaussFitResult)
    model.plotTargetDecoyEstimation(target, target)
    model.getSmallestScore()
@report
def testSeedListGenerator():
    """
    @tests: SeedListGenerator
    SeedListGenerator.__init__
    """
    # NOTE(review): this redefines testSeedListGenerator from earlier in
    # this file and therefore shadows it -- presumably unintentional;
    # verify which of the two definitions is meant to survive.
    ff = pyopenms.SeedListGenerator()
    # TODO
    # assert pyopenms.SeedListGenerator().generateSeedList is not None
@report
def testConsensusMapNormalizerAlgorithmMedian():
    """
    @tests: ConsensusMapNormalizerAlgorithmMedian
    ConsensusMapNormalizerAlgorithmMedian.__init__
    """
    # Construction plus presence of the normalization entry point.
    normalizer = pyopenms.ConsensusMapNormalizerAlgorithmMedian()
    assert getattr(pyopenms.ConsensusMapNormalizerAlgorithmMedian(),
                   "normalizeMaps") is not None
@report
def testConsensusMapNormalizerAlgorithmQuantile():
    """
    @tests: ConsensusMapNormalizerAlgorithmQuantile
    ConsensusMapNormalizerAlgorithmQuantile.__init__
    """
    # Construction plus presence of the normalization entry point.
    normalizer = pyopenms.ConsensusMapNormalizerAlgorithmQuantile()
    assert getattr(pyopenms.ConsensusMapNormalizerAlgorithmQuantile(),
                   "normalizeMaps") is not None
@report
def testConsensusMapNormalizerAlgorithmThreshold():
    """
    @tests: ConsensusMapNormalizerAlgorithmThreshold
    ConsensusMapNormalizerAlgorithmThreshold.__init__
    """
    # The threshold variant additionally exposes computeCorrelation.
    normalizer = pyopenms.ConsensusMapNormalizerAlgorithmThreshold()
    for method in ("computeCorrelation", "normalizeMaps"):
        assert getattr(pyopenms.ConsensusMapNormalizerAlgorithmThreshold(),
                       method) is not None
@report
def testFeatureFinderAlgorithmPicked():
    """
    @tests: FeatureFinderAlgorithmPicked
    FeatureFinderAlgorithmPicked.__init__
    """
    # Construction plus presence of the data/run entry points.
    finder = pyopenms.FeatureFinderAlgorithmPicked()
    for method in ("setData", "run"):
        assert getattr(pyopenms.FeatureFinderAlgorithmPicked(),
                       method) is not None
@report
def testFeatureFinderAlgorithmIsotopeWavelet():
    """
    @tests: FeatureFinderAlgorithmIsotopeWavelet
    FeatureFinderAlgorithmIsotopeWavelet.__init__
    """
    # Same surface check for the isotope-wavelet variant.
    finder = pyopenms.FeatureFinderAlgorithmIsotopeWavelet()
    for method in ("setData", "run"):
        assert getattr(pyopenms.FeatureFinderAlgorithmIsotopeWavelet(),
                       method) is not None
@report
def testAScore():
    """
    @tests: AScore
    AScore.__init__
    """
    # Construct the scoring helper and invoke compute() once on an empty
    # hit/spectrum pair; only the call path is exercised, not the result.
    ff = pyopenms.AScore()
    hit = pyopenms.PeptideHit()
    spectrum = pyopenms.MSSpectrum()
    ff.compute(hit, spectrum)
    # ff.computeCumulativeScore(1,1,0.5)
@report
def testIDRipper():
    """
    @tests: IDRipper
    IDRipper.__init__
    IDRipper.rip
    """
    # Construction plus presence of the rip() entry point.
    ripper = pyopenms.IDRipper()
    assert getattr(pyopenms.IDRipper(), "rip") is not None
@report
def testFASTAFile():
    """
    @tests: FASTAFile
    FASTAFile.__init__
    FASTAFile.load
    FASTAFile.store
    """
    # Construction plus the load/store I/O surface.
    fasta = pyopenms.FASTAFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.FASTAFile(), method) is not None
@report
def testFASTAEntry():
    """
    @tests: FASTAEntry
    FASTAEntry.__init__
    """
    # Only construction is exercised for the entry record.
    entry = pyopenms.FASTAEntry()
@report
def testInternalCalibration():
    """
    @tests: InternalCalibration
    InternalCalibration.__init__
    InternalCalibration.calibrateMapGlobally
    InternalCalibration.calibrateMapSpectrumwise
    """
    # Construction plus the calibration-related entry points.
    calib = pyopenms.InternalCalibration()
    for method in ("fillCalibrants", "getCalibrationPoints", "calibrate"):
        assert getattr(pyopenms.InternalCalibration(), method) is not None
@report
def testTransitionTSVFile():
    """
    @tests: TransitionTSVFile
    TransitionTSVFile.convertTargetedExperimentToTSV
    TransitionTSVFile.convertTSVToTargetedExperiment
    TransitionTSVFile.validateTargetedExperiment
    """
    # NOTE(review): the previous docstring listed calibrateMap* methods,
    # apparently copy-pasted from testInternalCalibration; it now matches
    # the methods actually asserted below.
    ff = pyopenms.TransitionTSVFile()
    assert pyopenms.TransitionTSVFile().convertTargetedExperimentToTSV is not None
    assert pyopenms.TransitionTSVFile().convertTSVToTargetedExperiment is not None
    assert pyopenms.TransitionTSVFile().validateTargetedExperiment is not None
@report
def testProteaseDigestion():
    """
    @tests: ProteaseDigestion
    ProteaseDigestion.__init__
    ProteaseDigestion.getMissedCleavages()
    ProteaseDigestion.setMissedCleavages()
    ProteaseDigestion.digest()
    ProteaseDigestion.peptideCount()
    """
    # removed due to name clashes
    # ProteaseDigestion.getEnzyme()
    # ProteaseDigestion.setEnzyme()
    # ProteaseDigestion.getEnzymeByName()
    # Surface check of the digestion API; the enzyme get/set pairs stay
    # disabled because of the name clashes noted above.
    ff = pyopenms.ProteaseDigestion()
    #enz = pyopenms.ProteaseDigestion().Enzyme()
    assert pyopenms.ProteaseDigestion().getMissedCleavages is not None
    assert pyopenms.ProteaseDigestion().setMissedCleavages is not None
    #assert pyopenms.ProteaseDigestion().getEnzyme is not None
    #assert pyopenms.ProteaseDigestion().setEnzyme is not None
    #assert pyopenms.ProteaseDigestion().getEnzymeByName is not None
    assert pyopenms.ProteaseDigestion().digest is not None
    assert pyopenms.ProteaseDigestion().peptideCount is not None
    # Missed-cleavage count survives a set/get round trip.
    ff.setMissedCleavages(5)
    assert ff.getMissedCleavages() == 5
    #ff.setEnzyme(enz.TRYPSIN)
    #assert ff.getEnzyme() == enz.TRYPSIN
@report
def testEnzymaticDigestionLogModel():
    """Surface check plus a log-threshold set/get round trip."""
    model = pyopenms.EnzymaticDigestionLogModel()
    for method in ("getLogThreshold", "setLogThreshold", "digest",
                   "peptideCount"):
        assert getattr(pyopenms.EnzymaticDigestionLogModel(),
                       method) is not None
    model.setLogThreshold(0.25)
    assert model.getLogThreshold() == 0.25
@report
def testIDDecoyProbability():
    """
    @tests: IDDecoyProbability
    IDDecoyProbability.__init__
    """
    # Construction plus presence of the apply() entry point.
    decoy = pyopenms.IDDecoyProbability()
    assert getattr(pyopenms.IDDecoyProbability(), "apply") is not None
@report
def testFeatureGrouping():
    """
    @tests: FeatureGroupingAlgorithm
    FeatureGroupingAlgorithm.getDefaults
    FeatureGroupingAlgorithm.getName
    FeatureGroupingAlgorithm.getParameters
    FeatureGroupingAlgorithm.setName
    FeatureGroupingAlgorithm.setParameters
    FeatureGroupingAlgorithm.transferSubelements
    FeatureGroupingAlgorithmQT.__init__
    FeatureGroupingAlgorithmQT.getDefaults
    FeatureGroupingAlgorithmQT.getName
    FeatureGroupingAlgorithmQT.getParameters
    FeatureGroupingAlgorithmQT.group
    FeatureGroupingAlgorithmQT.setName
    FeatureGroupingAlgorithmQT.setParameters
    FeatureGroupingAlgorithmQT.transferSubelements
    """
    # Abstract base class exposes the full grouping surface.
    for method in ("getDefaults", "getName", "getParameters", "setName",
                   "setParameters", "transferSubelements"):
        assert getattr(pyopenms.FeatureGroupingAlgorithm, method) is not None
    # QT implementation: getters are callable, the rest must exist.
    qt = pyopenms.FeatureGroupingAlgorithmQT()
    qt.getDefaults()
    qt.getParameters()
    qt.getName()
    for method in ("group", "setName", "setParameters",
                   "transferSubelements"):
        assert getattr(qt, method) is not None
@report
def testFeatureMap():
    """
    @tests: FeatureMap
    FeatureMap.__init__
    FeatureMap.__add__
    FeatureMap.__iadd__
    FeatureMap.__radd__
    FeatureMap.__getitem__
    FeatureMap.__iter__
    FeatureMap.clear
    FeatureMap.clearUniqueId
    FeatureMap.ensureUniqueId
    FeatureMap.getDataProcessing
    FeatureMap.getProteinIdentifications
    FeatureMap.getUnassignedPeptideIdentifications
    FeatureMap.getUniqueId
    FeatureMap.setUniqueId
    FeatureMap.hasInvalidUniqueId
    FeatureMap.hasValidUniqueId
    FeatureMap.push_back
    FeatureMap.setDataProcessing
    FeatureMap.setProteinIdentifications
    FeatureMap.setUnassignedPeptideIdentifications
    FeatureMap.setUniqueIds
    FeatureMap.size
    FeatureMap.sortByIntensity
    FeatureMap.sortByMZ
    FeatureMap.sortByOverallQuality
    FeatureMap.sortByPosition
    FeatureMap.sortByRT
    FeatureMap.swap
    FeatureMap.updateRanges
    """
    # Copy semantics: shallow, deep, and copy-constructor all compare equal.
    fm = pyopenms.FeatureMap()
    fm_ = copy.copy(fm)
    assert fm_ == fm
    fm_ = copy.deepcopy(fm)
    assert fm_ == fm
    fm_ = pyopenms.FeatureMap(fm)
    assert fm_ == fm
    _testUniqueIdInterface(fm)
    fm.clear()
    fm.clearUniqueId()
    fm.getIdentifier()
    fm.getLoadedFileType()
    fm.getLoadedFilePath()
    # A single feature survives every sort order unchanged.
    f = pyopenms.Feature()
    fm.push_back(f)
    assert len(list(fm)) == 1
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByIntensity()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByIntensity(False)
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByPosition()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByRT()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByMZ()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByOverallQuality()
    assert fm.size() == 1
    assert fm[0] == f
    # swap() moves the contents into the empty map.
    fm2 = pyopenms.FeatureMap()
    fm.swap(fm2)
    assert fm2.size() == 1
    assert fm2[0] == f
    assert fm.size() == 0
    # Range information is float-valued after updateRanges().
    fm2.updateRanges()
    assert isinstance(fm2.getMin()[0], float)
    assert isinstance(fm2.getMin()[1], float)
    assert isinstance(fm2.getMax()[0], float)
    assert isinstance(fm2.getMax()[1], float)
    assert isinstance(fm2.getMinInt(), float)
    assert isinstance(fm2.getMaxInt(), float)
    # Identification lists default to empty and round-trip.
    assert fm2.getProteinIdentifications() == []
    fm2.setProteinIdentifications([])
    assert fm2.getUnassignedPeptideIdentifications() == []
    fm2.setUnassignedPeptideIdentifications([])
    fm2.clear()
    assert fm2.size() == 0
    # Data-processing metadata round-trips.
    dp = pyopenms.DataProcessing()
    fm2.setDataProcessing([dp])
    assert fm2.getDataProcessing() == [dp]
    testDataProcessing(dp)
    fm2.setUniqueIds()
    # Concatenation: fm += fm doubles the map; fm + fm2 differs from fm.
    fm += fm
    assert fm + fm2 != fm
@report
def testFeatureXMLFile():
    """
    @tests: FeatureXMLFile
    FeatureXMLFile.__init__
    FeatureXMLFile.load
    FeatureXMLFile.store
    FeatureXMLFile.getOptions
    FeatureXMLFile.setOptions
    FeatureXMLFile.loadSize
    FileHandler.__init__
    FileHandler.loadFeature
    """
    # Round-trip an (empty) feature map through featureXML, then reload it
    # via the generic FileHandler as well.
    feature_map = pyopenms.FeatureMap()
    feature_map.setUniqueIds()
    xml_file = pyopenms.FeatureXMLFile()
    xml_file.store("test.featureXML", feature_map)
    xml_file.load("test.featureXML", feature_map)
    handler = pyopenms.FileHandler()
    handler.loadFeatures("test.featureXML", feature_map)
@report
def testFileDescription():
    """
    @tests: ColumnHeader
    ColumnHeader.__init__
    ColumnHeader.filename
    ColumnHeader.label
    ColumnHeader.size
    ColumnHeader.unique_id
    """
    # filename/label are string-like attributes, size is integral.
    header = pyopenms.ColumnHeader()
    _testStrOutput(header.filename)
    _testStrOutput(header.label)
    assert isinstance(header.size, int)
    # assert isinstance(fd.unique_id, (long, int, bytes))
@report
def testFileHandler():
    """
    @tests: FileHandler
    FileHandler.__init__
    FileHandler.getType
    FileHandler.loadExperiment
    FileHandler.storeExperiment
    """
    # Round-trip an empty experiment through each supported format.
    experiment = pyopenms.MSExperiment()
    handler = pyopenms.FileHandler()
    for path in ("test1.mzML", "test1.mzXML", "test1.mzData"):
        handler.storeExperiment(path, experiment)
        handler.loadExperiment(path, experiment)
@report
def testCachedMzML():
    """
    """
    # Cache a one-spectrum experiment to disk, then read it back and check
    # the spectrum/chromatogram counts.
    experiment = pyopenms.MSExperiment()
    experiment.addSpectrum(pyopenms.MSSpectrum())
    pyopenms.CachedmzML.store("myCache.mzML", experiment)
    cached = pyopenms.CachedmzML()
    pyopenms.CachedmzML.load("myCache.mzML", cached)
    meta_data = cached.getMetaData()
    assert cached.getNrChromatograms() == 0
    assert cached.getNrSpectra() == 1
@report
def testIndexedMzMLFile():
    """
    """
    # Write a one-spectrum mzML and access it through the indexed handler.
    experiment = pyopenms.MSExperiment()
    experiment.addSpectrum(pyopenms.MSSpectrum())
    pyopenms.MzMLFile().store("tfile_idx.mzML", experiment)
    handler = pyopenms.IndexedMzMLHandler("tfile_idx.mzML")
    assert handler.getNrChromatograms() == 0
    assert handler.getNrSpectra() == 1
    # Both spectrum accessors must work for index 0.
    native_spectrum = handler.getMSSpectrumById(0)
    interface_spectrum = handler.getSpectrumById(0)
@report
def testIDMapper():
    """
    @tests: IDMapper
    IDMapper.__init__
    IDMapper.annotate
    IDMapper.getDefaults
    IDMapper.getName
    IDMapper.getParameters
    IDMapper.setName
    IDMapper.setParameters
    """
    mapper = pyopenms.IDMapper()
    assert mapper.annotate is not None
    mapper.getDefaults()
    # Name and parameters survive set/get round trips.
    mapper.setName("x")
    assert mapper.getName() == "x"
    mapper.setParameters(mapper.getParameters())
@report
def testIdXMLFile():
    """
    @tests: IdXMLFile
    IdXMLFile.__init__
    IdXMLFile.load
    IdXMLFile.store
    """
    # load/store I/O surface must be present.
    for method in ("load", "store"):
        assert getattr(pyopenms.IdXMLFile(), method) is not None
@report
def testPepXMLFile():
    """
    @tests: PepXMLFile
    PepXMLFile.__init__
    PepXMLFile.load
    PepXMLFile.store
    """
    pep_file = pyopenms.PepXMLFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.PepXMLFile(), method) is not None
@report
def testProtXMLFile():
    """
    @tests: ProtXMLFile
    ProtXMLFile.__init__
    ProtXMLFile.load
    ProtXMLFile.store
    """
    prot_file = pyopenms.ProtXMLFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.ProtXMLFile(), method) is not None
@report
def testDTA2DFile():
    """
    @tests: DTA2DFile
    DTA2DFile.__init__
    DTA2DFile.load
    DTA2DFile.store
    """
    dta2d_file = pyopenms.DTA2DFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.DTA2DFile(), method) is not None
@report
def testDTAFile():
    """
    @tests: DTAFile
    DTAFile.__init__
    DTAFile.load
    DTAFile.store
    """
    dta_file = pyopenms.DTAFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.DTAFile(), method) is not None
@report
def testEDTAFile():
    """
    @tests: EDTAFile
    EDTAFile.__init__
    EDTAFile.load
    EDTAFile.store
    """
    edta_file = pyopenms.EDTAFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.EDTAFile(), method) is not None
@report
def testKroenikFile():
    """
    @tests: KroenikFile
    KroenikFile.__init__
    KroenikFile.load
    KroenikFile.store
    """
    kroenik_file = pyopenms.KroenikFile()
    for method in ("load", "store"):
        assert getattr(pyopenms.KroenikFile(), method) is not None
@report
def testMSPFile():
    """
    @tests: MSPFile
    MSPFile.__init__
    """
    # Only construction is exercised; load/store checks remain disabled.
    msp_file = pyopenms.MSPFile()
    # assert pyopenms.KroenikFile().load is not None
    # assert pyopenms.KroenikFile().store is not None
@report
def testMzIdentMLFile():
    """
    @tests: MzIdentMLFile
    MzIdentMLFile.__init__
    """
    # In addition to load/store this format exposes a validity check.
    mzid_file = pyopenms.MzIdentMLFile()
    for method in ("load", "store", "isSemanticallyValid"):
        assert getattr(pyopenms.MzIdentMLFile(), method) is not None
@report
def testMzTabFile():
    """
    @tests: MzTabFile
    MzTabFile.__init__
    """
    # Only construction is exercised; the store check remains disabled.
    mztab_file = pyopenms.MzTabFile()
    # assert pyopenms.MzTabFile().store is not None
@report
def testMzTab():
    """
    @tests: MzTab
    MzTab.__init__
    """
    # Construction is intentionally disabled in the original suite.
    # f = pyopenms.MzTab()
@report
def testInstrumentSettings():
    """
    @tests: InstrumentSettings
    InstrumentSettings.__init__
    InstrumentSettings.clearMetaInfo
    InstrumentSettings.getKeys
    InstrumentSettings.getMetaValue
    InstrumentSettings.getPolarity
    InstrumentSettings.isMetaEmpty
    InstrumentSettings.metaValueExists
    InstrumentSettings.removeMetaValue
    InstrumentSettings.setMetaValue
    InstrumentSettings.setPolarity
    InstrumentSettings.__eq__
    InstrumentSettings.__ge__
    InstrumentSettings.__gt__
    InstrumentSettings.__le__
    InstrumentSettings.__lt__
    InstrumentSettings.__ne__
    """
    settings = pyopenms.InstrumentSettings()
    _testMetaInfoInterface(settings)
    # Polarity survives a set/get round trip.
    negative = pyopenms.IonSource.Polarity.NEGATIVE
    settings.setPolarity(negative)
    assert settings.getPolarity() == negative
    # Equality is reflexive.
    assert settings == settings
    assert not settings != settings
@report
def testContactPerson():
    """
    @tests: ContactPerson
    ContactPerson.__init__
    ContactPerson.getFirstName
    ContactPerson.setFirstName
    ContactPerson.getLastName
    ContactPerson.setLastName
    ContactPerson.setName
    ContactPerson.getInstitution
    ContactPerson.setInstitution
    ContactPerson.getEmail
    ContactPerson.setEmail
    ContactPerson.getURL
    ContactPerson.setURL
    ContactPerson.getAddress
    ContactPerson.setAddress
    ContactPerson.getContactInfo
    ContactPerson.setContactInfo
    """
    person = pyopenms.ContactPerson()
    # Name parts: read the default, then overwrite it.
    for field in ("FirstName", "LastName"):
        getattr(person, "get" + field)()
        getattr(person, "set" + field)("test")
    person.setName("Testy Test")
    # Remaining string properties follow the same get-then-set pattern.
    for field in ("Institution", "Email", "URL", "Address", "ContactInfo"):
        getattr(person, "get" + field)()
        getattr(person, "set" + field)("test")
@report
def testDocumentIdentifier():
    """
    @tests: DocumentIdentifier
    DocumentIdentifier.__init__
    DocumentIdentifier.setIdentifier
    DocumentIdentifier.getIdentifier
    DocumentIdentifier.setLoadedFilePath
    DocumentIdentifier.getLoadedFilePath
    DocumentIdentifier.setLoadedFileType
    DocumentIdentifier.getLoadedFileType
    """
    doc_id = pyopenms.DocumentIdentifier()
    doc_id.setIdentifier("test")
    doc_id.getIdentifier()
    # The loaded-file setters stay disabled as in the original test.
    # ins.setLoadedFilePath("Test")
    doc_id.getLoadedFilePath()
    # ins.setLoadedFileType("test")
    doc_id.getLoadedFileType()
@report
def testGradient():
    """
    @tests: Gradient
    Gradient.__init__
    Gradient.addEluent
    Gradient.addEluent
    Gradient.clearEluents
    Gradient.getEluents
    Gradient.addTimepoint
    Gradient.clearTimepoints
    Gradient.getTimepoints
    Gradient.getPercentage
    Gradient.setPercentage
    Gradient.clearPercentages
    Gradient.isValid
    """
    gradient = pyopenms.Gradient()
    # Eluent list: add, clear, re-add.
    gradient.addEluent("test")
    gradient.clearEluents()
    assert len(gradient.getEluents()) == 0
    gradient.addEluent("test")
    assert len(gradient.getEluents()) == 1
    # Timepoints.
    gradient.clearTimepoints()
    gradient.addTimepoint(5)
    gradient.getTimepoints()
    # Percentages keyed by eluent/timepoint.
    gradient.setPercentage("test", 5, 20)
    gradient.getPercentage("test", 5)
    gradient.clearPercentages()
    gradient.isValid()
@report
def testHPLC():
    """
    @tests: HPLC
    HPLC.__init__
    HPLC.getInstrument
    HPLC.setInstrument
    HPLC.getColumn
    HPLC.setColumn
    HPLC.getTemperature
    HPLC.setTemperature
    HPLC.getPressure
    HPLC.setPressure
    HPLC.getFlux
    HPLC.setFlux
    HPLC.setComment
    HPLC.getComment
    HPLC.setGradient
    HPLC.getGradient
    """
    hplc = pyopenms.HPLC()
    # String properties.
    hplc.setInstrument("test")
    hplc.getInstrument()
    hplc.setColumn("test")
    hplc.getColumn()
    # Numeric properties: set then read each one.
    for setter, getter, value in (
            (hplc.setTemperature, hplc.getTemperature, 6),
            (hplc.setPressure, hplc.getPressure, 6),
            (hplc.setFlux, hplc.getFlux, 8)):
        setter(value)
        getter()
    hplc.setComment("test")
    hplc.getComment()
    # Attach a gradient object and read it back.
    gradient = pyopenms.Gradient()
    hplc.setGradient(gradient)
    hplc.getGradient()
@report
def testInstrument():
    """
    @tests: Instrument
    Instrument.__init__
    Instrument.setName
    Instrument.getName
    Instrument.setVendor
    Instrument.getVendor
    Instrument.setModel
    Instrument.getModel
    Instrument.setCustomizations
    Instrument.getCustomizations
    Instrument.setIonSources
    Instrument.getIonSources
    Instrument.setMassAnalyzers
    Instrument.getMassAnalyzers
    Instrument.setIonDetectors
    Instrument.getIonDetectors
    Instrument.setSoftware
    Instrument.getSoftware
    """
    instrument = pyopenms.Instrument()
    # Simple string properties.
    for field in ("Name", "Vendor", "Model", "Customizations"):
        getattr(instrument, "set" + field)("test")
        getattr(instrument, "get" + field)()
    # List-valued component properties, five elements each.
    instrument.setIonSources([pyopenms.IonSource() for _ in range(5)])
    instrument.getIonSources()
    instrument.setMassAnalyzers([pyopenms.MassAnalyzer() for _ in range(5)])
    instrument.getMassAnalyzers()
    instrument.setIonDetectors([pyopenms.IonDetector() for _ in range(5)])
    instrument.getIonDetectors()
    # Software metadata.
    instrument.setSoftware(pyopenms.Software())
    instrument.getSoftware()
@report
def testIonDetector():
    """
    @tests: IonDetector
    IonDetector.__init__
    IonDetector.setAcquisitionMode
    IonDetector.getAcquisitionMode
    IonDetector.setResolution
    IonDetector.getResolution
    IonDetector.setADCSamplingFrequency
    IonDetector.getADCSamplingFrequency
    IonDetector.setOrder
    IonDetector.getOrder
    """
    detector = pyopenms.IonDetector()
    # Enum-valued property.
    detector.setAcquisitionMode(
        pyopenms.IonDetector.AcquisitionMode.ACQMODENULL)
    detector.getAcquisitionMode()
    # Numeric properties.
    detector.setResolution(8.0)
    detector.getResolution()
    detector.setADCSamplingFrequency(8.0)
    detector.getADCSamplingFrequency()
    detector.setOrder(8)
    detector.getOrder()
@report
def testIonSource():
    """
    @tests: IonSource
    IonSource.__init__
    IonSource.setPolarity
    IonSource.getPolarity
    IonSource.setInletType
    IonSource.getInletType
    IonSource.setIonizationMethod
    IonSource.getIonizationMethod
    IonSource.setOrder
    IonSource.getOrder
    """
    source = pyopenms.IonSource()
    # Enum-valued properties.
    source.setPolarity(pyopenms.IonSource.Polarity.POSITIVE)
    source.getPolarity()
    source.setInletType(pyopenms.IonSource.InletType.INLETNULL)
    source.getInletType()
    source.setIonizationMethod(pyopenms.IonSource.IonizationMethod.ESI)
    source.getIonizationMethod()
    # Integer order.
    source.setOrder(5)
    source.getOrder()
@report
def testMassAnalyzer():
    """
    @tests: MassAnalyzer
    MassAnalyzer.__init__
    MassAnalyzer.setType
    MassAnalyzer.getType
    MassAnalyzer.setResolutionMethod
    MassAnalyzer.getResolutionMethod
    MassAnalyzer.setResolutionType
    MassAnalyzer.getResolutionType
    MassAnalyzer.setScanDirection
    MassAnalyzer.getScanDirection
    MassAnalyzer.setScanLaw
    MassAnalyzer.getScanLaw
    MassAnalyzer.setReflectronState
    MassAnalyzer.getReflectronState
    MassAnalyzer.setResolution
    MassAnalyzer.getResolution
    MassAnalyzer.setAccuracy
    MassAnalyzer.getAccuracy
    MassAnalyzer.setScanRate
    MassAnalyzer.getScanRate
    MassAnalyzer.setScanTime
    MassAnalyzer.getScanTime
    MassAnalyzer.setTOFTotalPathLength
    MassAnalyzer.getTOFTotalPathLength
    MassAnalyzer.setIsolationWidth
    MassAnalyzer.getIsolationWidth
    MassAnalyzer.setFinalMSExponent
    MassAnalyzer.getFinalMSExponent
    MassAnalyzer.setMagneticFieldStrength
    MassAnalyzer.getMagneticFieldStrength
    MassAnalyzer.setOrder
    MassAnalyzer.getOrder
    """
    analyzer = pyopenms.MassAnalyzer()
    # Enum-valued properties: set a concrete value, then read it back.
    enum_fields = (
        ("Type", pyopenms.MassAnalyzer.AnalyzerType.QUADRUPOLE),
        ("ResolutionMethod", pyopenms.MassAnalyzer.ResolutionMethod.FWHM),
        ("ResolutionType", pyopenms.MassAnalyzer.ResolutionType.CONSTANT),
        ("ScanDirection", pyopenms.MassAnalyzer.ScanDirection.UP),
        ("ScanLaw", pyopenms.MassAnalyzer.ScanLaw.LINEAR),
        ("ReflectronState", pyopenms.MassAnalyzer.ReflectronState.ON),
    )
    for field, value in enum_fields:
        getattr(analyzer, "set" + field)(value)
        getattr(analyzer, "get" + field)()
    # Float-valued properties all use the same probe value.
    for field in ("Resolution", "Accuracy", "ScanRate", "ScanTime",
                  "TOFTotalPathLength", "IsolationWidth",
                  "MagneticFieldStrength"):
        getattr(analyzer, "set" + field)(5.0)
        getattr(analyzer, "get" + field)()
    # Integer-valued properties.
    for field in ("FinalMSExponent", "Order"):
        getattr(analyzer, "set" + field)(5)
        getattr(analyzer, "get" + field)()
@report
def testSample():
    """
    @tests: Sample
    Sample.__init__
    Sample.setName
    Sample.getName
    Sample.setOrganism
    Sample.getOrganism
    Sample.setNumber
    Sample.getNumber
    Sample.setComment
    Sample.getComment
    Sample.setState
    Sample.getState
    Sample.setMass
    Sample.getMass
    Sample.setVolume
    Sample.getVolume
    Sample.setConcentration
    Sample.getConcentration
    Sample.getSubsamples
    Sample.setSubsamples
    Sample.removeTreatment
    Sample.countTreatments
    """
    sample = pyopenms.Sample()
    # String properties.
    for field in ("Name", "Organism", "Number", "Comment"):
        getattr(sample, "set" + field)("test")
        getattr(sample, "get" + field)()
    # Enum state.
    sample.setState(pyopenms.Sample.SampleState.LIQUID)
    sample.getState()
    # Float properties.
    for field in ("Mass", "Volume", "Concentration"):
        getattr(sample, "set" + field)(42.0)
        getattr(sample, "get" + field)()
    # Subsample list round trip.
    sample.setSubsamples(sample.getSubsamples())
    # Removing a treatment from an empty sample must raise.
    try:
        sample.removeTreatment(0)
    except Exception:
        raised = True
    else:
        raised = False
    assert raised
    assert sample.countTreatments() == 0
@report
def testLogType():
    """
    @tests: LogType
    LogType.CMD
    LogType.GUI
    LogType.NONE
    """
    # All three log types are plain integer enum values.
    for log_type in (pyopenms.LogType.CMD, pyopenms.LogType.GUI,
                     pyopenms.LogType.NONE):
        assert isinstance(log_type, int)
@report
def testMSExperiment():
    """
    @tests: MSExperiment
    MSExperiment.__init__
    MSExperiment.getLoadedFilePath
    MSExperiment.getMaxMZ
    MSExperiment.getMaxRT
    MSExperiment.getMetaValue
    MSExperiment.getMinMZ
    MSExperiment.getMinRT
    MSExperiment.push_back
    MSExperiment.setLoadedFilePath
    MSExperiment.setMetaValue
    MSExperiment.size
    MSExperiment.sortSpectra
    MSExperiment.updateRanges
    MSExperiment.__eq__
    MSExperiment.__ge__
    MSExperiment.__getitem__
    MSExperiment.__gt__
    MSExperiment.__iter__
    MSExperiment.__le__
    MSExperiment.__lt__
    MSExperiment.__ne__
    MSExperiment.clearMetaInfo
    MSExperiment.getKeys
    MSExperiment.isMetaEmpty
    MSExperiment.metaValueExists
    MSExperiment.removeMetaValue
    MSExperiment.getSize
    MSExperiment.isSorted
    MSExperiment.get2DPeakDataLong
    """
    # Copy semantics: shallow, deep, and copy-constructor all compare equal.
    mse = pyopenms.MSExperiment()
    mse_ = copy.copy(mse)
    assert mse_ == mse
    mse_ = copy.deepcopy(mse)
    assert mse_ == mse
    mse_ = pyopenms.MSExperiment(mse)
    assert mse_ == mse
    _testMetaInfoInterface(mse)
    # Range accessors are float-valued even for an empty experiment.
    mse.updateRanges()
    mse.sortSpectra(True)
    assert isinstance(mse.getMaxRT(), float)
    assert isinstance(mse.getMinRT(), float)
    assert isinstance(mse.getMaxMZ(), float)
    assert isinstance(mse.getMinMZ(), float)
    _testStrOutput(mse.getLoadedFilePath())
    assert isinstance(mse.getMinInt(), float)
    assert isinstance(mse.getMaxInt(), float)
    assert isinstance(mse.getMin()[0], float)
    assert isinstance(mse.getMin()[1], float)
    assert isinstance(mse.getMax()[0], float)
    assert isinstance(mse.getMax()[1], float)
    mse.setLoadedFilePath("")
    assert mse.size() == 0
    mse.getIdentifier()
    mse.getLoadedFileType()
    mse.getLoadedFilePath()
    # Add one two-peak spectrum, then pull the peak data back out as
    # parallel rt/mz/intensity numpy arrays via get2DPeakDataLong.
    spec = pyopenms.MSSpectrum()
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mse.addSpectrum(spec)
    assert mse.size() == 1
    assert mse[0] is not None
    mse.updateRanges()
    rt, mz, inty = mse.get2DPeakDataLong(mse.getMinRT(),mse.getMaxRT(),mse.getMinMZ(),mse.getMaxMZ())
    assert rt.shape[0] == 2
    assert mz.shape[0] == 2
    assert inty.shape[0] == 2
    # Iteration, reflexive equality, and sortedness flag.
    assert isinstance(list(mse), list)
    assert mse == mse
    assert not mse != mse
    assert mse.getSize() >= 0
    assert int(mse.isSorted()) in (0,1)
    # A shallow copy preserves size and equality.
    mse2 = copy.copy(mse)
    assert mse.getSize() == mse2.getSize()
    assert mse2 == mse
@report
def testMSQuantifications():
    """
    @tests: MSQuantifications
    MSQuantifications.__eq__
    MSQuantifications.__ge__
    MSQuantifications.__gt__
    MSQuantifications.__init__
    MSQuantifications.__le__
    MSQuantifications.__lt__
    MSQuantifications.__ne__
    MSQuantifications.getConsensusMaps
    MSQuantifications.setConsensusMaps
    MSQuantifications.setDataProcessing
    MSQuantifications.getDataProcessing
    MSQuantifications.getAssays
    MSQuantifications.getFeatureMaps
    MSQuantifications.setAnalysisSummaryQuantType
    MSQuantifications.getAnalysisSummary
    MSQuantifications.addConsensusMap
    MSQuantifications.assignUIDs
    """
    quant = pyopenms.MSQuantifications()
    # Equality is reflexive.
    assert quant == quant
    assert not quant != quant
    # Consensus-map and data-processing lists round-trip through set/get.
    quant.setConsensusMaps(quant.getConsensusMaps())
    summary = quant.getAnalysisSummary()
    quant.setDataProcessingList(quant.getDataProcessingList())
    quant.getAssays()
    quant.getFeatureMaps()
    # Label-free quantification type, plus map/UID bookkeeping.
    quant.setAnalysisSummaryQuantType(
        pyopenms.MSQuantifications.QUANT_TYPES.LABELFREE)
    quant.addConsensusMap(pyopenms.ConsensusMap())
    quant.assignUIDs()
@report
def testMSSpectrum():
    """
    @tests: MSSpectrum
    MSSpectrum.__init__
    MSSpectrum.clear
    MSSpectrum.clearMetaInfo
    MSSpectrum.findNearest
    MSSpectrum.getAcquisitionInfo
    MSSpectrum.getComment
    MSSpectrum.getDataProcessing
    MSSpectrum.getInstrumentSettings
    MSSpectrum.getKeys
    MSSpectrum.getMSLevel
    MSSpectrum.getMetaValue
    MSSpectrum.getName
    MSSpectrum.getNativeID
    MSSpectrum.getPeptideIdentifications
    MSSpectrum.getPrecursors
    MSSpectrum.getProducts
    MSSpectrum.getRT
    MSSpectrum.getSourceFile
    MSSpectrum.getType
    MSSpectrum.get_peaks
    MSSpectrum.intensityInRange
    MSSpectrum.isMetaEmpty
    MSSpectrum.isSorted
    MSSpectrum.metaValueExists
    MSSpectrum.push_back
    MSSpectrum.removeMetaValue
    MSSpectrum.setAcquisitionInfo
    MSSpectrum.setComment
    MSSpectrum.setDataProcessing
    MSSpectrum.setInstrumentSettings
    MSSpectrum.setMSLevel
    MSSpectrum.setMetaValue
    MSSpectrum.setName
    MSSpectrum.setNativeID
    MSSpectrum.setPeptideIdentifications
    MSSpectrum.setPrecursors
    MSSpectrum.setProducts
    MSSpectrum.setRT
    MSSpectrum.setSourceFile
    MSSpectrum.setType
    MSSpectrum.set_peaks
    MSSpectrum.size
    MSSpectrum.unify
    MSSpectrum.updateRanges
    MSSpectrum.__eq__
    MSSpectrum.__ge__
    MSSpectrum.__getitem__
    MSSpectrum.__gt__
    MSSpectrum.__le__
    MSSpectrum.__lt__
    MSSpectrum.__ne__
    """
    # Copy semantics: shallow, deep, and copy-constructor all compare equal.
    spec = pyopenms.MSSpectrum()
    spec_ = copy.copy(spec)
    assert spec_ == spec
    spec_ = copy.deepcopy(spec)
    assert spec_ == spec
    spec_ = pyopenms.MSSpectrum(spec)
    assert spec_ == spec
    _testMetaInfoInterface(spec)
    testSpectrumSetting(spec)
    # Scalar properties round-trip.
    spec.setRT(3.0)
    assert spec.getRT() == 3.0
    spec.setMSLevel(2)
    assert spec.getMSLevel() == 2
    spec.setName("spec")
    assert spec.getName() == "spec"
    # Single Peak1D push/read, plus range and nearest-peak queries.
    p = pyopenms.Peak1D()
    p.setMZ(1000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    assert spec.size() == 1
    assert spec[0] == p
    spec.updateRanges()
    assert isinstance(spec.findNearest(0.0), int)
    assert isinstance(spec.getMin()[0], float)
    assert isinstance(spec.getMax()[0], float)
    assert isinstance(spec.getMinInt(), float)
    assert isinstance(spec.getMaxInt(), float)
    assert spec == spec
    assert not spec != spec
    # get_peaks/set_peaks round-trip for the single-peak spectrum.
    mz, ii = spec.get_peaks()
    assert len(mz) == len(ii)
    assert len(mz) == 1
    spec.set_peaks((mz, ii))
    mz0, ii0 = spec.get_peaks()
    assert mz0 == mz
    assert ii0 == ii
    assert int(spec.isSorted()) in (0,1)
    # Two peaks added individually via push_back.
    spec.clear(False)
    p = pyopenms.Peak1D()
    p.setMZ(1000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    p = pyopenms.Peak1D()
    p.setMZ(2000.0)
    p.setIntensity(400.0)
    spec.push_back(p)
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 1000.0
    assert spec[1].getMZ() == 2000.0
    assert spec[0].getIntensity() == 200.0
    assert spec[1].getIntensity() == 400.0
    assert mz[0] == 1000.0
    assert mz[1] == 2000.0
    assert ii[0] == 200.0
    assert ii[1] == 400.0
    # set_peaks from numpy float64 m/z + float32 intensity arrays.
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # Fast
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float64)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # Slow
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float32)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    ###################################
    # get data arrays
    ###################################
    # String data arrays: values come back as bytes.
    assert len(spec.getStringDataArrays()) == 0
    string_da = [ pyopenms.StringDataArray() ]
    string_da[0].push_back("hello")
    string_da[0].push_back("world")
    string_da.append( pyopenms.StringDataArray() )
    string_da[1].push_back("other")
    spec.setStringDataArrays( string_da )
    assert len(spec.getStringDataArrays()) == 2
    assert spec.getStringDataArrays()[0][0] == b"hello"
    assert spec.getStringDataArrays()[1][0] == b"other"
    # Integer data arrays, element-wise and via numpy set_data.
    spec = pyopenms.MSSpectrum()
    assert len(spec.getIntegerDataArrays()) == 0
    int_da = [ pyopenms.IntegerDataArray() ]
    int_da[0].push_back(5)
    int_da[0].push_back(6)
    int_da.append( pyopenms.IntegerDataArray() )
    int_da[1].push_back(8)
    spec.setIntegerDataArrays( int_da )
    assert len(spec.getIntegerDataArrays()) == 2
    assert spec.getIntegerDataArrays()[0][0] == 5
    assert spec.getIntegerDataArrays()[1][0] == 8
    spec = pyopenms.MSSpectrum()
    data = np.array( [5, 8, 42] ).astype(np.intc)
    int_da = [ pyopenms.IntegerDataArray() ]
    int_da[0].set_data(data)
    spec.setIntegerDataArrays( int_da )
    assert len(spec.getIntegerDataArrays()) == 1
    assert spec.getIntegerDataArrays()[0][0] == 5
    assert spec.getIntegerDataArrays()[0][2] == 42
    assert len(int_da[0].get_data() ) == 3
    # Float data arrays, element-wise and via numpy set_data.
    spec = pyopenms.MSSpectrum()
    assert len(spec.getFloatDataArrays()) == 0
    f_da = [ pyopenms.FloatDataArray() ]
    f_da[0].push_back(5.0)
    f_da[0].push_back(6.0)
    f_da.append( pyopenms.FloatDataArray() )
    f_da[1].push_back(8.0)
    spec.setFloatDataArrays( f_da )
    assert len(spec.getFloatDataArrays()) == 2.0
    assert spec.getFloatDataArrays()[0][0] == 5.0
    assert spec.getFloatDataArrays()[1][0] == 8.0
    spec = pyopenms.MSSpectrum()
    data = np.array( [5, 8, 42] ).astype(np.float32)
    f_da = [ pyopenms.FloatDataArray() ]
    f_da[0].set_data(data)
    spec.setFloatDataArrays( f_da )
    assert len(spec.getFloatDataArrays()) == 1
    assert spec.getFloatDataArrays()[0][0] == 5.0
    assert spec.getFloatDataArrays()[0][2] == 42.0
    assert len(f_da[0].get_data() ) == 3
@report
def testStringDataArray():
    """
    Exercise StringDataArray: size/push_back/indexing/clear/resize.

    @tests: StringDataArray
    """
    da = pyopenms.StringDataArray()
    assert da.size() == 0
    # push_back accepts Python str on input ...
    da.push_back("hello")
    da.push_back("world")
    assert da.size() == 2
    # ... but indexing returns bytes.
    assert da[0] == b"hello"
    assert da[1] == b"world"
    da[1] = b"hello world"
    assert da[1] == b"hello world", da[1]
    da.clear()
    assert da.size() == 0
    da.push_back("hello")
    assert da.size() == 1
    # resize grows the array; the new slots are then assignable by index.
    da.resize(3)
    da[0] = b"hello"
    da[1] = b""
    da[2] = b"world"
    assert da.size() == 3
@report
def testIntegerDataArray():
    """
    Exercise IntegerDataArray: element access, clear/resize and the
    numpy get_data/set_data round trip.

    @tests: IntegerDataArray
    """
    da = pyopenms.IntegerDataArray()
    assert da.size() == 0
    da.push_back(1)
    da.push_back(4)
    assert da.size() == 2
    assert da[0] == 1
    assert da[1] == 4
    da[1] = 7
    assert da[1] == 7
    da.clear()
    assert da.size() == 0
    da.push_back(1)
    assert da.size() == 1
    da.resize(3)
    da[0] = 1
    da[1] = 2
    da[2] = 3
    assert da.size() == 3
    # set_data expects a C-int typed numpy array (np.intc).
    q = da.get_data()
    q = np.append(q, 4).astype(np.intc)
    da.set_data(q)
    assert da.size() == 4
@report
def testFloatDataArray():
    """
    Exercise FloatDataArray: element access, clear/resize and the
    numpy get_data/set_data round trip.

    @tests: FloatDataArray
    """
    da = pyopenms.FloatDataArray()
    assert da.size() == 0
    da.push_back(1.0)
    da.push_back(4.0)
    assert da.size() == 2
    assert da[0] == 1.0
    assert da[1] == 4.0
    da[1] = 7.0
    assert da[1] == 7.0
    da.clear()
    assert da.size() == 0
    da.push_back(1.0)
    assert da.size() == 1
    da.resize(3)
    da[0] = 1.0
    da[1] = 2.0
    da[2] = 3.0
    assert da.size() == 3
    # set_data expects a single-precision numpy array (np.float32).
    q = da.get_data()
    q = np.append(q, 4.0).astype(np.float32)
    da.set_data(q)
    assert da.size() == 4
@report
def testMSChromatogram():
    """
    Exercise MSChromatogram: copying, peak container access, range
    updates and the get_peaks/set_peaks numpy interface.

    @tests: MSChromatogram
     MSChromatogram.__init__
     MSChromatogram.__copy__
    """
    chrom = pyopenms.MSChromatogram()
    # Shallow copy, deep copy and copy construction must all compare equal.
    chrom_ = copy.copy(chrom)
    assert chrom_ == chrom
    chrom_ = copy.deepcopy(chrom)
    assert chrom_ == chrom
    chrom_ = pyopenms.MSChromatogram(chrom)
    assert chrom_ == chrom
    _testMetaInfoInterface(chrom)
    chrom.setName("chrom")
    assert chrom.getName() == "chrom"
    p = pyopenms.ChromatogramPeak()
    p.setRT(1000.0)
    p.setIntensity(200.0)
    chrom.push_back(p)
    assert chrom.size() == 1
    assert chrom[0] == p
    # updateRanges must run before the min/max queries below are meaningful.
    chrom.updateRanges()
    assert isinstance(chrom.findNearest(0.0), int)
    assert isinstance(chrom.getMin()[0], float)
    assert isinstance(chrom.getMax()[0], float)
    assert isinstance(chrom.getMinInt(), float)
    assert isinstance(chrom.getMaxInt(), float)
    assert chrom == chrom
    assert not chrom != chrom
    # get_peaks returns parallel arrays (positions, intensities).
    mz, ii = chrom.get_peaks()
    assert len(mz) == len(ii)
    assert len(mz) == 1
    chrom.set_peaks((mz, ii))
    mz0, ii0 = chrom.get_peaks()
    assert mz0 == mz
    assert ii0 == ii
    assert int(chrom.isSorted()) in (0,1)
    # clear(False) removes the peaks but keeps the metadata.
    chrom.clear(False)
    p = pyopenms.ChromatogramPeak()
    p.setRT(1000.0)
    p.setIntensity(200.0)
    chrom.push_back(p)
    p = pyopenms.ChromatogramPeak()
    p.setRT(2000.0)
    p.setIntensity(400.0)
    chrom.push_back(p)
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 1000.0
    assert chrom[1].getRT() == 2000.0
    assert chrom[0].getIntensity() == 200.0
    assert chrom[1].getIntensity() == 400.0
    assert mz[0] == 1000.0
    assert mz[1] == 2000.0
    assert ii[0] == 200.0
    assert ii[1] == 400.0
    # set_peaks from numpy arrays: float64 positions / float32 intensities.
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # Fast path: both arrays float64.
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float64)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # Slow path: both arrays float32 (requires conversion internally).
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float32)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
@report
def testMRMFeature():
    """
    Exercise MRMFeature: sub-feature management, precursor features and
    the OpenSwath score container.

    @tests: MRMFeature
     MRMFeature.__init__
     MRMFeature.addFeature
     MRMFeature.getFeatures
     MRMFeature.getScores
     MRMFeature.setScores
    """
    mrmfeature = pyopenms.MRMFeature()
    f = pyopenms.Feature()
    fs = mrmfeature.getFeatures()
    assert len(fs) == 0
    mrmfeature.addFeature(f, "myFeature")
    fs = mrmfeature.getFeatures()
    assert len(fs) == 1
    assert mrmfeature.getFeature("myFeature") is not None
    # getFeatureIDs fills the passed-in list in place.
    slist = []
    mrmfeature.getFeatureIDs(slist)
    assert len(slist) == 1
    mrmfeature.addPrecursorFeature(f, "myFeature_Pr0")
    slist = []
    mrmfeature.getPrecursorFeatureIDs(slist)
    assert len(slist) == 1
    assert mrmfeature.getPrecursorFeature("myFeature_Pr0") is not None
    # Scores round trip: mutate a copy, write it back, read it again.
    s = mrmfeature.getScores()
    assert abs(s.yseries_score - 0.0) < 1e-4
    s.yseries_score = 4.0
    mrmfeature.setScores(s)
    s2 = mrmfeature.getScores()
    assert abs(s2.yseries_score - 4.0) < 1e-4
@report
def testConfidenceScoring():
    """
    Smoke test: a ConfidenceScoring object can be constructed.

    @tests: ConfidenceScoring
     ConfidenceScoring.__init__
    """
    # Construction must not raise; the instance is not used further.
    instance = pyopenms.ConfidenceScoring()
@report
def testMRMDecoy():
    """
    Smoke test: MRMDecoy construction and presence of generateDecoys.

    @tests: MRMDecoy
     MRMDecoy.__init__
    """
    generator = pyopenms.MRMDecoy()
    assert generator is not None
    # The decoy-generation entry point must be exposed on fresh instances.
    assert pyopenms.MRMDecoy().generateDecoys is not None
@report
def testMRMTransitionGroup():
    """
    Exercise MRMTransitionGroupCP: group ID round trip and adding a
    transition.

    @tests: MRMTransitionGroup
    """
    mrmgroup = pyopenms.MRMTransitionGroupCP()
    assert mrmgroup is not None
    mrmgroup.setTransitionGroupID("this_id")
    assert mrmgroup.getTransitionGroupID() == "this_id"
    assert len(mrmgroup.getTransitions()) == 0
    mrmgroup.addTransition(pyopenms.ReactionMonitoringTransition(), "tr1")
    assert len(mrmgroup.getTransitions()) == 1
@report
def testReactionMonitoringTransition():
    """
    Smoke test: a ReactionMonitoringTransition can be constructed.

    @tests: ReactionMonitoringTransition
    """
    # Construction must not raise; the instance is not used further.
    transition = pyopenms.ReactionMonitoringTransition()
@report
def testTargetedExperiment():
    """
    Exercise TargetedExperiment: copy semantics, clear, and the
    getter/setter round trips for its sub-containers.

    @tests: TargetedExperiment
    """
    m = pyopenms.TargetedExperiment()
    m_ = copy.copy(m)
    assert m_ == m
    m_ = copy.deepcopy(m)
    assert m_ == m
    m_ = pyopenms.TargetedExperiment(m)
    assert m_ == m
    m.clear(True)
    m.setCVs(m.getCVs())
    # Each set(get()) pair is an identity round trip through the binding.
    targeted = m
    targeted.setCVs(targeted.getCVs())
    targeted.setTargetCVTerms(targeted.getTargetCVTerms())
    targeted.setPeptides(targeted.getPeptides())
    targeted.setProteins(targeted.getProteins())
    targeted.setTransitions(targeted.getTransitions())
    assert m == m
    assert not m != m
@report
def testTargetedExperimentHelper():
    """
    Exercise the TargetedExperimentHelper types: RetentionTime (units and
    types), Peptide and Compound, including charge and RT accessors.

    @tests: TargetedExperimentHelper
    """
    # Enum values must be constructible and addressable by name.
    rtu = pyopenms.RetentionTime.RTUnit()
    rtu = pyopenms.RetentionTime.RTUnit.SECOND
    rtu = pyopenms.RetentionTime.RTUnit.MINUTE
    rtt = pyopenms.RetentionTime.RTType()
    rtt = pyopenms.RetentionTime.RTType.LOCAL
    rtt = pyopenms.RetentionTime.RTType.NORMALIZED
    rtt = pyopenms.RetentionTime.RTType.IRT
    rt = pyopenms.RetentionTime()
    assert rt.software_ref is not None
    assert not rt.isRTset()
    rt.setRT(5.0)
    rt.retention_time_unit = pyopenms.RetentionTime.RTUnit.SECOND
    rt.retention_time_type = pyopenms.RetentionTime.RTType.NORMALIZED
    assert rt.isRTset()
    assert rt.getRT() == 5.0
    p = pyopenms.Peptide()
    assert p.rts is not None
    assert p.id is not None
    assert p.protein_refs is not None
    assert p.evidence is not None
    assert p.sequence is not None
    assert p.mods is not None
    assert not p.hasCharge()
    p.setChargeState(5)
    assert p.hasCharge()
    assert p.getChargeState() == 5
    assert not p.hasRetentionTime()
    # Assigning the rts list makes the first RT the peptide's retention time.
    p.rts = [rt]
    assert p.hasRetentionTime()
    assert p.getRetentionTime() == 5.0
    assert p.getRetentionTimeUnit() == pyopenms.RetentionTime.RTUnit.SECOND
    assert p.getRetentionTimeType() == pyopenms.RetentionTime.RTType.NORMALIZED
    # Compound mirrors the Peptide charge/RT interface.
    c = pyopenms.Compound()
    assert c.rts is not None
    assert c.id is not None
    assert c.molecular_formula is not None
    assert c.smiles_string is not None
    assert c.theoretical_mass is not None
    assert not c.hasCharge()
    c.setChargeState(5)
    assert c.hasCharge()
    assert c.getChargeState() == 5
    assert not c.hasRetentionTime()
    c.rts = [rt]
    assert c.hasRetentionTime()
    assert c.getRetentionTime() == 5.0
    assert c.getRetentionTimeUnit() == pyopenms.RetentionTime.RTUnit.SECOND
    assert c.getRetentionTimeType() == pyopenms.RetentionTime.RTType.NORMALIZED
@report
def testMapAlignment():
    """
    Exercise MapAlignmentAlgorithmPoseClustering: parameter handling and
    existence of the alignment entry points.

    @tests: MapAlignmentAlgorithmPoseClustering
     MapAlignmentAlgorithmPoseClustering.__init__
     MapAlignmentAlgorithmPoseClustering.getDefaults
     MapAlignmentAlgorithmPoseClustering.getName
     MapAlignmentAlgorithmPoseClustering.getParameters
     MapAlignmentAlgorithmPoseClustering.setName
     MapAlignmentAlgorithmPoseClustering.setParameters
     MapAlignmentAlgorithmPoseClustering.setReference
     MapAlignmentAlgorithmPoseClustering.align
     MapAlignmentAlgorithmPoseClustering.endProgress
     MapAlignmentAlgorithmPoseClustering.getLogType
     MapAlignmentAlgorithmPoseClustering.setLogType
     MapAlignmentAlgorithmPoseClustering.setProgress
     MapAlignmentAlgorithmPoseClustering.startProgress
     MapAlignmentTransformer.transformRetentionTimes
    """
    ma = pyopenms.MapAlignmentAlgorithmPoseClustering()
    assert isinstance(ma.getDefaults(), pyopenms.Param)
    assert isinstance(ma.getParameters(), pyopenms.Param)
    _testStrOutput(ma.getName())
    ma.setName(ma.getName())
    ma.getDefaults()
    ma.getParameters()
    ma.setParameters(ma.getDefaults())
    # Bare attribute accesses only verify the methods are exposed; they
    # are not called here.
    ma.setReference
    ma.align
    pyopenms.MapAlignmentTransformer.transformRetentionTimes
@report
def testMatrixDouble():
    """
    Exercise MatrixDouble: resize/fill, element access, and the three
    numpy accessors (get_matrix copy, get_matrix_as_view zero-copy view,
    set_matrix).

    @tests: MatrixDouble
    """
    m = pyopenms.MatrixDouble()
    N = 90
    # resize(rows, cols, fill_value)
    m.resize(N-1, N+2, 5.0)
    assert m.rows() == 89
    assert m.cols() == 92
    rows = N-1
    cols = N+2
    test = []
    for i in range(int(rows)):
        for j in range(int(cols)):
            test.append( m.getValue(i,j) )
    testm = np.asarray(test)
    testm = testm.reshape(rows, cols)
    # Every cell was filled with 5.0, so the total is rows*cols*5.
    assert sum(sum(testm)) == 40940.0
    assert sum(sum(testm)) == (N-1)*(N+2)*5
    matrix = m.get_matrix()
    assert sum(sum(matrix)) == 40940.0
    assert sum(sum(matrix)) == (N-1)*(N+2)*5
    matrix_view = m.get_matrix_as_view()
    assert sum(sum(matrix_view)) == 40940.0
    assert sum(sum(matrix_view)) == (N-1)*(N+2)*5
    # Row = 3 / Column = 5 (getValue/setValue take (row, col), as the
    # iteration above demonstrates).
    ## Now change a value:
    assert m.getValue(3, 5) == 5.0
    m.setValue(3, 5, 8.0)
    assert m.getValue(3, 5) == 8.0
    mat = m.get_matrix_as_view()
    assert mat[3, 5] == 8.0
    mat = m.get_matrix()
    assert m.getValue(3, 5) == 8.0
    assert mat[3, 5] == 8.0
    # Whatever we change here gets changed in the raw data as well
    matrix_view = m.get_matrix_as_view()
    matrix_view[1, 6] = 11.0
    assert m.getValue(1, 6) == 11.0
    assert matrix_view[1, 6] == 11.0
    m.clear()
    assert m.rows() == 0
    assert m.cols() == 0
    # mat is a detached copy (get_matrix), so it survives the clear and
    # can repopulate the matrix via set_matrix.
    mat[3, 6] = 9.0
    m.set_matrix(mat)
    assert m.getValue(3, 5) == 8.0
    assert m.getValue(3, 6) == 9.0
@report
def testMapAlignmentIdentification():
    """
    Smoke test: MapAlignmentAlgorithmIdentification construction and
    presence of its entry points.

    @tests: MapAlignmentAlgorithmIdentification
     MapAlignmentAlgorithmIdentification.__init__
    """
    algo = pyopenms.MapAlignmentAlgorithmIdentification()
    # Both entry points must be exposed on fresh instances.
    for method_name in ("align", "setReference"):
        assert getattr(pyopenms.MapAlignmentAlgorithmIdentification(), method_name) is not None
@report
def testMapAlignmentTransformer():
    """
    Smoke test: MapAlignmentTransformer construction and presence of
    transformRetentionTimes.

    @tests: MapAlignmentTransformer
     MapAlignmentTransformer.__init__
    """
    transformer = pyopenms.MapAlignmentTransformer()
    assert getattr(pyopenms.MapAlignmentTransformer(), "transformRetentionTimes") is not None
@report
def testMxxxFile():
    """
    Round-trip a one-spectrum experiment through the mzData, mzML and
    mzXML file handlers (writing into the current working directory),
    plus the mzML in-memory buffer interface and an MzQuantMLFile smoke
    check.

    @tests: MzDataFile
     MzDataFile.__init__
     MzDataFile.endProgress
     MzDataFile.getLogType
     MzDataFile.load
     MzDataFile.setLogType
     MzDataFile.setProgress
     MzDataFile.startProgress
     MzDataFile.store
     MzDataFile.getOptions
     MzDataFile.setOptions

     MzMLFile.__init__
     MzMLFile.endProgress
     MzMLFile.getLogType
     MzMLFile.load
     MzMLFile.setLogType
     MzMLFile.setProgress
     MzMLFile.startProgress
     MzMLFile.store
     MzMLFile.getOptions
     MzMLFile.setOptions

     MzXMLFile.getOptions
     MzXMLFile.setOptions
     MzXMLFile.__init__
     MzXMLFile.endProgress
     MzXMLFile.getLogType
     MzXMLFile.load
     MzXMLFile.setLogType
     MzXMLFile.setProgress
     MzXMLFile.startProgress
     MzXMLFile.store

     MzQuantMLFile.__init__
     MzQuantMLFile.isSemanticallyValid
     MzQuantMLFile.load
     MzQuantMLFile.store
    """
    mse = pyopenms.MSExperiment()
    s = pyopenms.MSSpectrum()
    mse.addSpectrum(s)
    fh = pyopenms.MzDataFile()
    _testProgressLogger(fh)
    fh.store("test.mzData", mse)
    fh.load("test.mzData", mse)
    fh.setOptions(fh.getOptions())
    fh = pyopenms.MzMLFile()
    _testProgressLogger(fh)
    fh.store("test.mzML", mse)
    fh.load("test.mzML", mse)
    fh.setOptions(fh.getOptions())
    # In-memory serialization: storeBuffer writes the mzML document into
    # an OpenMS String; the expected byte length pins the output format.
    myStr = pyopenms.String()
    fh.storeBuffer(myStr, mse)
    assert len(myStr.toString()) == 5269
    mse2 = pyopenms.MSExperiment()
    fh.loadBuffer(bytes(myStr), mse2)
    assert mse2 == mse
    assert mse2.size() == 1
    fh = pyopenms.MzXMLFile()
    _testProgressLogger(fh)
    fh.store("test.mzXML", mse)
    fh.load("test.mzXML", mse)
    fh.setOptions(fh.getOptions())
    # MzQuantMLFile: only check the methods are exposed, no I/O.
    fh = pyopenms.MzQuantMLFile()
    fh.isSemanticallyValid
    fh.load
    fh.store
@report
def testParamXMLFile():
    """
    Round-trip an empty Param through ParamXMLFile (writes test.ini into
    the current working directory).

    @tests: ParamXMLFile
     ParamXMLFile.__init__
     ParamXMLFile.load
     ParamXMLFile.store
    """
    fh = pyopenms.ParamXMLFile()
    p = pyopenms.Param()
    fh.store("test.ini", p)
    fh.load("test.ini", p)
@report
def testPeak():
    """
    Exercise Peak1D and Peak2D: intensity/m/z (and RT for 2D) accessors
    plus equality.

    @tests: Peak1D
     Peak1D.__init__
     Peak1D.getIntensity
     Peak1D.getMZ
     Peak1D.setIntensity
     Peak1D.setMZ
     Peak1D.__eq__
     Peak1D.__ge__
     Peak1D.__gt__
     Peak1D.__le__
     Peak1D.__lt__
     Peak1D.__ne__

     Peak2D.__init__
     Peak2D.getIntensity
     Peak2D.getMZ
     Peak2D.getRT
     Peak2D.setIntensity
     Peak2D.setMZ
     Peak2D.setRT
     Peak2D.__eq__
     Peak2D.__ge__
     Peak2D.__gt__
     Peak2D.__le__
     Peak2D.__lt__
     Peak2D.__ne__
    """
    p1 = pyopenms.Peak1D()
    p1.setIntensity(12.0)
    assert p1.getIntensity() == 12.0
    p1.setMZ(13.0)
    assert p1.getMZ() == 13.0
    assert p1 == p1
    assert not p1 != p1
    p2 = pyopenms.Peak2D()
    assert p2 == p2
    assert not p2 != p2
    p2.setIntensity(22.0)
    assert p2.getIntensity() == 22.0
    p2.setMZ(23.0)
    assert p2.getMZ() == 23.0
    p2.setRT(45.0)
    assert p2.getRT() == 45.0
@report
def testNumpressCoder():
    """
    Round-trip a float list through MSNumpressCoder (linear compression):
    encodeNP followed by decodeNP must recover the input, and encoding
    into a plain Python str must fail because it cannot be passed by
    reference to C++.

    @tests: MSNumpressCoder
    """
    # Fix: the original bound the coder to the name ``np``, shadowing the
    # module-level numpy alias used throughout this file.
    coder = pyopenms.MSNumpressCoder()
    nc = pyopenms.NumpressConfig()
    nc.np_compression = coder.NumpressCompression.LINEAR
    nc.estimate_fixed_point = True
    # encodeNP writes into an OpenMS String (by reference on the C++ side).
    tmp = pyopenms.String()
    out = []
    inp = [1.0, 2.0, 3.0]
    coder.encodeNP(inp, tmp, True, nc)
    res = tmp.toString()
    assert len(res) != 0, len(res)
    assert res != "", res
    coder.decodeNP(res, out, True, nc)
    assert len(out) == 3, (out, res)
    assert out == inp, out
    # Now try to use a simple Python string as input -> this will fail as we
    # cannot pass this by reference in C++
    res = ""
    try:
        coder.encodeNP(inp, res, True, nc)
        has_error = False
    except AssertionError:
        has_error = True
    assert has_error
@report
def testNumpressConfig():
    """
    Exercise the NumpressConfig attributes and setCompression.

    @tests: NumpressConfig
    """
    coder = pyopenms.MSNumpressCoder()
    # Fix: the original named this config ``np``, shadowing the
    # module-level numpy alias used throughout this file.
    cfg = pyopenms.NumpressConfig()
    cfg.np_compression = coder.NumpressCompression.LINEAR
    assert cfg.np_compression == coder.NumpressCompression.LINEAR
    cfg.numpressFixedPoint = 4.2
    cfg.numpressErrorTolerance = 4.2
    cfg.estimate_fixed_point = True
    cfg.linear_fp_mass_acc = 4.2
    cfg.setCompression("linear")
@report
def testBase64():
    """
    Round-trip a float list through Base64 encoding, both 64-bit
    (encode64/decode64) and 32-bit (encode32/decode32), little-endian,
    uncompressed.

    @tests: Base64
    """
    b = pyopenms.Base64()
    out = pyopenms.String()
    inp = [1.0, 2.0, 3.0]
    b.encode64(inp, b.ByteOrder.BYTEORDER_LITTLEENDIAN, out, False)
    res = out.toString()
    assert len(res) != 0
    assert res != ""
    # decode64 fills the passed-in list in place.
    convBack = []
    b.decode64(res, b.ByteOrder.BYTEORDER_LITTLEENDIAN, convBack, False)
    assert convBack == inp, convBack
    # For 32 bit
    out = pyopenms.String()
    b.encode32(inp, b.ByteOrder.BYTEORDER_LITTLEENDIAN, out, False)
    res = out.toString()
    assert len(res) != 0
    assert res != ""
    convBack = []
    b.decode32(res, b.ByteOrder.BYTEORDER_LITTLEENDIAN, convBack, False)
    assert convBack == inp, convBack
@report
def testPeakFileOptions():
    """
    Exercise PeakFileOptions: call the no-argument getters and verify
    the remaining methods are exposed.

    @tests: PeakFileOptions
     PeakFileOptions.__init__
     PeakFileOptions.addMSLevel
     PeakFileOptions.clearMSLevels
     PeakFileOptions.containsMSLevel
     PeakFileOptions.getCompression
     PeakFileOptions.getMSLevels
     PeakFileOptions.getMetadataOnly
     PeakFileOptions.getWriteSupplementalData
     PeakFileOptions.hasMSLevels
     PeakFileOptions.setCompression
     PeakFileOptions.setMSLevels
     PeakFileOptions.setMetadataOnly
     PeakFileOptions.setWriteSupplementalData
    """
    pfo = pyopenms.PeakFileOptions()
    # Bare attribute accesses only verify the setters are exposed.
    pfo.addMSLevel
    pfo.clearMSLevels()
    pfo.containsMSLevel(1)
    pfo.getCompression()
    pfo.getMSLevels()
    pfo.getMetadataOnly()
    pfo.getWriteSupplementalData()
    pfo.hasMSLevels()
    pfo.setCompression
    pfo.setMSLevels
    pfo.setMetadataOnly
    pfo.setWriteSupplementalData
@report
def testMRMMapping():
    """
    Exercise MRMMapping.mapExperiment: mapping an experiment with one
    chromatogram against an empty TargetedExperiment yields no mapped
    chromatograms.

    @tests: MRMMapping
     MRMMapping.__init__
     MRMMapping.mapExperiment
    """
    p = pyopenms.MRMMapping()
    assert p.mapExperiment is not None
    e = pyopenms.MSExperiment()
    c = pyopenms.MSChromatogram()
    e.addChromatogram(c)
    assert e.getNrChromatograms() == 1
    o = pyopenms.MSExperiment()
    t = pyopenms.TargetedExperiment()
    p.mapExperiment(e, t, o)
    assert o.getNrChromatograms() == 0 # not so easy to test
@report
def testPeakPickerMRM():
    """
    Smoke test: PeakPickerMRM construction and presence of
    pickChromatogram.

    @tests: PeakPickerMRM
     PeakPickerMRM.__init__
     PeakPickerMRM.pickChromatogram
    """
    picker = pyopenms.PeakPickerMRM()
    assert picker.pickChromatogram is not None
@report
def testPeakPickerHiRes():
    """
    Smoke test: PeakPickerHiRes construction and presence of its
    picking entry points.

    @tests: PeakPickerHiRes
     PeakPickerHiRes.__init__
     PeakPickerHiRes.endProgress
     PeakPickerHiRes.getDefaults
     PeakPickerHiRes.getLogType
     PeakPickerHiRes.getName
     PeakPickerHiRes.getParameters
     PeakPickerHiRes.pick
     PeakPickerHiRes.pickExperiment
     PeakPickerHiRes.setLogType
     PeakPickerHiRes.setName
     PeakPickerHiRes.setParameters
     PeakPickerHiRes.setProgress
     PeakPickerHiRes.startProgress
    """
    p = pyopenms.PeakPickerHiRes()
    assert p.pick is not None
    assert p.pickExperiment is not None
@report
def testPeakTypeEstimator():
    """
    Smoke test: estimateType runs on an empty spectrum.

    @tests: PeakTypeEstimator
     PeakTypeEstimator.__init__
     PeakTypeEstimator.estimateType
    """
    estimator = pyopenms.PeakTypeEstimator()
    empty_spectrum = pyopenms.MSSpectrum()
    # Only checks that the call does not raise; the result is discarded.
    estimator.estimateType(empty_spectrum)
@report
def testPeptideHit():
    """
    Exercise PeptideHit: construction, peptide-evidence management, and
    score/rank/sequence accessors.

    @tests: PeptideHit
     PeptideHit.__init__
     PeptideHit.addPeptideEvidence
     PeptideHit.getPeptideEvidences
     PeptideHit.setPeptideEvidences
     PeptideHit.clearMetaInfo
     PeptideHit.getKeys
     PeptideHit.getMetaValue
     PeptideHit.getRank
     PeptideHit.getScore
     PeptideHit.getSequence
     PeptideHit.isMetaEmpty
     PeptideHit.metaValueExists
     PeptideHit.removeMetaValue
     PeptideHit.setMetaValue
     PeptideHit.setRank
     PeptideHit.setScore
     PeptideHit.setSequence
     PeptideHit.__eq__
     PeptideHit.__ge__
     PeptideHit.__gt__
     PeptideHit.__le__
     PeptideHit.__lt__
     PeptideHit.__ne__
    """
    ph = pyopenms.PeptideHit()
    assert ph == ph
    assert not ph != ph
    # Constructor args: score=1.0, rank=1, then 0 (presumably the charge
    # state -- TODO confirm against the binding) and the sequence.
    ph = pyopenms.PeptideHit(1.0, 1, 0, pyopenms.AASequence.fromString("A"))
    _testMetaInfoInterface(ph)
    assert len(ph.getPeptideEvidences()) == 0
    assert ph.getPeptideEvidences() == []
    pe = pyopenms.PeptideEvidence()
    pe.setProteinAccession('B_id')
    ph.addPeptideEvidence(pe)
    assert len(ph.getPeptideEvidences()) == 1
    assert ph.getPeptideEvidences()[0].getProteinAccession() == 'B_id'
    # setPeptideEvidences replaces the whole list.
    ph.setPeptideEvidences([pe,pe])
    assert len(ph.getPeptideEvidences()) == 2
    assert ph.getPeptideEvidences()[0].getProteinAccession() == 'B_id'
    assert ph.getScore() == 1.0
    assert ph.getRank() == 1
    assert ph.getSequence().toString() == "A"
    ph.setScore(2.0)
    assert ph.getScore() == 2.0
    ph.setRank(30)
    assert ph.getRank() == 30
    ph.setSequence(pyopenms.AASequence.fromString("AAA"))
    assert ph.getSequence().toString() == "AAA"
    assert ph == ph
    assert not ph != ph
@report
def testPeptideEvidence():
    """
    Exercise PeptideEvidence: accession, flanking residues and
    start/end positions.

    @tests: PeptideEvidence
     PeptideEvidence.__init__
    """
    pe = pyopenms.PeptideEvidence()
    assert pe == pe
    assert not pe != pe
    pe.setProteinAccession('B_id')
    assert pe.getProteinAccession() == "B_id"
    # Flanking residues are set as single bytes but returned as str.
    pe.setAABefore(b'A')
    assert pe.getAABefore() == 'A'
    pe.setAAAfter(b'C')
    assert pe.getAAAfter() == 'C'
    pe.setStart(5)
    assert pe.getStart() == 5
    pe.setEnd(9)
    assert pe.getEnd() == 9
    assert pe == pe
    assert not pe != pe
@report
def testPeptideIdentification():
    """
    Exercise PeptideIdentification: hit management, referencing hits,
    score type / identifier accessors, ranking and sorting.

    @tests: PeptideIdentification
     PeptideIdentification.__init__
     PeptideIdentification.assignRanks
     PeptideIdentification.clearMetaInfo
     PeptideIdentification.empty
     PeptideIdentification.getHits
     PeptideIdentification.getIdentifier
     PeptideIdentification.getKeys
     PeptideIdentification.getMetaValue
     PeptideIdentification.getNonReferencingHits
     PeptideIdentification.getReferencingHits
     PeptideIdentification.getScoreType
     PeptideIdentification.getSignificanceThreshold
     PeptideIdentification.insertHit
     PeptideIdentification.isHigherScoreBetter
     PeptideIdentification.isMetaEmpty
     PeptideIdentification.metaValueExists
     PeptideIdentification.removeMetaValue
     PeptideIdentification.setHigherScoreBetter
     PeptideIdentification.setHits
     PeptideIdentification.setIdentifier
     PeptideIdentification.setMetaValue
     PeptideIdentification.setScoreType
     PeptideIdentification.sort
     PeptideIdentification.__eq__
     PeptideIdentification.__ge__
     PeptideIdentification.__gt__
     PeptideIdentification.__le__
     PeptideIdentification.__lt__
     PeptideIdentification.__ne__
     PeptideIdentification.setSignificanceThreshold
    """
    pi = pyopenms.PeptideIdentification()
    _testMetaInfoInterface(pi)
    assert pi == pi
    assert not pi != pi
    pe = pyopenms.PeptideEvidence()
    pe.setProteinAccession('B_id')
    ph = pyopenms.PeptideHit(1.0, 1, 0, pyopenms.AASequence.fromString("A"))
    ph.addPeptideEvidence(pe)
    pi.insertHit(ph)
    # Tuple unpacking asserts there is exactly one hit.
    phx, = pi.getHits()
    assert phx == ph
    pi.setHits([ph])
    phx, = pi.getHits()
    assert phx == ph
    rv = set([])
    peptide_hits = pi.getReferencingHits(pi.getHits(), rv)
    assert rv == set([])
    # assert len(peptide_hits) == 1
    assert isinstance(pi.getSignificanceThreshold(), float)
    _testStrOutput(pi.getScoreType())
    pi.setScoreType("A")
    assert isinstance(pi.isHigherScoreBetter(), int)
    _testStrOutput(pi.getIdentifier())
    pi.setIdentifier("id")
    pi.assignRanks()
    pi.sort()
    assert not pi.empty()
    pi.setSignificanceThreshold(6.0)
@report
def testPolarity():
    """
    All IonSource.Polarity enum members are exposed as plain ints.

    @tests: Polarity
     Polarity.NEGATIVE
     Polarity.POLNULL
     Polarity.POSITIVE
     Polarity.SIZE_OF_POLARITY
    """
    for member in ("NEGATIVE", "POLNULL", "POSITIVE"):
        assert isinstance(getattr(pyopenms.IonSource.Polarity, member), int)
@report
def testPrecursor():
    """
    Exercise Precursor: m/z, intensity, activation, isolation-window,
    and charge accessors.

    @tests: Precursor
     Precursor.__init__
     Precursor.getIntensity
     Precursor.getMZ
     Precursor.setIntensity
     Precursor.setMZ
     Precursor.setActivationMethods
     Precursor.getActivationMethods
     Precursor.setActivationEnergy
     Precursor.getActivationEnergy
     Precursor.setIsolationWindowUpperOffset
     Precursor.getIsolationWindowUpperOffset
     Precursor.setIsolationWindowLowerOffset
     Precursor.getIsolationWindowLowerOffset
     Precursor.setCharge
     Precursor.getCharge
     Precursor.setPossibleChargeStates
     Precursor.getPossibleChargeStates
     Precursor.getUnchargedMass
    """
    pc = pyopenms.Precursor()
    pc.setMZ(123.0)
    pc.setIntensity(12.0)
    assert pc.getMZ() == 123.0
    assert pc.getIntensity() == 12.0
    # The remaining set(get()) pairs and getters only verify the calls
    # succeed; values are not asserted.
    pc.setActivationMethods(pc.getActivationMethods())
    pc.setActivationEnergy(6.0)
    pc.getActivationEnergy()
    pc.setIsolationWindowUpperOffset(500.0)
    pc.getIsolationWindowUpperOffset()
    pc.setIsolationWindowLowerOffset(600.0)
    pc.getIsolationWindowLowerOffset()
    pc.setCharge(2)
    pc.getCharge()
    pc.setPossibleChargeStates(pc.getPossibleChargeStates())
    pc.getUnchargedMass()
@report
def testProcessingAction():
    """
    All ProcessingAction enum members are exposed as plain ints.

    @tests: ProcessingAction
     ProcessingAction.ALIGNMENT
     ProcessingAction.BASELINE_REDUCTION
     ProcessingAction.CALIBRATION
     ProcessingAction.CHARGE_CALCULATION
     ProcessingAction.CHARGE_DECONVOLUTION
     ProcessingAction.CONVERSION_DTA
     ProcessingAction.CONVERSION_MZDATA
     ProcessingAction.CONVERSION_MZML
     ProcessingAction.CONVERSION_MZXML
     ProcessingAction.DATA_PROCESSING
     ProcessingAction.DEISOTOPING
     ProcessingAction.FEATURE_GROUPING
     ProcessingAction.FILTERING
     ProcessingAction.FORMAT_CONVERSION
     ProcessingAction.IDENTIFICATION_MAPPING
     ProcessingAction.NORMALIZATION
     ProcessingAction.PEAK_PICKING
     ProcessingAction.PRECURSOR_RECALCULATION
     ProcessingAction.QUANTITATION
     ProcessingAction.SIZE_OF_PROCESSINGACTION
     ProcessingAction.SMOOTHING
    """
    member_names = (
        "ALIGNMENT", "BASELINE_REDUCTION", "CALIBRATION",
        "CHARGE_CALCULATION", "CHARGE_DECONVOLUTION", "CONVERSION_DTA",
        "CONVERSION_MZDATA", "CONVERSION_MZML", "CONVERSION_MZXML",
        "DATA_PROCESSING", "DEISOTOPING", "FEATURE_GROUPING",
        "FILTERING", "FORMAT_CONVERSION", "IDENTIFICATION_MAPPING",
        "NORMALIZATION", "PEAK_PICKING", "PRECURSOR_RECALCULATION",
        "QUANTITATION", "SIZE_OF_PROCESSINGACTION", "SMOOTHING",
    )
    for member in member_names:
        assert isinstance(getattr(pyopenms.ProcessingAction, member), int)
@report
def testProduct():
    """
    Exercise Product: m/z and isolation-window offset accessors plus
    equality.

    @tests: Product
     Product.__init__
     Product.getIsolationWindowLowerOffset
     Product.getIsolationWindowUpperOffset
     Product.getMZ
     Product.setIsolationWindowLowerOffset
     Product.setIsolationWindowUpperOffset
     Product.setMZ
     Product.__eq__
     Product.__ge__
     Product.__gt__
     Product.__le__
     Product.__lt__
     Product.__ne__
    """
    p = pyopenms.Product()
    p.setMZ(12.0)
    p.setIsolationWindowLowerOffset(10.0)
    p.setIsolationWindowUpperOffset(15.0)
    assert p.getMZ() == 12.0
    assert p.getIsolationWindowLowerOffset() == 10.0
    assert p.getIsolationWindowUpperOffset() == 15.0
    assert p == p
    assert not p != p
@report
def testProteinHit():
    """
    Exercise ProteinHit: accession/coverage/rank/score/sequence
    accessors plus the meta-info interface.

    @tests: ProteinHit
     ProteinHit.__init__
     ProteinHit.clearMetaInfo
     ProteinHit.getAccession
     ProteinHit.getCoverage
     ProteinHit.getKeys
     ProteinHit.getMetaValue
     ProteinHit.setMetaValue
     ProteinHit.getRank
     ProteinHit.__eq__
     ProteinHit.__ge__
     ProteinHit.__gt__
     ProteinHit.__le__
     ProteinHit.__lt__
     ProteinHit.__ne__
     ProteinHit.getScore
     ProteinHit.getSequence
     ProteinHit.isMetaEmpty
     ProteinHit.metaValueExists
     ProteinHit.removeMetaValue
     ProteinHit.setAccession
     ProteinHit.setCoverage
     ProteinHit.setRank
     ProteinHit.setScore
     ProteinHit.setSequence
    """
    ph = pyopenms.ProteinHit()
    assert ph == ph
    assert not ph != ph
    _testMetaInfoInterface(ph)
    ph.setAccession("A")
    ph.setCoverage(0.5)
    ph.setRank(2)
    ph.setScore(1.5)
    ph.setSequence("ABA")
    assert ph.getAccession() == ("A")
    assert ph.getCoverage() == (0.5)
    assert ph.getRank() == (2)
    assert ph.getScore() == (1.5)
    assert ph.getSequence() == ("ABA")
@report
def testProteinIdentification():
    """
    Exercise ProteinIdentification: hit management and the PeakMassType
    enum.

    @tests: ProteinIdentification
     ProteinIdentification.PeakMassType
     ProteinIdentification.__init__
     ProteinIdentification.clearMetaInfo
     ProteinIdentification.getHits
     ProteinIdentification.getKeys
     ProteinIdentification.getMetaValue
     ProteinIdentification.insertHit
     ProteinIdentification.isMetaEmpty
     ProteinIdentification.metaValueExists
     ProteinIdentification.removeMetaValue
     ProteinIdentification.setHits
     ProteinIdentification.setMetaValue
     ProteinIdentification.__eq__
     ProteinIdentification.__ge__
     ProteinIdentification.__gt__
     ProteinIdentification.__le__
     ProteinIdentification.__lt__
     ProteinIdentification.__ne__
    """
    pi = pyopenms.ProteinIdentification()
    _testMetaInfoInterface(pi)
    assert pi == pi
    assert not pi != pi
    assert pi.getHits() == []
    ph = pyopenms.ProteinHit()
    pi.insertHit(ph)
    # Tuple unpacking asserts there is exactly one hit.
    ph2, = pi.getHits()
    assert ph2 == ph
    pi.setHits([ph])
    ph2, = pi.getHits()
    assert ph2 == ph
    assert isinstance(pyopenms.ProteinIdentification.PeakMassType.MONOISOTOPIC, int)
    assert isinstance(pyopenms.ProteinIdentification.PeakMassType.AVERAGE, int)
@report
def testRichPeak():
    """
    Exercise RichPeak2D: meta-info and unique-id interfaces plus the
    m/z, intensity and RT accessors.

    @tests: RichPeak1D
     RichPeak1D.__init__
     RichPeak1D.getIntensity
     RichPeak1D.getKeys
     RichPeak1D.getMZ
     RichPeak1D.__eq__
     RichPeak1D.__ge__
     RichPeak1D.__gt__
     RichPeak1D.__le__
     RichPeak1D.__lt__
     RichPeak1D.__ne__
     RichPeak1D.getMetaValue
     RichPeak1D.clearMetaInfo
     RichPeak1D.isMetaEmpty
     RichPeak1D.metaValueExists
     RichPeak1D.removeMetaValue
     RichPeak1D.setIntensity
     RichPeak1D.setMZ
     RichPeak1D.setMetaValue

     RichPeak2D.__init__
     RichPeak2D.clearUniqueId
     RichPeak2D.clearMetaInfo
     RichPeak2D.isMetaEmpty
     RichPeak2D.ensureUniqueId
     RichPeak2D.getIntensity
     RichPeak2D.getKeys
     RichPeak2D.getMZ
     RichPeak2D.getMetaValue
     RichPeak2D.getRT
     RichPeak2D.getUniqueId
     RichPeak2D.hasInvalidUniqueId
     RichPeak2D.hasValidUniqueId
     RichPeak2D.metaValueExists
     RichPeak2D.removeMetaValue
     RichPeak2D.setIntensity
     RichPeak2D.setMZ
     RichPeak2D.setMetaValue
     RichPeak2D.setUniqueId
     RichPeak2D.setRT
     RichPeak2D.__eq__
     RichPeak2D.__ge__
     RichPeak2D.__gt__
     RichPeak2D.__le__
     RichPeak2D.__lt__
     RichPeak2D.__ne__
    """
    # NOTE: only RichPeak2D is exercised below; RichPeak1D appears in the
    # coverage list but is not instantiated here.
    p2 = pyopenms.RichPeak2D()
    _testMetaInfoInterface(p2)
    _testUniqueIdInterface(p2)
    assert p2 == p2
    assert not p2 != p2
    p2.setMZ(22.0)
    p2.setIntensity(23.0)
    p2.setRT(43.0)
    assert p2.getMZ() == (22.0)
    assert p2.getIntensity() == (23.0)
    assert p2.getRT() == (43.0)
@report
def testSoftware():
    """
    Exercise Software: name and version round trips.

    @tests: Software
     Software.__init__
     Software.getName
     Software.getVersion
     Software.setName
     Software.setVersion
    """
    software = pyopenms.Software()
    expected_name, expected_version = "name", "1.0.0"
    software.setName(expected_name)
    software.setVersion(expected_version)
    assert (software.getName(), software.getVersion()) == (expected_name, expected_version)
@report
def testSourceFile():
    """
    Exercise SourceFile: name/path/type round trips and checksum
    handling.

    @tests: SourceFile
     SourceFile.__init__
     SourceFile.getChecksum
     SourceFile.getChecksumType
     SourceFile.getFileSize
     SourceFile.getFileType
     SourceFile.getNameOfFile
     SourceFile.getNativeIDType
     SourceFile.getPathToFile
     SourceFile.setChecksum
     SourceFile.setFileSize
     SourceFile.setFileType
     SourceFile.setNameOfFile
     SourceFile.setNativeIDType
     SourceFile.setPathToFile
    """
    sf = pyopenms.SourceFile()
    sf.setNameOfFile("file.txt")
    assert sf.getNameOfFile() == "file.txt"
    sf.setPathToFile("file.txt")
    assert sf.getPathToFile() == "file.txt"
    sf.setFileType(".txt")
    assert sf.getFileType() == ".txt"
    # setChecksum takes the checksum string and its ChecksumType.
    sf.setChecksum("abcde000", pyopenms.ChecksumType.UNKNOWN_CHECKSUM)
    assert sf.getChecksum() == "abcde000"
    assert sf.getChecksumType() in (pyopenms.ChecksumType.UNKNOWN_CHECKSUM,
                                    pyopenms.ChecksumType.SHA1,
                                    pyopenms.ChecksumType.MD5)
@report
def testSpectrumSetting(s=None):
    """
    Exercise the SpectrumSettings getter/setter interface on ``s``.

    Parameters:
        s: the SpectrumSettings (or subclass, e.g. MSSpectrum) instance
           to exercise; a fresh pyopenms.SpectrumSettings is created
           when omitted.

    @tests: SpectrumSettings
     SpectrumSettings.SpectrumType
     SpectrumSettings.__init__
     SpectrumSettings.getAcquisitionInfo
     SpectrumSettings.getComment
     SpectrumSettings.getDataProcessing
     SpectrumSettings.getInstrumentSettings
     SpectrumSettings.getNativeID
     SpectrumSettings.getPeptideIdentifications
     SpectrumSettings.getPrecursors
     SpectrumSettings.getProducts
     SpectrumSettings.getSourceFile
     SpectrumSettings.getType
     SpectrumSettings.setAcquisitionInfo
     SpectrumSettings.setComment
     SpectrumSettings.setDataProcessing
     SpectrumSettings.setInstrumentSettings
     SpectrumSettings.setNativeID
     SpectrumSettings.setPeptideIdentifications
     SpectrumSettings.setPrecursors
     SpectrumSettings.setProducts
     SpectrumSettings.setSourceFile
     SpectrumSettings.setType
     SpectrumSettings.unify
    """
    # Fix: the original default was ``s=pyopenms.SpectrumSettings()``,
    # which is evaluated once at import time and shared (and mutated)
    # across calls -- the classic mutable-default pitfall. Create a
    # fresh instance per call instead.
    if s is None:
        s = pyopenms.SpectrumSettings()
    assert s.getType() in [ pyopenms.SpectrumSettings.SpectrumType.UNKNOWN,
                            pyopenms.SpectrumSettings.SpectrumType.PEAKS,
                            pyopenms.SpectrumSettings.SpectrumType.RAWDATA]
    assert isinstance(s.getAcquisitionInfo(), pyopenms.AcquisitionInfo)
    assert isinstance(s.getInstrumentSettings(), pyopenms.InstrumentSettings)
    assert isinstance(s.getSourceFile(), pyopenms.SourceFile)
    assert isinstance(s.getPeptideIdentifications(), list)
    assert isinstance(s.getDataProcessing(), list)
    # Each set(get()) pair is an identity round trip through the binding.
    s.setAcquisitionInfo(s.getAcquisitionInfo())
    s.setInstrumentSettings(s.getInstrumentSettings())
    s.setSourceFile(s.getSourceFile())
    s.setPeptideIdentifications(s.getPeptideIdentifications())
    s.setDataProcessing(s.getDataProcessing())
    s.setComment(s.getComment())
    s.setPrecursors(s.getPrecursors())
    s.setProducts(s.getProducts())
    s.setType(s.getType())
    s.setNativeID(s.getNativeID())
    s.setType(s.getType())
    # unify only exists on SpectrumSettings itself, not on subclasses
    # passed in by other tests.
    if isinstance(s, pyopenms.SpectrumSettings):
        s.unify(s)
@report
def testTransformationDescription():
    """
    Exercise TransformationDescription: data points, apply, and
    presence of the model-fitting methods.

    @tests: TransformationDescription
     TransformationDescription.__init__
     TransformationDescription.apply
     TransformationDescription.getDataPoints
     TransformationDescription.fitModel
     TransformationDescription.getModelParameters
     TransformationDescription.getModelType
     TransformationDescription.invert
    """
    td = pyopenms.TransformationDescription()
    assert td.getDataPoints() == []
    assert isinstance(td.apply(0.0), float)
    # Bare attribute accesses only verify the methods are exposed.
    td.fitModel
    p = td.getModelParameters()
    td.getModelType()
    td.invert
@report
def testTransformationModels():
    """
    Construct each transformation model from three data points,
    evaluate it, and fetch its default parameters.

    @tests: TransformationModelInterpolated
     TransformationModelInterpolated.getDefaultParameters
     TransformationModelInterpolated.getParameters
     TransformationModelLinear.getDefaultParameters
     TransformationModelLinear.getParameters
     TransformationModelBSpline.getDefaultParameters
     TransformationModelBSpline.getParameters
     TransformationModelLowess.getDefaultParameters
     TransformationModelLowess.getParameters
    NB: THIS TEST STOPS AFTER THE FIRST FAILURE
    """
    for clz in [pyopenms.TransformationModelLinear,
                pyopenms.TransformationModelBSpline,
                pyopenms.TransformationModelInterpolated,
                pyopenms.TransformationModelLowess]:
        p = pyopenms.Param()
        # Three (from, to) calibration points; unsorted on purpose.
        data = [ pyopenms.TM_DataPoint(9.0, 8.9),
                 pyopenms.TM_DataPoint(5.0, 6.0),
                 pyopenms.TM_DataPoint(8.0, 8.0) ]
        mod = clz(data, p)
        mod.evaluate(7.0)
        mod.getDefaultParameters(p)
@report
def testTransformationXMLFile():
    """
    @tests: TransformationXMLFile
     TransformationXMLFile.__init__
     TransformationXMLFile.load
     TransformationXMLFile.store
    """
    fh = pyopenms.TransformationXMLFile()
    td = pyopenms.TransformationDescription()
    # round-trip an empty description through disk (writes into the CWD)
    fh.store("test.transformationXML", td)
    fh.load("test.transformationXML", td, True)
    # an empty description must stay empty after the round-trip
    assert td.getDataPoints() == []
@report
def testIBSpectraFile():
    """
    @tests: IBSpectraFile
     IBSpectraFile.__init__
     IBSpectraFile.store
    """
    writer = pyopenms.IBSpectraFile()
    empty_map = pyopenms.ConsensusMap()
    # storing an empty consensus map is invalid and must raise RuntimeError
    raised = False
    try:
        writer.store(pyopenms.String("test.ibspectra.file"), empty_map)
        assert False
    except RuntimeError:
        raised = True
    assert raised
@report
def testSwathFile():
    """
    @tests: SwathFile
     SwathFile.__init__
    """
    # construction smoke test only
    reader = pyopenms.SwathFile()
    assert reader is not None
@report
def testType():
    """
    @tests: Type
    Every FileType enum member must be exposed and behave as an int.
    """
    member_names = [
        "CONSENSUSXML", "DTA", "DTA2D", "EDTA", "FASTA", "FEATUREXML",
        "GELML", "HARDKLOER", "IDXML", "INI", "KROENIK", "MASCOTXML",
        "MGF", "MS2", "MSP", "MZDATA", "MZIDENTML", "MZML", "MZXML",
        "OMSSAXML", "PEPLIST", "PEPXML", "PNG", "PROTXML",
        "SIZE_OF_TYPE", "TOPPAS", "TRAML", "TRANSFORMATIONXML", "TSV",
        "UNKNOWN", "XMASS",
    ]
    for name in member_names:
        assert isinstance(getattr(pyopenms.FileType, name), int)
@report
def testVersion():
    """
    @tests: VersionDetails
     VersionDetails.__init__
     VersionDetails.create
     VersionDetails.version_major
     VersionDetails.version_minor
     VersionDetails.version_patch
     VersionDetails.__eq__
     VersionDetails.__ge__
     VersionDetails.__gt__
     VersionDetails.__le__
     VersionDetails.__lt__
     VersionDetails.__ne__
     VersionInfo.getRevision
     VersionInfo.getTime
     VersionInfo.getVersion
     version.version
    """
    # the static VersionInfo getters must return text strings
    _testStrOutput(pyopenms.VersionInfo.getVersion())
    _testStrOutput(pyopenms.VersionInfo.getRevision())
    _testStrOutput(pyopenms.VersionInfo.getTime())
    # parsing a plain "major.minor.patch" version string
    vd = pyopenms.VersionDetails.create("19.2.1")
    assert vd.version_major == 19
    assert vd.version_minor == 2
    assert vd.version_patch == 1
    # parsing a version with a pre-release suffix
    vd = pyopenms.VersionDetails.create("19.2.1-alpha")
    assert vd.version_major == 19
    assert vd.version_minor == 2
    assert vd.version_patch == 1
    assert vd.pre_release_identifier == "alpha"
    # comparison operators: equal to itself, neither less nor greater
    assert vd == vd
    assert not vd < vd
    assert not vd > vd
    assert isinstance(pyopenms.version.version, str)
@report
def testInspectInfile():
    """
    @tests: InspectInfile
     InspectInfile.__init__
    """
    infile = pyopenms.InspectInfile()
    assert infile.getModifications is not None
    # a fresh input file carries no modifications
    assert len(infile.getModifications()) == 0
@report
def testIsotopeMarker():
    """
    @tests: IsotopeMarker
     IsotopeMarker.__init__
    """
    factory = pyopenms.IsotopeMarker()
    marker = factory.create()
    assert marker.apply is not None
    # apply() must accept an empty result dict and an empty spectrum
    result = {}
    empty_spectrum = pyopenms.MSSpectrum()
    marker.apply(result, empty_spectrum)
@report
def testAttachment():
    """
    @tests: Attachment
     Attachment.__init__
    All public members and serializers must be exposed and writable.
    """
    att = pyopenms.Attachment()
    for member in ("name", "value", "cvRef", "cvAcc", "unitRef", "unitAcc",
                   "binary", "qualityRef", "colTypes", "tableRows",
                   "toXMLString", "toCSVString"):
        assert getattr(att, member) is not None
    # the plain string members accept assignment
    for member in ("name", "value", "cvRef", "cvAcc", "unitRef", "unitAcc",
                   "binary", "qualityRef"):
        setattr(att, member, "test")
    # list-of-bytes and nested-list members round-trip through assignment
    att.colTypes = [ b"test", b"test2"]
    att.tableRows = [ [b"test", b"test2"], [b"otherTest"] ]
    assert att.tableRows[1][0] == b"otherTest"
@report
def testOptimizePeakDeconvolution():
    """
    @tests: OptimizePeakDeconvolution
     OptimizePeakDeconvolution.__init__
    Existence checks for the deconvolution API and its helper structs.
    """
    inst = pyopenms.OptimizePeakDeconvolution()
    assert inst.getParameters
    assert inst.getPenalties is not None
    assert inst.setPenalties is not None
    assert inst.getCharge is not None
    assert inst.setCharge is not None
    assert inst.optimize is not None
    # penalty-factor struct must expose its fields
    inst = pyopenms.PenaltyFactorsIntensity()
    assert inst.height is not None
    # data struct used by optimize()
    inst = pyopenms.OptimizePeakDeconvolution_Data()
    assert inst.peaks is not None
    assert inst.peaks is not None
    assert inst.signal is not None
    assert inst.penalties is not None
    assert inst.charge is not None
@report
def testKernelMassTrace():
    """Existence checks for the Kernel_MassTrace API, then one real call."""
    trace = pyopenms.Kernel_MassTrace()
    # computeFwhmAreaRobust / computeFwhmAreaSmoothRobust are intentionally
    # not checked (not wrapped)
    api_names = (
        "getSize", "getLabel", "setLabel", "getCentroidMZ", "getCentroidRT",
        "getCentroidSD", "getFWHM", "getTraceLength", "getFWHMborders",
        "getSmoothedIntensities", "getAverageMS1CycleTime",
        "computeSmoothedPeakArea", "computePeakArea", "findMaxByIntPeak",
        "estimateFWHM", "computeFwhmArea", "computeFwhmAreaSmooth",
        "getIntensity", "getMaxIntensity", "getConvexhull", "setCentroidSD",
        "setSmoothedIntensities", "updateSmoothedMaxRT",
        "updateWeightedMeanRT", "updateSmoothedWeightedMeanRT",
        "updateMedianRT", "updateMedianMZ", "updateMeanMZ",
        "updateWeightedMeanMZ", "updateWeightedMZsd",
    )
    for name in api_names:
        assert getattr(trace, name) is not None
    # at least one accessor must actually be callable
    size = trace.getSize()
@report
def testElutionPeakDetection():
    """Existence checks for ElutionPeakDetection, then smooth an empty trace."""
    detector = pyopenms.ElutionPeakDetection()
    for name in ("detectPeaks", "filterByPeakWidth", "computeMassTraceNoise",
                 "computeMassTraceSNR", "computeApexSNR", "findLocalExtrema",
                 "smoothData"):
        assert getattr(detector, name) is not None
    # smoothing an empty mass trace must not raise
    empty_trace = pyopenms.Kernel_MassTrace()
    detector.smoothData(empty_trace, 4)
@report
def testIndexedMzMLDecoder():
    """findIndexListOffset on invalid input must raise RuntimeError."""
    decoder = pyopenms.IndexedMzMLDecoder()
    try:
        decoder.findIndexListOffset("abcde", 100)
    except RuntimeError:
        pass  # expected: "abcde" is not a valid indexedmzML file
    else:
        raise Exception("Should raise an error")
def test_streampos():
    # streampos wraps C++ std::streampos; it must coerce to a Python integer
    # (`long` is aliased to int on Python 3 at the top of this file)
    p = long(pyopenms.streampos())
    assert isinstance(p, long), "got %r" % p
def test_MapConversion():
    """Convert FeatureMap <-> ConsensusMap and MSExperiment -> ConsensusMap."""
    feature = pyopenms.Feature()
    feature.setRT(99)
    cmap = pyopenms.ConsensusMap()
    fmap = pyopenms.FeatureMap()
    fmap.push_back(feature)
    # FeatureMap -> ConsensusMap (map index 0, keep up to 1 feature)
    pyopenms.MapConversion().convert(0, fmap, cmap, 1)
    assert(cmap.size() == 1)
    assert(cmap[0].getRT() == 99.0)
    # ConsensusMap -> FeatureMap: the RT must survive the round-trip
    fmap = pyopenms.FeatureMap()
    pyopenms.MapConversion().convert(cmap, True, fmap)
    assert(fmap.size() == 1)
    assert(fmap[0].getRT() == 99.0)
    # MSExperiment -> ConsensusMap: one consensus feature per spectrum peak set
    exp = pyopenms.MSExperiment()
    sp = pyopenms.MSSpectrum()
    peak = pyopenms.Peak1D()
    peak.setIntensity(10)
    peak.setMZ(20)
    sp.push_back(peak)
    exp.addSpectrum(sp)
    exp.addSpectrum(sp)
    cmap = pyopenms.ConsensusMap()
    pyopenms.MapConversion().convert(0, exp, cmap, 2)
    assert(cmap.size() == 2)
    assert(cmap[0].getIntensity() == 10.0)
    assert(cmap[0].getMZ() == 20.0)
def test_BSpline2d():
    """Fit a 2d B-spline, evaluate it, and re-solve with new y values."""
    x = [1.0, 6.0, 8.0, 10.0, 15.0]
    y = [2.0, 5.0, 6.0, 12.0, 13.0]
    spline = pyopenms.BSpline2d(x, y, 0, pyopenms.BoundaryCondition.BC_ZERO_ENDPOINTS, 0)
    assert spline.ok()
    # BUG FIX: the original asserted `abs(expr < 0.01)`, i.e. abs() of a
    # boolean, which silently dropped the lower bound of the tolerance check.
    assert abs(spline.eval(6.0) - 5.0) < 0.01
    # NOTE(review): the original derivative check compared against 5.0 with
    # the same misplaced parenthesis, making it one-sided; the expected value
    # looks dubious (likely a copy-paste of the eval target), so the original
    # one-sided bound is kept verbatim — TODO confirm the intended derivative.
    assert spline.derivative(6.0) - 5.0 < 0.01
    # re-solving with new control values must keep the spline valid and
    # still interpolate y=5.0 at x=6.0 (unchanged in y_new)
    y_new = [4.0, 5.0, 6.0, 12.0, 13.0]
    spline.solve(y_new)
    assert spline.ok()
    assert abs(spline.eval(6.0) - 5.0) < 0.01
@report
def testConsensusIDAlgorithmAverage():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmAverage()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmBest():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmBest()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmIdentity():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmIdentity()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmPEPIons():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmPEPIons()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmPEPMatrix():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmPEPMatrix()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmRanks():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmRanks()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmSimilarity():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmSimilarity()
    assert inst.apply is not None
@report
def testConsensusIDAlgorithmWorst():
    """Construction smoke test: apply() must be exposed."""
    inst = pyopenms.ConsensusIDAlgorithmWorst()
    assert inst.apply is not None
@report
def testDigestionEnzymeProtein():
    """DigestionEnzymeProtein must be constructible from explicit attributes."""
    empty_formula = pyopenms.EmpiricalFormula()
    # name, cleavage residue, synonyms, regex description,
    # N-/C-term gain formulas, PSI / X!Tandem ids, Comet / OMSSA ids
    enzyme = pyopenms.DigestionEnzymeProtein(
        "testEnzyme", "K", set([]), "",
        empty_formula, empty_formula, "", "", 0, 0)
@report
def testMRMAssay():
    """Construction smoke test for MRMAssay."""
    assert pyopenms.MRMAssay() is not None
@report
def testMRMIonSeries():
    """Construction smoke test for MRMIonSeries."""
    assert pyopenms.MRMIonSeries() is not None
@report
def testPeptideIndexing():
    """Construction smoke test for PeptideIndexing."""
    assert pyopenms.PeptideIndexing() is not None
@report
def testPeptideProteinResolution():
    """Construction smoke test for PeptideProteinResolution (statistics off)."""
    assert pyopenms.PeptideProteinResolution(False) is not None
@report
def testPercolatorOutfile():
    """Construction smoke test for PercolatorOutfile."""
    assert pyopenms.PercolatorOutfile() is not None
@report
def testHiddenMarkovModel():
    """Create an HMM, add one named state and look it up again."""
    hmm = pyopenms.HiddenMarkovModel()
    assert hmm
    assert hmm.getNumberOfStates() == 0
    ss = s("testState")
    hmm.addNewState(ss)
    assert hmm.getNumberOfStates() == 1
    e = pyopenms.HMMState()
    # hmm.addNewState(e) # Segfault !
    # looking up the state by name must return a (truthy) state object
    r = hmm.getState(s("testState"))
    assert r
    ## assert r == ss # requires ==
@report
def testHMMState():
    """HMMState name / hidden flag and predecessor/successor wiring."""
    state = pyopenms.HMMState()
    assert state
    state.setName(s("somename"))
    assert state.getName() == "somename", state.getName()
    state.setHidden(True)
    assert state.isHidden()
    predecessor = pyopenms.HMMState()
    predecessor.setName(s("pre"))
    successor = pyopenms.HMMState()
    successor.setName(s("suc"))
    state.addPredecessorState(predecessor)
    state.addSuccessorState(successor)
    # both link lists must be non-empty after wiring
    assert state.getPredecessorStates()
    assert state.getSuccessorStates()
@report
def testProteaseDB():
    """The protease database must know Trypsin and list it by name."""
    # NOTE: unused locals (an EmpiricalFormula and a synonyms set) were
    # removed from the original body.
    edb = pyopenms.ProteaseDB()
    assert edb.hasEnzyme(pyopenms.String("Trypsin"))
    trypsin = edb.getEnzyme(pyopenms.String("Trypsin"))
    assert trypsin is not None
    names = []
    # getAllNames fills the list in place with bytes enzyme names
    edb.getAllNames(names)
    assert b"Trypsin" in names
@report
def testElementDB():
    """ElementDB singleton: recreate safely, then query S and O."""
    edb = pyopenms.ElementDB()
    del edb
    # create a second instance of ElementDB without anything bad happening
    edb = pyopenms.ElementDB()
    # lookup by atomic number (16 = sulfur) and by symbol
    assert edb.hasElement(16)
    edb.hasElement(pyopenms.String("O"))
    e = edb.getElement(16)
    assert e.getName() == "Sulfur"
    assert e.getSymbol() == "S"
    assert e.getIsotopeDistribution()
    e2 = edb.getElement(pyopenms.String("O"))
    assert e2.getName() == "Oxygen"
    assert e2.getSymbol() == "O"
    assert e2.getIsotopeDistribution()
    # assert e == e2
    # not yet implemented
    #
    # const Map[ String, Element * ] getNames() nogil except +
    # const Map[ String, Element * ] getSymbols() nogil except +
    # const Map[unsigned int, Element * ] getAtomicNumbers() nogil except +
@report
def testDPosition():
    """DPosition1/DPosition2: default and value construction plus indexing."""
    pyopenms.DPosition1()  # default ctor must work
    one_d = pyopenms.DPosition1(1.0)
    assert one_d[0] == 1.0
    pyopenms.DPosition2()  # default ctor must work
    two_d = pyopenms.DPosition2(1.0, 2.0)
    assert two_d[0] == 1.0
    assert two_d[1] == 2.0
@report
def testResidueDB():
    """ResidueDB singleton: recreate safely and query residues / residue sets."""
    rdb = pyopenms.ResidueDB()
    del rdb
    # re-creating the singleton wrapper must be safe
    rdb = pyopenms.ResidueDB()
    assert rdb.getNumberOfResidues() >= 20
    residue_sets = rdb.getResidueSets()
    assert len(residue_sets) >= 1
    members = rdb.getResidues(pyopenms.String(residue_sets.pop()))
    assert len(members) >= 1
    assert rdb.hasResidue(s("Glycine"))
    # lookup and counting must not raise
    rdb.getResidue(s("Glycine"))
    rdb.getNumberOfResidues()
@report
def testModificationsDB():
    """ModificationsDB singleton: search by name/specificity and by mass."""
    mdb = pyopenms.ModificationsDB()
    del mdb
    # create a second instance of ModificationsDB without anything bad happening
    mdb = pyopenms.ModificationsDB()
    assert mdb.getNumberOfModifications() > 1
    m = mdb.getModification(1)
    # NOTE(review): the two lines below duplicate the two above verbatim
    assert mdb.getNumberOfModifications() > 1
    m = mdb.getModification(1)
    assert m is not None
    # searchModifications fills the set in place; each query must hit exactly once
    mods = set([])
    mdb.searchModifications(mods, s("Phosphorylation"), s("T"), pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert len(mods) == 1
    mods = set([])
    mdb.searchModifications(mods, s("NIC"), s("T"), pyopenms.ResidueModification.TermSpecificity.N_TERM)
    assert len(mods) == 1
    # NOTE(review): the NIC/N_TERM search below repeats the one above verbatim
    mods = set([])
    mdb.searchModifications(mods, s("NIC"), s("T"), pyopenms.ResidueModification.TermSpecificity.N_TERM)
    assert len(mods) == 1
    mods = set([])
    mdb.searchModifications(mods, s("Acetyl"), s("T"), pyopenms.ResidueModification.TermSpecificity.N_TERM)
    assert len(mods) == 1
    assert list(mods)[0].getFullId() == "Acetyl (N-term)"
    # lookup by full id / by name + origin residue
    m = mdb.getModification(s("Carboxymethyl (C)"), "", pyopenms.ResidueModification.TermSpecificity.NUMBER_OF_TERM_SPECIFICITY)
    assert m.getFullId() == "Carboxymethyl (C)"
    m = mdb.getModification( s("Phosphorylation"), s("S"), pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m.getId() == "Phospho"
    # get out all mods (there should be many, some known ones as well!)
    mods = []
    m = mdb.getAllSearchModifications(mods)
    assert len(mods) > 100
    assert b"Phospho (S)" in mods
    assert b"Sulfo (S)" in mods
    assert not (b"Phospho" in mods)
    # search for specific modifications by mass
    m = mdb.getBestModificationByDiffMonoMass( 80.0, 1.0, "T", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Phospho"
    assert m.getFullName() == "Phosphorylation"
    assert m.getUniModAccession() == "UniMod:21"
    # a very wide tolerance window must still find the nearest match first
    m = mdb.getBestModificationByDiffMonoMass(80, 100, "T", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Phospho"
    assert m.getFullName() == "Phosphorylation"
    assert m.getUniModAccession() == "UniMod:21"
    m = mdb.getBestModificationByDiffMonoMass(16, 1.0, "M", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Oxidation", m.getId()
    assert m.getFullName() == "Oxidation or Hydroxylation", m.getFullName()
    assert m.getUniModAccession() == "UniMod:35"
@report
def testRNaseDB():
    """
    @tests: RNaseDB
     const DigestionEnzymeRNA* getEnzyme(const String& name) nogil except +
     const DigestionEnzymeRNA* getEnzymeByRegEx(const String& cleavage_regex) nogil except +
     void getAllNames(libcpp_vector[ String ]& all_names) nogil except +
     bool hasEnzyme(const String& name) nogil except +
     bool hasRegEx(const String& cleavage_regex) nogil except +
    """
    db = pyopenms.RNaseDB()
    names = []
    # getAllNames fills the list in place
    db.getAllNames(names)
    # RNase T1 cleaves after G (lookbehind regex) and leaves a 3' phosphate
    e = db.getEnzyme("RNase_T1")
    assert e.getRegEx() == u'(?<=G)'
    assert e.getThreePrimeGain() == u'p'
    assert db.hasRegEx(u'(?<=G)')
    assert db.hasEnzyme("RNase_T1")
@report
def testRibonucleotideDB():
    """
    @tests: RibonucleotideDB
    Lookup of unmodified uridine by one-letter code.
    """
    db = pyopenms.RibonucleotideDB()
    uridine = db.getRibonucleotide(b"U")
    assert uridine.getName() == u'uridine'
    assert uridine.getCode() == u'U'
    assert uridine.getFormula().toString() == u'C9H12N2O6'
    assert not uridine.isModified()
@report
def testRibonucleotide():
    """
    @tests: Ribonucleotide
    Setter/getter round-trips on a default-constructed ribonucleotide.
    """
    ribo = pyopenms.Ribonucleotide()
    assert not ribo.isModified()
    # bytes setters are read back as text
    for setter, getter, value, expected in (
            ("setHTMLCode", "getHTMLCode", "test", "test"),
            ("setOrigin", "getOrigin", b"A", "A"),
            ("setNewCode", "getNewCode", b"A", "A")):
        getattr(ribo, setter)(value)
        assert getattr(ribo, getter)() == expected
@report
def testRNaseDigestion():
    """
    @tests: RNaseDigestion
    Digest an oligo with RNase_T1 and count the resulting fragments.
    """
    digestion = pyopenms.RNaseDigestion()
    digestion.setEnzyme("RNase_T1")
    assert digestion.getEnzymeName() == "RNase_T1"
    oligo = pyopenms.NASequence.fromString("pAUGUCGCAG")
    fragments = []
    digestion.digest(oligo, fragments)
    # cleavage after each G yields three fragments
    assert len(fragments) == 3
@report
def testNASequence():
    """
    @tests: NASequence
    Parsing, formula computation, iteration and fragment-type overloads.
    """
    oligo = pyopenms.NASequence.fromString("pAUGUCGCAG")
    assert oligo.size() == 9
    # BUG FIX: these two formula comparisons were bare expressions whose
    # boolean results were silently discarded; they are now real assertions.
    assert oligo.getFormula().toString() == u'C86H108N35O64P9'
    oligo_mod = pyopenms.NASequence.fromString("A[m1A][Gm]A")
    assert oligo_mod.getFormula().toString() == u'C42H53N20O23P3'
    # the sequence must be iterable
    for r in oligo:
        pass
    # the second residue (m1A) carries a modification
    assert oligo_mod[1].isModified()
    charge = 2
    # fragment-type-specific overloads must be callable without raising
    oligo_mod.getMonoWeight(pyopenms.NASequence.NASFragmentType.WIon, charge)
    oligo_mod.getFormula(pyopenms.NASequence.NASFragmentType.WIon, charge)
@report
def testExperimentalDesign():
    """
    @tests: ExperimentalDesign
     ExperimentalDesign.__init__
     ExperimentalDesign.getNumberOfSamples() == 8
     ExperimentalDesign.getNumberOfFractions() == 3
     ExperimentalDesign.getNumberOfLabels() == 4
     ExperimentalDesign.getNumberOfMSFiles() == 6
     ExperimentalDesign.getNumberOfFractionGroups() == 2
     ExperimentalDesign.getSample(1, 1) == 1
     ExperimentalDesign.getSample(2, 4) == 8
     ExperimentalDesign.isFractionated()
     ExperimentalDesign.sameNrOfMSFilesPerFraction()
     ExperimentalDesignFile.__init__
     ExperimentalDesignFile.load
    """
    f = pyopenms.ExperimentalDesignFile()
    fourplex_fractionated_design = pyopenms.ExperimentalDesign()
    # the fixture TSV lives next to this test module
    ed_dirname = os.path.dirname(os.path.abspath(__file__))
    ed_filename = os.path.join(ed_dirname, "ExperimentalDesign_input_2.tsv").encode()
    fourplex_fractionated_design = pyopenms.ExperimentalDesignFile.load(ed_filename, False)
    # counts expected for the 4-plex, 3-fraction fixture
    assert fourplex_fractionated_design.getNumberOfSamples() == 8
    assert fourplex_fractionated_design.getNumberOfFractions() == 3
    assert fourplex_fractionated_design.getNumberOfLabels() == 4
    assert fourplex_fractionated_design.getNumberOfMSFiles() == 6
    assert fourplex_fractionated_design.getNumberOfFractionGroups() == 2
    # getSample(fraction_group, label)
    assert fourplex_fractionated_design.getSample(1, 1) == 1
    assert fourplex_fractionated_design.getSample(2, 4) == 8
    assert fourplex_fractionated_design.isFractionated()
    assert fourplex_fractionated_design.sameNrOfMSFilesPerFraction()
@report
def testString():
    """OpenMS::String construction, comparison and Unicode handling."""
    # construction from nothing, str, unicode and another String
    pystr = pyopenms.String()
    pystr = pyopenms.String("blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String("blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String(u"blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String(pystr)
    assert (pystr.toString() == "blah")
    assert (len(pystr.toString()) == 4)
    cstr = pystr.c_str()
    # Printing should work ...
    print(cstr)
    print(pystr)
    print(pystr.toString())
    assert (pystr.toString() == "blah")
    # same round-trips with a non-ASCII string
    pystr = pyopenms.String("bläh")
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String("bläh")
    pystr = pyopenms.String(u"bläh")
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(pystr)
    assert (pystr.toString() == u"bläh")
    cstr = pystr.c_str()
    # Printing should work ...
    print(cstr)
    print(pystr)
    print(pystr.toString().encode("utf8"))
    assert len(pystr.toString()) == 4
    assert len(pystr.c_str()) == 5 # C does not know about Unicode, so be careful with c_str
    print(pystr) # this prints the C string, due to Py 2/3 compatibility
    print(pystr.toString().encode("utf8")) # this prints the correct String
    # equality between two independently constructed Strings
    pystr1 = pyopenms.String("bläh")
    pystr2 = pyopenms.String("bläh")
    assert(pystr1 == pystr2)
    pystr1 = pyopenms.String(u"bläh")
    pystr2 = pyopenms.String(u"bläh")
    assert(pystr1 == pystr2)
    # Handling of different Unicode Strings:
    # - unicode is natively stored in OpenMS::String
    # - encoded bytesequences for utf8, utf16 and iso8859 can be stored as
    #   char arrays in OpenMS::String (and be accessed using c_str())
    # - encoded bytesequences for anything other than utf8 cannot use
    #   "toString()" as this function expects utf8
    ustr = u"bläh"
    pystr = pyopenms.String(ustr)
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(ustr.encode("utf8"))
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(ustr.encode("iso8859_15"))
    assert (pystr.c_str().decode("iso8859_15") == u"bläh")
    pystr = pyopenms.String(ustr.encode("utf16"))
    assert (pystr.c_str().decode("utf16") == u"bläh")
    # toString will throw as its not UTF8
    pystr = pyopenms.String(ustr.encode("iso8859_15"))
    didThrow = False
    try:
        pystr.toString()
    except UnicodeDecodeError:
        didThrow = True
    assert didThrow
    # toString will throw as its not UTF8
    pystr = pyopenms.String(ustr.encode("utf16"))
    didThrow = False
    try:
        pystr.toString()
    except UnicodeDecodeError:
        didThrow = True
    assert didThrow
    # Handling of automatic conversions of String return values
    # -- return a native str when utf8 is used
    # -- return a OpenMS::String object when encoding with utf8 is not possible
    ustr = u"bläh"
    s = pyopenms.MSSpectrum()
    s.setNativeID(ustr)
    r = s.getNativeID()
    # assert( isinstance(r, str) ) # native, returns str
    assert(r == u"bläh")
    s.setNativeID(ustr.encode("utf8"))
    r = s.getNativeID()
    # assert( isinstance(r, str) )
    assert(r == u"bläh")
    s.setNativeID(ustr.encode("utf16"))
    r = s.getNativeID()
    # assert( isinstance(r, bytes) )
    # assert(r.c_str().decode("utf16") == u"bläh")
    s.setNativeID(ustr.encode("iso8859_15"))
    r = s.getNativeID()
    # assert( isinstance(r, bytes) )
    assert(r.decode("iso8859_15") == u"bläh")
| 26.995928 | 128 | 0.69332 |
from __future__ import print_function
import pyopenms
import copy
import os
from pyopenms import String as s
import numpy as np
print("IMPORTED ", pyopenms.__file__)
# Python 3 has no `long` builtin; alias it to int for 2/3 compatibility.
try:
    long
except NameError:
    long = int
from functools import wraps
import sys
def _testStrOutput(input_str):
    """Assert that *input_str* is a text string (unicode on Py2, str on Py3)."""
    expected_type = unicode if sys.version_info[0] < 3 else str
    assert isinstance(input_str, expected_type)
def report(f):
    """Decorator that prints the wrapped test's name before running it."""
    @wraps(f)
    def wrapper(*a, **kw):
        print("run ", f.__name__)
        f(*a, **kw)
    return wrapper
@report
def _testMetaInfoInterface(what):
    """Exercise the MetaInfoInterface of *what*: set, get, remove, clear."""
    what.setMetaValue("key", 42)
    what.setMetaValue("key2", 42)
    # getKeys fills the list in place with bytes keys
    keys = []
    what.getKeys(keys)
    assert len(keys) and all(isinstance(k, bytes) for k in keys)
    assert what.getMetaValue(keys[0]) == 42
    assert what.metaValueExists("key")
    # after removing "key", the remaining key ("key2") still maps to 42
    what.removeMetaValue("key")
    keys = []
    what.getKeys(keys)
    assert what.getMetaValue(keys[0]) == 42
    # clearing removes everything
    what.clearMetaInfo()
    keys = []
    what.getKeys(keys)
    assert len(keys) == 0
@report
def _testUniqueIdInterface(what):
    """Exercise the UniqueIdInterface of *what*: ensure, clear, set."""
    # a fresh object starts with an invalid unique id
    assert what.hasInvalidUniqueId()
    assert not what.hasValidUniqueId()
    # ensureUniqueId assigns a positive integer id
    assert what.ensureUniqueId()
    assert isinstance(what.getUniqueId(), (int, long))
    assert what.getUniqueId() > 0
    assert not what.hasInvalidUniqueId()
    assert what.hasValidUniqueId()
    # clearing resets the id to 0 (invalid)
    what.clearUniqueId()
    assert what.getUniqueId() == 0
    assert what.hasInvalidUniqueId()
    assert not what.hasValidUniqueId()
    assert what.ensureUniqueId()
    assert isinstance(what.getUniqueId(), (int, long))
    assert what.getUniqueId() > 0
    assert not what.hasInvalidUniqueId()
    assert what.hasValidUniqueId()
    # explicit ids can be set directly
    what.setUniqueId(1234)
    assert what.getUniqueId() == 1234
def _testProgressLogger(ff):
    """Exercise the ProgressLogger interface of *ff* with a silent 0..3 run."""
    ff.setLogType(pyopenms.LogType.NONE)
    assert ff.getLogType() == pyopenms.LogType.NONE
    ff.startProgress(0, 3, "label")
    for step in range(4):
        ff.setProgress(step)
    ff.endProgress()
@report
def testSpectrumAlignment():
    """Align two 2- and 3-peak spectra and score the alignments."""
    # bare attribute accesses: raise AttributeError if the API is missing
    pyopenms.SpectrumAlignment
    pyopenms.SpectrumAlignment.__init__
    pyopenms.SpectrumAlignment.getDefaults
    pyopenms.SpectrumAlignment.getParameters
    pyopenms.SpectrumAlignment.setParameters
    # two-peak spectrum at m/z 1000 and 2000
    spec = pyopenms.MSSpectrum()
    p = pyopenms.Peak1D()
    p.setMZ(1000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    p.setMZ(2000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    # three-peak spectrum, each peak 0.001 Th away from `spec`'s peaks
    rich_spec = pyopenms.MSSpectrum()
    p = pyopenms.Peak1D()
    p.setMZ(1000.001)
    p.setIntensity(200.0)
    rich_spec.push_back(p)
    p.setMZ(2000.001)
    p.setIntensity(200.0)
    rich_spec.push_back(p)
    p.setMZ(3000.001)
    p.setIntensity(200.0)
    rich_spec.push_back(p)
    aligner = pyopenms.SpectrumAlignment()
    # getSpectrumAlignment fills `result` with (index, index) peak pairs
    result = []
    aligner.getSpectrumAlignment(result, spec, spec)
    assert result == [ (0,0), (1,1) ], result
    aligner.getSpectrumAlignment(result, rich_spec, spec)
    assert result == [ (0,0), (1,1) ], result
    aligner.getSpectrumAlignment(result, spec, rich_spec)
    assert result == [ (0,0), (1,1) ], result
    # self-alignment of the 3-peak spectrum matches all three peaks
    aligner.getSpectrumAlignment(result, rich_spec, rich_spec)
    assert result == [ (0,0), (1,1), (2,2) ], result
    # the score functor accepts one or two spectra and returns a float
    aligner = pyopenms.SpectrumAlignmentScore()
    assert isinstance(aligner(spec), float)
    assert isinstance(aligner(rich_spec), float)
    assert isinstance(aligner(spec, rich_spec), float)
    assert isinstance(aligner(rich_spec, spec), float)
    assert isinstance(aligner(spec, spec), float)
    assert isinstance(aligner(rich_spec, rich_spec), float)
@report
def testAASequence():
    """AASequence parsing, string renderings, modification and mass checks."""
    aas = pyopenms.AASequence()
    # concatenation operators must exist on empty sequences
    aas + aas
    aas += aas
    aas.__doc__
    # unmodified peptide round-trips through all string renderings
    aas = pyopenms.AASequence.fromString("DFPIANGER")
    assert aas.getCTerminalModificationName() == ""
    assert aas.getNTerminalModificationName() == ""
    aas.setCTerminalModification("")
    aas.setNTerminalModification("")
    assert aas.toString() == "DFPIANGER"
    assert aas.toUnmodifiedString() == "DFPIANGER"
    aas = pyopenms.AASequence.fromStringPermissive("DFPIANGER", True)
    assert aas.toString() == "DFPIANGER"
    assert aas.toUnmodifiedString() == "DFPIANGER"
    # modified peptide: bracket / UniMod renderings
    seq = pyopenms.AASequence.fromString("PEPTIDESEKUEM(Oxidation)CER")
    assert seq.toString() == "PEPTIDESEKUEM(Oxidation)CER"
    assert seq.toUnmodifiedString() == "PEPTIDESEKUEMCER"
    assert seq.toBracketString() == "PEPTIDESEKUEM[147]CER"
    assert seq.toBracketString(True) == "PEPTIDESEKUEM[147]CER"
    # full-precision rendering differs slightly between builds
    # (FIX: the original repeated this two-line assertion verbatim twice)
    assert seq.toBracketString(False) == "PEPTIDESEKUEM[147.03540001709996]CER" or \
        seq.toBracketString(False) == "PEPTIDESEKUEM[147.035400017100017]CER"
    assert seq.toUniModString() == "PEPTIDESEKUEM(UniMod:35)CER"
    assert seq.isModified()
    assert not seq.hasCTerminalModification()
    assert not seq.hasNTerminalModification()
    assert not seq.empty()
    assert seq.getResidue(1) is not None
    assert seq.size() == 16
    # out-of-range residue access must raise
    try:
        seq.getResidue(1000)
    except RuntimeError:
        print("Exception successfully triggered.")
    else:
        print("Error: Exception not triggered.")
        assert False
    # formula and monoisotopic mass of the full (uncharged) peptide
    assert seq.getFormula(pyopenms.Residue.ResidueType.Full, 0) == pyopenms.EmpiricalFormula("C75H122N20O32S2Se1")
    assert abs(seq.getMonoWeight(pyopenms.Residue.ResidueType.Full, 0) - 1952.7200317517998) < 1e-5
@report
def testElement():
    """Element getters/setters plus symbol encoding round-trips."""
    ins = pyopenms.Element()
    ins.setAtomicNumber(6)
    ins.getAtomicNumber()
    ins.setAverageWeight(12.011)
    ins.getAverageWeight()
    ins.setMonoWeight(12)
    ins.getMonoWeight()
    iso = pyopenms.IsotopeDistribution()
    ins.setIsotopeDistribution(iso)
    ins.getIsotopeDistribution()
    ins.setName("Carbon")
    ins.getName()
    ins.setSymbol("C")
    ins.getSymbol()
    # setSymbol must accept str, unicode and OpenMS::String inputs
    e = pyopenms.Element()
    e.setSymbol("blah")
    e.setSymbol("blah")
    e.setSymbol(u"blah")
    e.setSymbol(str("blah"))
    oms_string = s("blu")
    e.setSymbol(oms_string)
    # the input String object stays valid after being passed in
    assert oms_string
    assert oms_string.toString() == "blu"
    # non-ASCII symbols: utf8 round-trips as text, latin1 stays as bytes
    evil = u"blü"
    evil8 = evil.encode("utf8")
    evil1 = evil.encode("latin1")
    e.setSymbol(evil.encode("utf8"))
    assert e.getSymbol() == u"blü"
    e.setSymbol(evil.encode("latin1"))
    assert e.getSymbol().decode("latin1") == u"blü"
    e.setSymbol(evil8.decode("utf8"))
    assert e.getSymbol() == u"blü"
    assert s(e.getSymbol()) == s(u"blü")
    assert s(e.getSymbol()).toString() == u"blü"
    e.setSymbol(evil1)
    assert e.getSymbol().decode("latin1") == u"blü"
    e.setSymbol(evil8)
    assert e.getSymbol() == u"blü"
@report
def testResidue():
    """Residue must be constructible and expose all ResidueType members."""
    pyopenms.Residue()
    for member in ("Full", "Internal", "NTerminal", "CTerminal", "AIon",
                   "BIon", "CIon", "XIon", "YIon", "ZIon",
                   "SizeOfResidueType"):
        # raises AttributeError if the enum member is missing
        getattr(pyopenms.Residue.ResidueType, member)
@report
def testIsotopeDistribution():
    """IsotopeDistribution basic operations, insertion and iteration."""
    dist = pyopenms.IsotopeDistribution()
    # accessors and in-place operations must be callable on an empty dist
    dist.getMax()
    dist.getMin()
    dist.size()
    dist.clear()
    dist.renormalize()
    dist.trimLeft(6.0)
    dist.trimRight(8.0)
    dist.clear()
    # two inserted (mass, abundance) entries must be counted and iterable
    dist.insert(1, 2)
    dist.insert(6, 5)
    assert dist.size() == 2
    for peak in dist:
        print(peak)
@report
def testFineIsotopePatternGenerator():
    """Fine isotope patterns: threshold/flag behavior and peak counts."""
    iso = pyopenms.FineIsotopePatternGenerator()
    iso.setThreshold(1e-5)
    iso.setAbsolute(True)
    assert iso.getAbsolute()
    methanol = pyopenms.EmpiricalFormula("CH3OH")
    water = pyopenms.EmpiricalFormula("H2O")
    mw = methanol + water
    # lower thresholds reveal more (rarer) isotopologues
    iso_dist = mw.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-20, False, False))
    assert len(iso_dist.getContainer()) == 56
    iso_dist = mw.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-200, False, False))
    assert len(iso_dist.getContainer()) == 84
    c100 = pyopenms.EmpiricalFormula("C100")
    iso_dist = c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-200, False, False))
    assert len(iso_dist.getContainer()) == 101
    # (threshold, use_total_prob, absolute) flag combinations
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-2, False, False)).size() == 6
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-2, False, True)).size() == 5
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-2, True, False)).size() == 5
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-2, True, True)).size() == 5
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-10, False, False)).size() == 14
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-10, False, True)).size() == 13
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-10, True, False)).size() == 10
    assert c100.getIsotopeDistribution(pyopenms.FineIsotopePatternGenerator(1e-10, True, True)).size() == 10
    # monoisotopic peak of methanol
    iso = pyopenms.FineIsotopePatternGenerator(1e-5, False, False)
    isod = iso.run(methanol)
    assert len(isod.getContainer()) == 6
    assert abs(isod.getContainer()[0].getMZ() - 32.0262151276) < 1e-5
    # BUG FIX: the intensity check was one-sided (missing abs());
    # mirror the two-sided tolerance used for the m/z check above
    assert abs(isod.getContainer()[0].getIntensity() - 0.986442089081) < 1e-5
@report
def testCoarseIsotopePatternGenerator():
    """Coarse isotope patterns: max-isotope cap and methanol peak values."""
    iso = pyopenms.CoarseIsotopePatternGenerator()
    iso.setMaxIsotope(5)
    assert iso.getMaxIsotope() == 5
    # averagine estimate from a peptide weight must not raise
    res = iso.estimateFromPeptideWeight(500)
    methanol = pyopenms.EmpiricalFormula("CH3OH")
    water = pyopenms.EmpiricalFormula("H2O")
    mw = methanol + water
    # max_isotope=3 caps the container; 0 means "no cap"
    iso_dist = mw.getIsotopeDistribution(pyopenms.CoarseIsotopePatternGenerator(3))
    assert len(iso_dist.getContainer()) == 3, len(iso_dist.getContainer())
    iso_dist = mw.getIsotopeDistribution(pyopenms.CoarseIsotopePatternGenerator(0))
    assert len(iso_dist.getContainer()) == 18, len(iso_dist.getContainer())
    iso = pyopenms.CoarseIsotopePatternGenerator(10)
    isod = iso.run(methanol)
    assert len(isod.getContainer()) == 10, len(isod.getContainer())
    assert abs(isod.getContainer()[0].getMZ() - 32.0262151276) < 1e-5
    # BUG FIX: the intensity check was one-sided (missing abs());
    # mirror the two-sided tolerance used for the m/z check above
    assert abs(isod.getContainer()[0].getIntensity() - 0.986442089081) < 1e-5
@report
def testEmpiricalFormula():
    """EmpiricalFormula accessors plus element composition of C2H5."""
    ins = pyopenms.EmpiricalFormula()
    # accessors must be callable on an empty formula
    ins.getMonoWeight()
    ins.getAverageWeight()
    ins.getIsotopeDistribution(pyopenms.CoarseIsotopePatternGenerator(0))
    ins.getNumberOfAtoms()
    ins.setCharge(2)
    ins.getCharge()
    ins.toString()
    ins.isEmpty()
    ins.isCharged()
    ins.hasElement( pyopenms.Element() )
    ef = pyopenms.EmpiricalFormula("C2H5")
    # FIX: the local was named `s`, shadowing the module-level
    # `from pyopenms import String as s` alias used throughout this file
    formula_str = ef.toString()
    assert formula_str == "C2H5"
    # composition maps bytes element symbols to atom counts
    composition = ef.getElementalComposition()
    assert composition[b"C"] == 2
    assert composition[b"H"] == 5
    assert ef.getNumberOfAtoms() == 7
@report
def testIdentificationHit():
    """IdentificationHit meta-info interface plus setter/getter round-trips."""
    f = pyopenms.IdentificationHit()
    _testMetaInfoInterface(f)
    # existence checks for the full accessor API
    assert pyopenms.IdentificationHit().setId is not None
    assert pyopenms.IdentificationHit().getId is not None
    assert pyopenms.IdentificationHit().setCharge is not None
    assert pyopenms.IdentificationHit().getCharge is not None
    assert pyopenms.IdentificationHit().setCalculatedMassToCharge is not None
    assert pyopenms.IdentificationHit().getCalculatedMassToCharge is not None
    assert pyopenms.IdentificationHit().setExperimentalMassToCharge is not None
    assert pyopenms.IdentificationHit().getExperimentalMassToCharge is not None
    assert pyopenms.IdentificationHit().setName is not None
    assert pyopenms.IdentificationHit().getName is not None
    assert pyopenms.IdentificationHit().setPassThreshold is not None
    assert pyopenms.IdentificationHit().getPassThreshold is not None
    assert pyopenms.IdentificationHit().setRank is not None
    assert pyopenms.IdentificationHit().getRank is not None
    # round-trips (the first pair is intentionally repeated in the original)
    f.setId("test_id")
    assert f.getId() == "test_id"
    f.setId("test_id")
    assert f.getId() == "test_id"
    f.setCharge(5)
    assert f.getCharge() == 5
    f.setCalculatedMassToCharge(5.0)
    assert f.getCalculatedMassToCharge() == 5.0
    f.setExperimentalMassToCharge(5.0)
    assert f.getExperimentalMassToCharge() == 5.0
    f.setName("test")
    assert f.getName() == "test"
    f.setPassThreshold(True)
    assert f.getPassThreshold() == True
    f.setRank(42)
    assert f.getRank() == 42
@report
def testSpectrumIdentification():
    """SpectrumIdentification: hits can be added and read back."""
    f = pyopenms.SpectrumIdentification()
    _testMetaInfoInterface(f)
    assert pyopenms.SpectrumIdentification().setHits is not None
    assert pyopenms.SpectrumIdentification().addHit is not None
    assert pyopenms.SpectrumIdentification().getHits is not None
    hit = pyopenms.IdentificationHit()
    hit.setName("test1")
    f.addHit(hit)
    hit = pyopenms.IdentificationHit()
    hit.setName("test2")
    f.addHit(hit)
    all_hits = f.getHits()
    assert len(all_hits) == 2
    assert "test1" in [h.getName() for h in all_hits]
    assert "test2" in [h.getName() for h in all_hits]

@report
def testIdentification():
    """Identification: spectrum identifications accumulate."""
    f = pyopenms.Identification()
    _testMetaInfoInterface(f)
    assert pyopenms.Identification().setCreationDate is not None
    assert pyopenms.Identification().getCreationDate is not None
    assert pyopenms.Identification().setSpectrumIdentifications is not None
    assert pyopenms.Identification().addSpectrumIdentification is not None
    assert pyopenms.Identification().getSpectrumIdentifications is not None
    id1 = pyopenms.SpectrumIdentification()
    f.addSpectrumIdentification(id1)
    assert len(f.getSpectrumIdentifications()) == 1
    id2 = pyopenms.SpectrumIdentification()
    f.addSpectrumIdentification(id2)
    assert len(f.getSpectrumIdentifications()) == 2
@report
def testModificationDefinitionsSet():
    """ModificationDefinitionsSet can be built empty or from mod-name lists."""
    empty_set = pyopenms.ModificationDefinitionsSet()
    full_set = pyopenms.ModificationDefinitionsSet([b"Carbamidomethyl"], [b"Oxidation"])

@report
def test_AcquisitionInfo():
    """AcquisitionInfo: equality and method-of-combination round trip."""
    info = pyopenms.AcquisitionInfo()
    info.__doc__
    assert info == info
    assert not info != info
    info.setMethodOfCombination("ABC")
    assert info.getMethodOfCombination() == "ABC"
@report
def test_BaseFeature():
    """BaseFeature: unique-id / meta-info interfaces plus basic accessors."""
    bf = pyopenms.BaseFeature()
    _testMetaInfoInterface(bf)
    _testUniqueIdInterface(bf)
    bf.clearUniqueId()
    assert bf.ensureUniqueId()
    assert bf.getCharge() == 0
    assert isinstance(bf.getQuality(), float)
    # 'long' is presumably a py2/py3 compat alias defined elsewhere in this file — confirm
    assert isinstance(bf.getUniqueId(), (long, int))
    assert isinstance(bf.getWidth(), float)
    assert not bf.hasInvalidUniqueId()
    assert bf.hasValidUniqueId()
    _testMetaInfoInterface(bf)
    bf.setCharge(1)
    bf.setQuality(0.0)
    bf.setWidth(1.0)

@report
def test_AnnotationState():
    """All AnnotationState enum members are present."""
    state = pyopenms.AnnotationState()
    assert state.FEATURE_ID_NONE is not None
    assert state.FEATURE_ID_SINGLE is not None
    assert state.FEATURE_ID_MULTIPLE_SAME is not None
    assert state.FEATURE_ID_MULTIPLE_DIVERGENT is not None
    assert state.SIZE_OF_ANNOTATIONSTATE is not None

@report
def testChecksumType():
    """ChecksumType enum members are plain ints."""
    assert isinstance(pyopenms.ChecksumType.MD5, int)
    assert isinstance(pyopenms.ChecksumType.SHA1, int)
    assert isinstance(pyopenms.ChecksumType.SIZE_OF_CHECKSUMTYPE, int)
    assert isinstance(pyopenms.ChecksumType.UNKNOWN_CHECKSUM, int)
@report
def testChromatogramPeak():
    """ChromatogramPeak: equality plus RT/intensity round trip."""
    p = pyopenms.ChromatogramPeak()
    assert p == p
    assert not p != p
    p.setIntensity(12.0)
    p.setRT(34.0)
    assert p.getIntensity() == 12.0
    assert p.getRT() == 34.0

@report
def testChromatogramToosl():
    """ChromatogramTools conversion entry points exist.

    NOTE(review): the function name is a typo for "Tools"; kept as-is since
    renaming would change which tests a name-based runner collects.
    """
    pyopenms.ChromatogramTools()
    pyopenms.ChromatogramTools.convertChromatogramsToSpectra
    pyopenms.ChromatogramTools.convertSpectraToChromatograms
@report
def testConsensusFeature():
    """ConsensusFeature: copy semantics, accessors, and consensus computation."""
    f = pyopenms.ConsensusFeature()
    f_ = copy.copy(f)
    assert f_ == f
    f_ = copy.deepcopy(f)
    assert f_ == f
    f_ = pyopenms.ConsensusFeature(f)
    assert f_ == f
    _testUniqueIdInterface(f)
    _testMetaInfoInterface(f)
    f.setCharge(1)
    f.setQuality(2.0)
    f.setWidth(4.0)
    assert f.getCharge() == 1
    assert f.getQuality() == 2.0
    assert f.getWidth() == 4.0
    # insert accepts Peak2D, BaseFeature and nested ConsensusFeature
    f.insert(0, pyopenms.Peak2D(), 1)
    f.insert(1, pyopenms.BaseFeature())
    f.insert(2, pyopenms.ConsensusFeature())
    f.computeConsensus()
    f.computeDechargeConsensus
    f.computeMonoisotopicConsensus()
    assert f.size() >= 0
    p = f.getPeptideIdentifications()
    f.setPeptideIdentifications(p)
@report
def testConsensusMap():
    """ConsensusMap: copy semantics, sorting, ranges and metadata accessors."""
    m = pyopenms.ConsensusMap()
    m_ = copy.copy(m)
    assert m_ == m
    m_ = copy.deepcopy(m)
    assert m_ == m
    m_ = pyopenms.ConsensusMap(m)
    assert m_ == m
    # call every accessor/mutator once (smoke test only — no value checks)
    m.clear()
    m.clearUniqueId()
    m.ensureUniqueId()
    m.getDataProcessing()
    m.getColumnHeaders()
    m.getProteinIdentifications()
    m.getUnassignedPeptideIdentifications()
    m.getUniqueId()
    m.hasInvalidUniqueId()
    m.hasValidUniqueId()
    m.setDataProcessing
    m.setColumnHeaders
    m.setProteinIdentifications
    m.setUnassignedPeptideIdentifications
    m.setUniqueId
    m.setUniqueIds
    m.size()
    m.sortByIntensity()
    m.sortByMZ()
    m.sortByMaps()
    m.sortByPosition()
    m.sortByQuality()
    m.sortByRT()
    m.sortBySize()
    m.updateRanges()
    assert isinstance(m.getMin()[0], float)
    assert isinstance(m.getMin()[0], float)
    assert isinstance(m.getMax()[1], float)
    assert isinstance(m.getMax()[1], float)
    assert isinstance(m.getMinInt(), float)
    assert isinstance(m.getMaxInt(), float)
    m.getIdentifier()
    m.getLoadedFileType()
    m.getLoadedFilePath()
    assert m == m
    assert not m != m
@report
def testConsensusXMLFile():
    """ConsensusXMLFile exposes options plus load/store."""
    handler = pyopenms.ConsensusXMLFile()
    handler.getOptions()
    assert handler.load is not None
    assert handler.store is not None

@report
def testXTandemXMLFile():
    """XTandemXMLFile exposes load."""
    handler = pyopenms.XTandemXMLFile()
    assert handler.load is not None
@report
def testXTandemInfile():
    """XTandemInfile: accessor surface is exposed; taxon round-trips.

    BUG FIX: the attribute checks up to getDefaultParametersFilename were
    bare expressions (``f.setX is not None``) whose results were discarded —
    they verified nothing. They are now real assertions, matching the style
    of the checks further down.
    """
    f = pyopenms.XTandemInfile()
    assert f.setFragmentMassTolerance is not None
    assert f.getFragmentMassTolerance is not None
    assert f.setPrecursorMassTolerancePlus is not None
    assert f.getPrecursorMassTolerancePlus is not None
    assert f.setPrecursorMassToleranceMinus is not None
    assert f.getPrecursorMassToleranceMinus is not None
    assert f.setPrecursorErrorType is not None
    assert f.getPrecursorErrorType is not None
    assert f.setFragmentMassErrorUnit is not None
    assert f.getFragmentMassErrorUnit is not None
    assert f.setPrecursorMassErrorUnit is not None
    assert f.getPrecursorMassErrorUnit is not None
    assert f.setNumberOfThreads is not None
    assert f.getNumberOfThreads is not None
    assert f.setModifications is not None
    assert f.getModifications is not None
    assert f.setOutputFilename is not None
    assert f.getOutputFilename is not None
    assert f.setInputFilename is not None
    assert f.getInputFilename is not None
    assert f.setTaxonomyFilename is not None
    assert f.getTaxonomyFilename is not None
    assert f.setDefaultParametersFilename is not None
    assert f.getDefaultParametersFilename is not None
    f.setTaxon("testTaxon")
    assert f.getTaxon() == "testTaxon"
    assert f.setMaxPrecursorCharge is not None
    assert f.getMaxPrecursorCharge is not None
    assert f.setNumberOfMissedCleavages is not None
    assert f.getNumberOfMissedCleavages is not None
    assert f.setMaxValidEValue is not None
    assert f.getMaxValidEValue is not None
    assert f.setSemiCleavage is not None
    assert f.setAllowIsotopeError is not None
    assert f.write is not None
    assert f.setCleavageSite is not None
    assert f.getCleavageSite is not None
@report
def testSignalToNoiseEstimatorMedian():
    """Median S/N estimator exposes init and getSignalToNoise."""
    estimator = pyopenms.SignalToNoiseEstimatorMedian()
    assert estimator.init is not None
    assert estimator.getSignalToNoise is not None

@report
def testSignalToNoiseEstimatorMedianChrom():
    """Chromatogram variant of the median S/N estimator."""
    estimator = pyopenms.SignalToNoiseEstimatorMedianChrom()
    assert estimator.init is not None
    assert estimator.getSignalToNoise is not None

@report
def testConvexHull2D():
    """ConvexHull2D can be cleared and compares equal to itself."""
    hull = pyopenms.ConvexHull2D()
    hull.clear()
    assert hull == hull
@report
def testDataProcessing(dp=None):
    """DataProcessing: meta-info, processing actions and software accessors.

    BUG FIX: the default argument used to be ``dp=pyopenms.DataProcessing()``,
    which Python evaluates once at definition time; since this function
    mutates ``dp`` (setSoftware), repeated no-argument calls shared one
    already-mutated instance. A ``None`` sentinel restores a fresh default
    per call while keeping explicit-argument callers (e.g. testFeatureMap)
    unchanged.
    """
    if dp is None:
        dp = pyopenms.DataProcessing()
    _testMetaInfoInterface(dp)
    assert dp == dp
    assert not dp != dp
    dp.clearMetaInfo()
    k = []
    dp.getKeys(k)
    assert k == []
    dp.getMetaValue
    ac = dp.getProcessingActions()
    assert ac == set(())
    ac = set([ pyopenms.ProcessingAction.PEAK_PICKING, pyopenms.ProcessingAction.BASELINE_REDUCTION])
    dp.setProcessingActions(ac)
    assert len(dp.getProcessingActions() ) == 2
    _testStrOutput(dp.getSoftware().getName())
    _testStrOutput(dp.getSoftware().getVersion())
    dp.isMetaEmpty()
    dp.metaValueExists
    dp.removeMetaValue
    s = dp.getSoftware()
    s.setName("pyopenms")
    dp.setSoftware(s)
    assert dp.getSoftware().getName() == "pyopenms"
@report
def testDataType():
    """Every DataType enum member is a plain int."""
    members = [
        pyopenms.DataType.DOUBLE_LIST,
        pyopenms.DataType.DOUBLE_VALUE,
        pyopenms.DataType.EMPTY_VALUE,
        pyopenms.DataType.INT_LIST,
        pyopenms.DataType.INT_VALUE,
        pyopenms.DataType.STRING_LIST,
        pyopenms.DataType.STRING_VALUE,
    ]
    for member in members:
        assert isinstance(member, int)
@report
def testDataValue():
    """DataValue: each constructor argument type maps to the right
    DataType and converts back losslessly."""
    a = pyopenms.DataValue()
    assert a.isEmpty()
    a = pyopenms.DataValue(1)
    assert not a.isEmpty()
    assert a.toInt() == 1
    assert a.valueType() == pyopenms.DataType.INT_VALUE
    a = pyopenms.DataValue(1.0)
    assert not a.isEmpty()
    assert a.toDouble() == 1.0
    assert a.valueType() == pyopenms.DataType.DOUBLE_VALUE
    a = pyopenms.DataValue("1")
    assert not a.isEmpty()
    assert a.toString() == "1"
    assert a.valueType() == pyopenms.DataType.STRING_VALUE
    a = pyopenms.DataValue([1])
    assert not a.isEmpty()
    assert a.toIntList() == [1]
    assert a.valueType() == pyopenms.DataType.INT_LIST
    a = pyopenms.DataValue([1.0])
    assert not a.isEmpty()
    assert a.toDoubleList() == [1.0]
    assert a.valueType() == pyopenms.DataType.DOUBLE_LIST
    a = pyopenms.DataValue([b"1.0"])
    assert not a.isEmpty()
    assert a.toStringList() == [b"1.0"]
    assert a.valueType() == pyopenms.DataType.STRING_LIST
    # a missing meta value comes back as None, not an empty DataValue
    assert pyopenms.MSSpectrum().getMetaValue("nonexisingkey") is None
@report
def testAdduct():
    """Adduct is constructible."""
    pyopenms.Adduct()

@report
def testGaussFitter():
    """GaussFitter is constructible."""
    pyopenms.GaussFitter()

@report
def testGaussFitResult():
    """GaussFitResult fields A/x0/sigma are writable."""
    result = pyopenms.GaussFitResult(0.0, 0.0, 0.0)
    result.A = 5.0
    result.x0 = 5.0
    result.sigma = 5.0

@report
def testChargePair():
    """ChargePair is constructible."""
    pyopenms.ChargePair()

@report
def testCompomer():
    """Compomer is constructible."""
    pyopenms.Compomer()

@report
def testCVMappings():
    """CVMappings is constructible."""
    pyopenms.CVMappings()
@report
def testCVMappingFile():
    """CVMappingFile is constructible and exposes load."""
    mapping_file = pyopenms.CVMappingFile()
    assert pyopenms.CVMappingFile().load

@report
def testControlledVocabulary():
    """ControlledVocabulary is constructible and exposes loadFromOBO."""
    vocabulary = pyopenms.ControlledVocabulary()
    assert pyopenms.ControlledVocabulary().loadFromOBO

@report
def testSemanticValidator():
    """SemanticValidator builds from mappings + CV and exposes its API."""
    mappings = pyopenms.CVMappings()
    vocabulary = pyopenms.ControlledVocabulary()
    validator = pyopenms.SemanticValidator(mappings, vocabulary)
    assert validator.validate is not None
    assert validator.setCheckTermValueTypes is not None
    assert validator.setCheckUnits is not None
# @tests: DateTime (no test implemented yet)
#  DateTime.__init__
#  DateTime.getDate
#  DateTime.getTime
#  DateTime.now
# NOTE: stray closing `"""` from a removed docstring-style test was dropped.
@report
def testFeature():
    """Feature: meta/unique-id interfaces plus setter/getter round trips."""
    f = pyopenms.Feature()
    _testMetaInfoInterface(f)
    _testUniqueIdInterface(f)
    f.setConvexHulls(f.getConvexHulls())
    f.setSubordinates(f.getSubordinates())
    f.setUniqueId(12345)
    assert f == f
    assert not f != f
    f.setCharge(-1)
    assert f.getCharge() == -1
    f.setIntensity(10.0)
    assert f.getIntensity() == 10.0
    f.setQuality(0, 20.0)
    assert f.getQuality(0) == 20.0
    f.setRT(30.0)
    assert f.getRT() == 30.0
    f.setWidth(40.0)
    assert f.getWidth() == 40.0
    p = f.getPeptideIdentifications()
    f.setPeptideIdentifications(p)
@report
def testFeatureFinder():
    """Run the 'centroided' FeatureFinder on empty input; validate its params."""
    ff = pyopenms.FeatureFinder()
    name = pyopenms.FeatureFinderAlgorithmPicked.getProductName()
    ff.run(name, pyopenms.MSExperiment(), pyopenms.FeatureMap(),
           pyopenms.Param(), pyopenms.FeatureMap())
    _testProgressLogger(ff)
    p = ff.getParameters(name)
    _testParam(p)

@report
def testFeatureFileOptions():
    """FeatureFileOptions getters run; setters are exposed."""
    fo = pyopenms.FeatureFileOptions()
    fo.getLoadConvexHull()
    fo.getLoadSubordinates()
    fo.getSizeOnly()
    assert fo.setLoadConvexHull is not None
    assert fo.setLoadSubordinates is not None
    assert fo.setMetadataOnly is not None
    assert fo.setSizeOnly is not None
@report
def _testParam(p):
    """Shared helper: exhaustively exercise a pyopenms.Param instance.

    Checks dict conversion, per-key round trips (value/description/tags),
    section descriptions, copy/insert/update semantics, and an XML
    store/load round trip via ParamXMLFile (writes ``test.ini``).
    """
    assert p == p
    dd = p.asDict()
    assert len(dd) == p.size()
    assert isinstance(dd, dict)
    for k in p.keys():
        value = p[k]
        p[k] = value
        # updating a Param from itself (or its dict form) must be a no-op
        p.update(p)
        p.update(p.asDict())
        assert p[k] == value
        desc = p.getDescription(k)
        tags = p.getTags(k)
        p.setValue(k, value, desc, tags)
        p.setValue(k, value, desc)
        assert p.exists(k)
        # section descriptions only make sense for colon-nested keys
        if len(k.split(b":")) < 2: continue
        f = k.split(b":")[0]
        p.setSectionDescription(f, k)
        assert p.getSectionDescription(f) == k.decode()
    # NOTE: 'k' below is the last key from the loop above
    assert p.get(k) is not None
    assert len(p.values()) == len([p[k] for k in p.keys()])
    assert sorted(p.items()) == sorted((k, p[k]) for k in p.keys())
    assert not p.exists("asdflkj01231321321v")
    p.addTag(k, "a")
    p.addTags(k, [b"", b"c"])
    assert sorted(p.getTags(k)) == [b"", b"a", b"c"]
    p.clearTags(k)
    assert p.getTags(k) == []
    pn = pyopenms.Param()
    pn.insert("master:", p)
    assert pn.exists(b"master:"+k)
    p1 = pn.copy("master:", True)
    assert p1 == p
    p1.update(p)
    p1.update(p,0)
    p1.update(p,1)
    p1.update(dd)
    # presence checks only — these setters are not invoked here
    p.setValidStrings
    p.setMinFloat
    p.setMaxFloat
    p.setMinInt
    p.setMaxInt
    ph = pyopenms.ParamXMLFile()
    ph.store("test.ini", p)
    p1 = pyopenms.Param()
    ph.load("test.ini", p1)
    assert p == p1
    e1 = p1.getEntry(k)
    for f in ["name", "description", "value", "tags", "valid_strings",
              "min_float", "max_float", "min_int", "max_int"]:
        assert getattr(e1, f) is not None
    assert e1 == e1
    # get() with a default returns the default for a missing key
    assert p1.get(b"abcde", 7) == 7
@report
def testFeatureFinderAlgorithmPicked():
    """Defaults/name round trip for the 'centroided' feature finder.

    NOTE(review): this function is redefined near the bottom of this file;
    the later (slimmer) definition shadows this one at import time.
    """
    ff = pyopenms.FeatureFinderAlgorithmPicked()
    p = ff.getDefaults()
    _testParam(p)
    _testParam(ff.getParameters())
    assert ff.getName() == "FeatureFinderAlgorithm"
    assert pyopenms.FeatureFinderAlgorithmPicked.getProductName() == "centroided"
    ff.setParameters(pyopenms.Param())
    ff.setName("test")
    assert ff.getName() == "test"

@report
def testFeatureFinderAlgorithmIsotopeWavelet():
    """Defaults/name round trip for the isotope-wavelet feature finder.

    NOTE(review): also redefined later in this file; the later definition wins.
    """
    ff = pyopenms.FeatureFinderAlgorithmIsotopeWavelet()
    p = ff.getDefaults()
    _testParam(p)
    assert ff.getName() == "FeatureFinderAlgorithm"
    assert pyopenms.FeatureFinderAlgorithmIsotopeWavelet.getProductName() == "isotope_wavelet"
    ff.setParameters(pyopenms.Param())
    ff.setName("test")
    assert ff.getName() == "test"
@report
def testCompNovoIdentification():
    """CompNovoIdentification: defaults validate; identification API exists."""
    ff = pyopenms.CompNovoIdentification()
    p = ff.getDefaults()
    _testParam(p)
    assert pyopenms.CompNovoIdentification().getIdentification is not None
    assert pyopenms.CompNovoIdentification().getIdentifications is not None

@report
def testCompNovoIdentificationCID():
    """CID variant of CompNovoIdentification."""
    ff = pyopenms.CompNovoIdentificationCID()
    p = ff.getDefaults()
    _testParam(p)
    assert pyopenms.CompNovoIdentificationCID().getIdentification is not None
    assert pyopenms.CompNovoIdentificationCID().getIdentifications is not None

@report
def testExperimentalSettings():
    """ExperimentalSettings is constructible."""
    ff = pyopenms.ExperimentalSettings()

@report
def testFeatureDeconvolution():
    """FeatureDeconvolution: defaults validate; compute exists."""
    ff = pyopenms.FeatureDeconvolution()
    p = ff.getDefaults()
    _testParam(p)
    assert pyopenms.FeatureDeconvolution().compute is not None

@report
def testInternalCalibration():
    """InternalCalibration default parameters pass _testParam.

    NOTE(review): redefined later in this file; the later definition
    shadows this one at import time.
    """
    ff = pyopenms.InternalCalibration()
    p = ff.getDefaults()
    _testParam(p)
@report
def testItraqConstants():
    """ITRAQ plex-type constants and isotope-matrix helpers exist."""
    constants = pyopenms.ItraqConstants()
    assert pyopenms.ITRAQ_TYPES.FOURPLEX is not None
    assert pyopenms.ITRAQ_TYPES.EIGHTPLEX is not None
    assert pyopenms.ITRAQ_TYPES.TMT_SIXPLEX is not None
    assert constants.getIsotopeMatrixAsStringList is not None
    assert constants.updateIsotopeMatrixFromStringList is not None
    assert constants.translateIsotopeMatrix is not None

@report
def testLinearResampler():
    """LinearResampler: defaults validate; raster entry points exist."""
    ff = pyopenms.LinearResampler()
    p = ff.getDefaults()
    _testParam(p)
    assert pyopenms.LinearResampler().raster is not None
    assert pyopenms.LinearResampler().rasterExperiment is not None

@report
def testPeptideAndProteinQuant():
    """PeptideAndProteinQuant: defaults validate; quantify entry points exist."""
    ff = pyopenms.PeptideAndProteinQuant()
    p = ff.getDefaults()
    _testParam(p)
    assert pyopenms.PeptideAndProteinQuant().quantifyPeptides is not None
    assert pyopenms.PeptideAndProteinQuant().quantifyProteins is not None

@report
def testSeedListGenerator():
    """SeedListGenerator default parameters pass _testParam.

    NOTE(review): shadowed by a later duplicate definition in this file.
    """
    ff = pyopenms.SeedListGenerator()
    p = ff.getDefaults()
    _testParam(p)

@report
def testTOFCalibration():
    """TOFCalibration calibration entry points exist."""
    ff = pyopenms.TOFCalibration()
    p = ff.getDefaults()
    assert pyopenms.TOFCalibration().calibrate is not None
    assert pyopenms.TOFCalibration().pickAndCalibrate is not None
# @tests: ConsensusID (no test implemented yet)
#  ConsensusID.__init__
# NOTE: stray closing `"""` from a removed docstring-style test was dropped.
@report
def testFalseDiscoveryRate():
    """FalseDiscoveryRate: defaults validate; apply exists."""
    fdr = pyopenms.FalseDiscoveryRate()
    defaults = fdr.getDefaults()
    _testParam(defaults)
    assert pyopenms.FalseDiscoveryRate().apply is not None

@report
def testIDFilter():
    """IDFilter is constructible."""
    pyopenms.IDFilter()

@report
def testProteinResolver():
    """ProteinResolver exposes its resolution API."""
    resolver = pyopenms.ProteinResolver()
    for method_name in ("resolveConsensus", "resolveID",
                        "setProteinData", "getResults"):
        assert getattr(pyopenms.ProteinResolver(), method_name) is not None

@report
def testSvmTheoreticalSpectrumGeneratorTrainer():
    """SVM spectrum-generator trainer exposes train/normalize."""
    trainer = pyopenms.SvmTheoreticalSpectrumGeneratorTrainer()
    assert pyopenms.SvmTheoreticalSpectrumGeneratorTrainer().trainModel is not None
    assert pyopenms.SvmTheoreticalSpectrumGeneratorTrainer().normalizeIntensity is not None
@report
def testPosteriorErrorProbabilityModel():
    """Fit the posterior-error-probability model on a toy score list and
    exercise its probability/plotting helpers."""
    model = pyopenms.PosteriorErrorProbabilityModel()
    p = model.getDefaults()
    _testParam(p)
    assert pyopenms.PosteriorErrorProbabilityModel().fit is not None
    assert pyopenms.PosteriorErrorProbabilityModel().computeProbability is not None
    scores = [float(i) for i in range(10)]
    model.fit(scores, "none")
    model.fit(scores, scores, "none")
    model.fillLogDensities(scores, scores, scores)
    assert model.computeLogLikelihood is not None
    assert model.pos_neg_mean_weighted_posteriors is not None
    # NOTE: local name shadows the GaussFitResult class; only the second
    # (incorrectly-assigned) fit result is kept and reused below
    GaussFitResult = model.getCorrectlyAssignedFitResult()
    GaussFitResult = model.getIncorrectlyAssignedFitResult()
    model.getNegativePrior()
    model.computeProbability(5.0)
    target = [float(i) for i in range(10)]
    model.getGumbelGnuplotFormula(GaussFitResult)
    model.getGaussGnuplotFormula(GaussFitResult)
    model.getBothGnuplotFormula(GaussFitResult, GaussFitResult)
    model.plotTargetDecoyEstimation(target, target)
    model.getSmallestScore()
@report
def testSeedListGenerator():
    """SeedListGenerator is constructible."""
    # FIXME: duplicate definition — shadows the earlier, more thorough
    # testSeedListGenerator defined above in this file.
    ff = pyopenms.SeedListGenerator()

@report
def testConsensusMapNormalizerAlgorithmMedian():
    """Median consensus-map normalizer exposes normalizeMaps."""
    ff = pyopenms.ConsensusMapNormalizerAlgorithmMedian()
    assert pyopenms.ConsensusMapNormalizerAlgorithmMedian().normalizeMaps is not None

@report
def testConsensusMapNormalizerAlgorithmQuantile():
    """Quantile consensus-map normalizer exposes normalizeMaps."""
    ff = pyopenms.ConsensusMapNormalizerAlgorithmQuantile()
    assert pyopenms.ConsensusMapNormalizerAlgorithmQuantile().normalizeMaps is not None

@report
def testConsensusMapNormalizerAlgorithmThreshold():
    """Threshold normalizer exposes correlation + normalization."""
    ff = pyopenms.ConsensusMapNormalizerAlgorithmThreshold()
    assert pyopenms.ConsensusMapNormalizerAlgorithmThreshold().computeCorrelation is not None
    assert pyopenms.ConsensusMapNormalizerAlgorithmThreshold().normalizeMaps is not None

@report
def testFeatureFinderAlgorithmPicked():
    """FeatureFinderAlgorithmPicked exposes setData/run."""
    # FIXME: duplicate definition — shadows the earlier, more thorough
    # version defined above in this file.
    ff = pyopenms.FeatureFinderAlgorithmPicked()
    assert pyopenms.FeatureFinderAlgorithmPicked().setData is not None
    assert pyopenms.FeatureFinderAlgorithmPicked().run is not None

@report
def testFeatureFinderAlgorithmIsotopeWavelet():
    """FeatureFinderAlgorithmIsotopeWavelet exposes setData/run."""
    # FIXME: duplicate definition — shadows the earlier, more thorough
    # version defined above in this file.
    ff = pyopenms.FeatureFinderAlgorithmIsotopeWavelet()
    assert pyopenms.FeatureFinderAlgorithmIsotopeWavelet().setData is not None
    assert pyopenms.FeatureFinderAlgorithmIsotopeWavelet().run is not None
@report
def testAScore():
    """AScore.compute runs on an empty hit/spectrum pair."""
    ff = pyopenms.AScore()
    hit = pyopenms.PeptideHit()
    spectrum = pyopenms.MSSpectrum()
    ff.compute(hit, spectrum)

@report
def testIDRipper():
    """IDRipper exposes rip."""
    ff = pyopenms.IDRipper()
    assert pyopenms.IDRipper().rip is not None

@report
def testFASTAFile():
    """FASTAFile exposes load/store."""
    ff = pyopenms.FASTAFile()
    assert pyopenms.FASTAFile().load is not None
    assert pyopenms.FASTAFile().store is not None

@report
def testFASTAEntry():
    """FASTAEntry is constructible."""
    ff = pyopenms.FASTAEntry()

@report
def testInternalCalibration():
    """InternalCalibration exposes its calibration entry points."""
    # FIXME: duplicate definition — shadows the earlier testInternalCalibration
    # defined above in this file.
    ff = pyopenms.InternalCalibration()
    assert pyopenms.InternalCalibration().fillCalibrants is not None
    assert pyopenms.InternalCalibration().getCalibrationPoints is not None
    assert pyopenms.InternalCalibration().calibrate is not None

@report
def testTransitionTSVFile():
    """TransitionTSVFile exposes TSV <-> TargetedExperiment conversion."""
    ff = pyopenms.TransitionTSVFile()
    assert pyopenms.TransitionTSVFile().convertTargetedExperimentToTSV is not None
    assert pyopenms.TransitionTSVFile().convertTSVToTargetedExperiment is not None
    assert pyopenms.TransitionTSVFile().validateTargetedExperiment is not None
@report
def testProteaseDigestion():
    """ProteaseDigestion API presence plus missed-cleavages round trip."""
    for attr in ("getMissedCleavages", "setMissedCleavages",
                 "digest", "peptideCount"):
        assert getattr(pyopenms.ProteaseDigestion(), attr) is not None
    digestion = pyopenms.ProteaseDigestion()
    digestion.setMissedCleavages(5)
    assert digestion.getMissedCleavages() == 5

@report
def testEnzymaticDigestionLogModel():
    """EnzymaticDigestionLogModel API presence plus log-threshold round trip."""
    for attr in ("getLogThreshold", "setLogThreshold",
                 "digest", "peptideCount"):
        assert getattr(pyopenms.EnzymaticDigestionLogModel(), attr) is not None
    model = pyopenms.EnzymaticDigestionLogModel()
    model.setLogThreshold(0.25)
    assert model.getLogThreshold() == 0.25

@report
def testIDDecoyProbability():
    """IDDecoyProbability exposes apply."""
    prob = pyopenms.IDDecoyProbability()
    assert pyopenms.IDDecoyProbability().apply is not None
@report
def testFeatureGrouping():
    """FeatureGroupingAlgorithm base API is present; QT variant works."""
    assert pyopenms.FeatureGroupingAlgorithm.getDefaults is not None
    assert pyopenms.FeatureGroupingAlgorithm.getName is not None
    assert pyopenms.FeatureGroupingAlgorithm.getParameters is not None
    assert pyopenms.FeatureGroupingAlgorithm.setName is not None
    assert pyopenms.FeatureGroupingAlgorithm.setParameters is not None
    assert pyopenms.FeatureGroupingAlgorithm.transferSubelements is not None
    qt = pyopenms.FeatureGroupingAlgorithmQT()
    qt.getDefaults()
    qt.getParameters()
    qt.getName()
    assert qt.group is not None
    assert qt.setName is not None
    assert qt.setParameters is not None
    assert qt.transferSubelements is not None
@report
def testFeatureMap():
    """FeatureMap: copy semantics, sorting invariants on a one-element map,
    swap, ranges, id lists, data processing and concatenation."""
    fm = pyopenms.FeatureMap()
    fm_ = copy.copy(fm)
    assert fm_ == fm
    fm_ = copy.deepcopy(fm)
    assert fm_ == fm
    fm_ = pyopenms.FeatureMap(fm)
    assert fm_ == fm
    _testUniqueIdInterface(fm)
    fm.clear()
    fm.clearUniqueId()
    fm.getIdentifier()
    fm.getLoadedFileType()
    fm.getLoadedFilePath()
    f = pyopenms.Feature()
    fm.push_back(f)
    assert len(list(fm)) == 1
    assert fm.size() == 1
    assert fm[0] == f
    # every sort variant must keep the single element intact
    fm.sortByIntensity()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByIntensity(False)
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByPosition()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByRT()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByMZ()
    assert fm.size() == 1
    assert fm[0] == f
    fm.sortByOverallQuality()
    assert fm.size() == 1
    assert fm[0] == f
    # swap moves the content to the other map
    fm2 = pyopenms.FeatureMap()
    fm.swap(fm2)
    assert fm2.size() == 1
    assert fm2[0] == f
    assert fm.size() == 0
    fm2.updateRanges()
    assert isinstance(fm2.getMin()[0], float)
    assert isinstance(fm2.getMin()[1], float)
    assert isinstance(fm2.getMax()[0], float)
    assert isinstance(fm2.getMax()[1], float)
    assert isinstance(fm2.getMinInt(), float)
    assert isinstance(fm2.getMaxInt(), float)
    assert fm2.getProteinIdentifications() == []
    fm2.setProteinIdentifications([])
    assert fm2.getUnassignedPeptideIdentifications() == []
    fm2.setUnassignedPeptideIdentifications([])
    fm2.clear()
    assert fm2.size() == 0
    dp = pyopenms.DataProcessing()
    fm2.setDataProcessing([dp])
    assert fm2.getDataProcessing() == [dp]
    testDataProcessing(dp)
    fm2.setUniqueIds()
    # in-place and binary concatenation
    fm += fm
    assert fm + fm2 != fm
@report
def testFeatureXMLFile():
    """Round-trip a FeatureMap through featureXML (writes test.featureXML)."""
    fm = pyopenms.FeatureMap()
    fm.setUniqueIds()
    fh = pyopenms.FeatureXMLFile()
    fh.store("test.featureXML", fm)
    fh.load("test.featureXML", fm)
    fh = pyopenms.FileHandler()
    fh.loadFeatures("test.featureXML", fm)

@report
def testFileDescription():
    """ColumnHeader exposes filename/label string fields and an int size."""
    fd = pyopenms.ColumnHeader()
    _testStrOutput(fd.filename)
    _testStrOutput(fd.label)
    assert isinstance(fd.size, int)

@report
def testFileHandler():
    """Round-trip an empty experiment through mzML/mzXML/mzData (writes files)."""
    mse = pyopenms.MSExperiment()
    fh = pyopenms.FileHandler()
    fh.storeExperiment("test1.mzML", mse)
    fh.loadExperiment("test1.mzML", mse)
    fh.storeExperiment("test1.mzXML", mse)
    fh.loadExperiment("test1.mzXML", mse)
    fh.storeExperiment("test1.mzData", mse)
    fh.loadExperiment("test1.mzData", mse)
@report
def testCachedMzML():
    """Store/load a one-spectrum experiment via CachedmzML (writes myCache.mzML)."""
    mse = pyopenms.MSExperiment()
    s = pyopenms.MSSpectrum()
    mse.addSpectrum(s)
    pyopenms.CachedmzML.store("myCache.mzML", mse)
    cfile = pyopenms.CachedmzML()
    pyopenms.CachedmzML.load("myCache.mzML", cfile)
    meta_data = cfile.getMetaData()
    assert cfile.getNrChromatograms() == 0
    assert cfile.getNrSpectra() == 1

@report
def testIndexedMzMLFile():
    """IndexedMzMLHandler sees the one stored spectrum (writes tfile_idx.mzML)."""
    mse = pyopenms.MSExperiment()
    s = pyopenms.MSSpectrum()
    mse.addSpectrum(s)
    pyopenms.MzMLFile().store("tfile_idx.mzML", mse)
    ih = pyopenms.IndexedMzMLHandler("tfile_idx.mzML")
    assert ih.getNrChromatograms() == 0
    assert ih.getNrSpectra() == 1
    s = ih.getMSSpectrumById(0)
    s2 = ih.getSpectrumById(0)
@report
def testIDMapper():
    """IDMapper: annotate exists, name round-trips, params are settable."""
    mapper = pyopenms.IDMapper()
    assert mapper.annotate is not None
    mapper.getDefaults()
    mapper.setName("x")
    assert mapper.getName() == "x"
    mapper.setParameters(mapper.getParameters())

@report
def testIdXMLFile():
    """IdXMLFile exposes load/store."""
    assert pyopenms.IdXMLFile().load is not None
    assert pyopenms.IdXMLFile().store is not None

@report
def testPepXMLFile():
    """PepXMLFile exposes load/store."""
    handler = pyopenms.PepXMLFile()
    assert pyopenms.PepXMLFile().load is not None
    assert pyopenms.PepXMLFile().store is not None

@report
def testProtXMLFile():
    """ProtXMLFile exposes load/store."""
    handler = pyopenms.ProtXMLFile()
    assert pyopenms.ProtXMLFile().load is not None
    assert pyopenms.ProtXMLFile().store is not None
@report
def testDTA2DFile():
    """DTA2DFile exposes load/store."""
    handler = pyopenms.DTA2DFile()
    assert pyopenms.DTA2DFile().load is not None
    assert pyopenms.DTA2DFile().store is not None

@report
def testDTAFile():
    """DTAFile exposes load/store."""
    handler = pyopenms.DTAFile()
    assert pyopenms.DTAFile().load is not None
    assert pyopenms.DTAFile().store is not None

@report
def testEDTAFile():
    """EDTAFile exposes load/store."""
    handler = pyopenms.EDTAFile()
    assert pyopenms.EDTAFile().load is not None
    assert pyopenms.EDTAFile().store is not None

@report
def testKroenikFile():
    """KroenikFile exposes load/store."""
    handler = pyopenms.KroenikFile()
    assert pyopenms.KroenikFile().load is not None
    assert pyopenms.KroenikFile().store is not None

@report
def testMSPFile():
    """MSPFile is constructible."""
    handler = pyopenms.MSPFile()

@report
def testMzIdentMLFile():
    """MzIdentMLFile exposes load/store/isSemanticallyValid."""
    handler = pyopenms.MzIdentMLFile()
    assert pyopenms.MzIdentMLFile().load is not None
    assert pyopenms.MzIdentMLFile().store is not None
    assert pyopenms.MzIdentMLFile().isSemanticallyValid is not None
@report
def testMzTabFile():
    """MzTabFile is constructible."""
    f = pyopenms.MzTabFile()

@report
def testMzTab():
    """Placeholder for MzTab tests.

    BUG FIX: this function had an empty body (a bare ``def`` immediately
    followed by the next decorator), which is a SyntaxError; ``pass`` keeps
    the module importable until a real test body is restored.
    """
    pass
@report
def testInstrumentSettings():
    """InstrumentSettings: polarity round trip and self-equality."""
    ins = pyopenms.InstrumentSettings()
    _testMetaInfoInterface(ins)
    ins.setPolarity(pyopenms.IonSource.Polarity.NEGATIVE)
    assert ins.getPolarity() == pyopenms.IonSource.Polarity.NEGATIVE
    assert ins == ins
    assert not ins != ins

@report
def testContactPerson():
    """ContactPerson setter/getter smoke test (no value assertions)."""
    ins = pyopenms.ContactPerson()
    ins.getFirstName()
    ins.setFirstName("test")
    ins.getLastName()
    ins.setLastName("test")
    ins.setName("Testy Test")
    ins.getInstitution()
    ins.setInstitution("test")
    ins.getEmail()
    ins.setEmail("test")
    ins.getURL()
    ins.setURL("test")
    ins.getAddress()
    ins.setAddress("test")
    ins.getContactInfo()
    ins.setContactInfo("test")
@report
def testDocumentIdentifier():
    """DocumentIdentifier accessors run without error."""
    ins = pyopenms.DocumentIdentifier()
    ins.setIdentifier("test")
    ins.getIdentifier()
    ins.getLoadedFilePath()
    ins.getLoadedFileType()

@report
def testGradient():
    """Gradient: eluents, timepoints and percentages round trip."""
    ins = pyopenms.Gradient()
    ins.addEluent("test")
    ins.clearEluents()
    assert len(ins.getEluents()) == 0
    ins.addEluent("test")
    assert len(ins.getEluents()) == 1
    ins.clearTimepoints()
    ins.addTimepoint(5)
    ins.getTimepoints()
    ins.setPercentage("test", 5, 20)
    ins.getPercentage("test", 5)
    ins.clearPercentages()
    ins.isValid()
@report
def testHPLC():
    """HPLC setter/getter smoke test including gradient assignment."""
    ins = pyopenms.HPLC()
    ins.setInstrument("test")
    ins.getInstrument()
    ins.setColumn("test")
    ins.getColumn()
    ins.setTemperature(6)
    ins.getTemperature()
    ins.setPressure(6)
    ins.getPressure()
    ins.setFlux(8)
    ins.getFlux()
    ins.setComment("test")
    ins.getComment()
    g = pyopenms.Gradient()
    ins.setGradient(g)
    ins.getGradient()

@report
def testInstrument():
    """Instrument: scalar accessors plus source/analyzer/detector lists."""
    ins = pyopenms.Instrument()
    ins.setName("test")
    ins.getName()
    ins.setVendor("test")
    ins.getVendor()
    ins.setModel("test")
    ins.getModel()
    ins.setCustomizations("test")
    ins.getCustomizations()
    ion_sources = [ pyopenms.IonSource() for i in range(5)]
    ins.setIonSources(ion_sources)
    ins.getIonSources()
    mass_analyzers = [ pyopenms.MassAnalyzer() for i in range(5)]
    ins.setMassAnalyzers(mass_analyzers)
    ins.getMassAnalyzers()
    ion_detectors = [ pyopenms.IonDetector() for i in range(5)]
    ins.setIonDetectors(ion_detectors)
    ins.getIonDetectors()
    s = pyopenms.Software()
    ins.setSoftware(s)
    ins.getSoftware()
@report
def testIonDetector():
    """IonDetector setter/getter smoke test."""
    ins = pyopenms.IonDetector()
    m = pyopenms.IonDetector.AcquisitionMode.ACQMODENULL
    ins.setAcquisitionMode(m)
    ins.getAcquisitionMode()
    ins.setResolution(8.0)
    ins.getResolution()
    ins.setADCSamplingFrequency(8.0)
    ins.getADCSamplingFrequency()
    ins.setOrder(8)
    ins.getOrder()

@report
def testIonSource():
    """IonSource: polarity / inlet / ionization enum round trips."""
    ins = pyopenms.IonSource()
    p = pyopenms.IonSource.Polarity.POSITIVE
    ins.setPolarity(p)
    ins.getPolarity()
    i = pyopenms.IonSource.InletType.INLETNULL
    ins.setInletType(i)
    ins.getInletType()
    i = pyopenms.IonSource.IonizationMethod.ESI
    ins.setIonizationMethod(i)
    ins.getIonizationMethod()
    ins.setOrder(5)
    ins.getOrder()
@report
def testMassAnalyzer():
    """MassAnalyzer: every enum and scalar accessor runs without error."""
    ins = pyopenms.MassAnalyzer()
    ma = pyopenms.MassAnalyzer.AnalyzerType.QUADRUPOLE
    ins.setType(ma)
    ins.getType()
    res = pyopenms.MassAnalyzer.ResolutionMethod.FWHM
    ins.setResolutionMethod(res)
    ins.getResolutionMethod()
    res = pyopenms.MassAnalyzer.ResolutionType.CONSTANT
    ins.setResolutionType(res)
    ins.getResolutionType()
    res = pyopenms.MassAnalyzer.ScanDirection.UP
    ins.setScanDirection(res)
    ins.getScanDirection()
    res = pyopenms.MassAnalyzer.ScanLaw.LINEAR
    ins.setScanLaw(res)
    ins.getScanLaw()
    res = pyopenms.MassAnalyzer.ReflectronState.ON
    ins.setReflectronState(res)
    ins.getReflectronState()
    ins.setResolution(5.0)
    ins.getResolution()
    ins.setAccuracy(5.0)
    ins.getAccuracy()
    ins.setScanRate(5.0)
    ins.getScanRate()
    ins.setScanTime(5.0)
    ins.getScanTime()
    ins.setTOFTotalPathLength(5.0)
    ins.getTOFTotalPathLength()
    ins.setIsolationWidth(5.0)
    ins.getIsolationWidth()
    ins.setFinalMSExponent(5)
    ins.getFinalMSExponent()
    ins.setMagneticFieldStrength(5.0)
    ins.getMagneticFieldStrength()
    ins.setOrder(5)
    ins.getOrder()
@report
def testSample():
    """Sample: accessor smoke test; removeTreatment on an empty sample raises."""
    ins = pyopenms.Sample()
    ins.setName("test")
    ins.getName()
    ins.setOrganism("test")
    ins.getOrganism()
    ins.setNumber("test")
    ins.getNumber()
    ins.setComment("test")
    ins.getComment()
    state = pyopenms.Sample.SampleState.LIQUID
    ins.setState(state)
    ins.getState()
    ins.setMass(42.0)
    ins.getMass()
    ins.setVolume(42.0)
    ins.getVolume()
    ins.setConcentration(42.0)
    ins.getConcentration()
    a = ins.getSubsamples()
    ins.setSubsamples(a)
    # removing a treatment from a sample with none must raise
    has_exception = False
    try:
        ins.removeTreatment(0)
    except Exception:
        has_exception = True
    assert has_exception
    assert ins.countTreatments() == 0

@report
def testLogType():
    """LogType enum members are plain ints."""
    assert isinstance(pyopenms.LogType.CMD, int)
    assert isinstance(pyopenms.LogType.GUI, int)
    assert isinstance(pyopenms.LogType.NONE, int)
@report
def testMSExperiment():
    """MSExperiment: copy semantics, ranges, spectrum addition and 2D peak
    extraction via get2DPeakDataLong."""
    mse = pyopenms.MSExperiment()
    mse_ = copy.copy(mse)
    assert mse_ == mse
    mse_ = copy.deepcopy(mse)
    assert mse_ == mse
    mse_ = pyopenms.MSExperiment(mse)
    assert mse_ == mse
    _testMetaInfoInterface(mse)
    mse.updateRanges()
    mse.sortSpectra(True)
    assert isinstance(mse.getMaxRT(), float)
    assert isinstance(mse.getMinRT(), float)
    assert isinstance(mse.getMaxMZ(), float)
    assert isinstance(mse.getMinMZ(), float)
    _testStrOutput(mse.getLoadedFilePath())
    assert isinstance(mse.getMinInt(), float)
    assert isinstance(mse.getMaxInt(), float)
    assert isinstance(mse.getMin()[0], float)
    assert isinstance(mse.getMin()[1], float)
    assert isinstance(mse.getMax()[0], float)
    assert isinstance(mse.getMax()[1], float)
    mse.setLoadedFilePath("")
    assert mse.size() == 0
    mse.getIdentifier()
    mse.getLoadedFileType()
    mse.getLoadedFilePath()
    # add one two-peak spectrum; m/z is float64, intensity float32
    spec = pyopenms.MSSpectrum()
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mse.addSpectrum(spec)
    assert mse.size() == 1
    assert mse[0] is not None
    mse.updateRanges()
    # both peaks fall inside the full RT/MZ window
    rt, mz, inty = mse.get2DPeakDataLong(mse.getMinRT(),mse.getMaxRT(),mse.getMinMZ(),mse.getMaxMZ())
    assert rt.shape[0] == 2
    assert mz.shape[0] == 2
    assert inty.shape[0] == 2
    assert isinstance(list(mse), list)
    assert mse == mse
    assert not mse != mse
    assert mse.getSize() >= 0
    assert int(mse.isSorted()) in (0,1)
    mse2 = copy.copy(mse)
    assert mse.getSize() == mse2.getSize()
    assert mse2 == mse
@report
def testMSQuantifications():
msq = pyopenms.MSQuantifications()
assert msq == msq
assert not msq != msq
msq.setConsensusMaps(msq.getConsensusMaps())
summary = msq.getAnalysisSummary()
msq.setDataProcessingList(msq.getDataProcessingList())
msq.getAssays()
msq.getFeatureMaps()
msq.setAnalysisSummaryQuantType(pyopenms.MSQuantifications.QUANT_TYPES.LABELFREE)
msq.addConsensusMap(pyopenms.ConsensusMap())
msq.assignUIDs()
@report
def testMSSpectrum():
    """Exercise MSSpectrum: copy semantics, meta data, peak access, the
    get_peaks()/set_peaks() numpy round-trips and the integer/float
    data-array accessors."""
    spec = pyopenms.MSSpectrum()
    # copy / deepcopy / copy-constructor all preserve equality
    spec_ = copy.copy(spec)
    assert spec_ == spec
    spec_ = copy.deepcopy(spec)
    assert spec_ == spec
    spec_ = pyopenms.MSSpectrum(spec)
    assert spec_ == spec
    _testMetaInfoInterface(spec)
    testSpectrumSetting(spec)
    spec.setRT(3.0)
    assert spec.getRT() == 3.0
    spec.setMSLevel(2)
    assert spec.getMSLevel() == 2
    spec.setName("spec")
    assert spec.getName() == "spec"
    p = pyopenms.Peak1D()
    p.setMZ(1000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    assert spec.size() == 1
    assert spec[0] == p
    spec.updateRanges()
    assert isinstance(spec.findNearest(0.0), int)
    assert isinstance(spec.getMin()[0], float)
    assert isinstance(spec.getMax()[0], float)
    assert isinstance(spec.getMinInt(), float)
    assert isinstance(spec.getMaxInt(), float)
    assert spec == spec
    assert not spec != spec
    # get_peaks returns (mz, intensity) arrays of equal length
    mz, ii = spec.get_peaks()
    assert len(mz) == len(ii)
    assert len(mz) == 1
    spec.set_peaks((mz, ii))
    mz0, ii0 = spec.get_peaks()
    assert mz0 == mz
    assert ii0 == ii
    assert int(spec.isSorted()) in (0,1)
    # peaks added one by one are visible both via [] and get_peaks()
    spec.clear(False)
    p = pyopenms.Peak1D()
    p.setMZ(1000.0)
    p.setIntensity(200.0)
    spec.push_back(p)
    p = pyopenms.Peak1D()
    p.setMZ(2000.0)
    p.setIntensity(400.0)
    spec.push_back(p)
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 1000.0
    assert spec[1].getMZ() == 2000.0
    assert spec[0].getIntensity() == 200.0
    assert spec[1].getIntensity() == 400.0
    assert mz[0] == 1000.0
    assert mz[1] == 2000.0
    assert ii[0] == 200.0
    assert ii[1] == 400.0
    # set_peaks round-trip: float64 m/z with float32 intensities
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # set_peaks round-trip: float64/float64
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float64)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # set_peaks round-trip: float32/float32
    spec.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float32)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    spec.set_peaks( [data_mz,data_i] )
    mz, ii = spec.get_peaks()
    assert spec[0].getMZ() == 5.0
    assert spec[1].getMZ() == 8.0
    assert spec[0].getIntensity() == 50.0
    assert spec[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # Integer data arrays via set_data().
    # NOTE(review): this section was truncated in the source; it is
    # reconstructed to mirror the float-data-array section below and to
    # match the surviving assertions ("...taArrays()[0][0] == 5",
    # "[0][2] == 42", "len(int_da[0].get_data()) == 3"). Confirm against
    # upstream pyOpenMS test sources.
    spec = pyopenms.MSSpectrum()
    data = np.array( [5, 8, 42] ).astype(np.intc)
    int_da = [ pyopenms.IntegerDataArray() ]
    int_da[0].set_data(data)
    spec.setIntegerDataArrays( int_da )
    assert len(spec.getIntegerDataArrays()) == 1
    assert spec.getIntegerDataArrays()[0][0] == 5
    assert spec.getIntegerDataArrays()[0][2] == 42
    assert len(int_da[0].get_data() ) == 3
    # Float data arrays built element-wise with push_back()
    spec = pyopenms.MSSpectrum()
    assert len(spec.getFloatDataArrays()) == 0
    f_da = [ pyopenms.FloatDataArray() ]
    f_da[0].push_back(5.0)
    f_da[0].push_back(6.0)
    f_da.append( pyopenms.FloatDataArray() )
    f_da[1].push_back(8.0)
    spec.setFloatDataArrays( f_da )
    assert len(spec.getFloatDataArrays()) == 2.0
    assert spec.getFloatDataArrays()[0][0] == 5.0
    assert spec.getFloatDataArrays()[1][0] == 8.0
    # Float data arrays filled in bulk with set_data()
    spec = pyopenms.MSSpectrum()
    data = np.array( [5, 8, 42] ).astype(np.float32)
    f_da = [ pyopenms.FloatDataArray() ]
    f_da[0].set_data(data)
    spec.setFloatDataArrays( f_da )
    assert len(spec.getFloatDataArrays()) == 1
    assert spec.getFloatDataArrays()[0][0] == 5.0
    assert spec.getFloatDataArrays()[0][2] == 42.0
    assert len(f_da[0].get_data() ) == 3
@report
def testStringDataArray():
    """Exercise StringDataArray: element access, clear() and resize()."""
    arr = pyopenms.StringDataArray()
    assert arr.size() == 0
    for word in ("hello", "world"):
        arr.push_back(word)
    assert arr.size() == 2
    # stored strings come back as bytes
    assert arr[0] == b"hello"
    assert arr[1] == b"world"
    arr[1] = b"hello world"
    assert arr[1] == b"hello world", arr[1]
    arr.clear()
    assert arr.size() == 0
    arr.push_back("hello")
    assert arr.size() == 1
    # resize then assign every slot by index
    arr.resize(3)
    for idx, val in enumerate((b"hello", b"", b"world")):
        arr[idx] = val
    assert arr.size() == 3
@report
def testIntegerDataArray():
    """Exercise IntegerDataArray: element access, resize() and the
    get_data()/set_data() numpy round-trip."""
    arr = pyopenms.IntegerDataArray()
    assert arr.size() == 0
    for value in (1, 4):
        arr.push_back(value)
    assert arr.size() == 2
    assert arr[0] == 1
    assert arr[1] == 4
    arr[1] = 7
    assert arr[1] == 7
    arr.clear()
    assert arr.size() == 0
    arr.push_back(1)
    assert arr.size() == 1
    # resize then fill slots 0..2 with 1..3
    arr.resize(3)
    for idx in range(3):
        arr[idx] = idx + 1
    assert arr.size() == 3
    # round-trip through numpy: append one value and write it back
    extended = np.append(arr.get_data(), 4).astype(np.intc)
    arr.set_data(extended)
    assert arr.size() == 4
@report
def testFloatDataArray():
    """Exercise FloatDataArray: element access, resize() and the
    get_data()/set_data() numpy round-trip."""
    arr = pyopenms.FloatDataArray()
    assert arr.size() == 0
    for value in (1.0, 4.0):
        arr.push_back(value)
    assert arr.size() == 2
    assert arr[0] == 1.0
    assert arr[1] == 4.0
    arr[1] = 7.0
    assert arr[1] == 7.0
    arr.clear()
    assert arr.size() == 0
    arr.push_back(1.0)
    assert arr.size() == 1
    # resize then fill slots 0..2 with 1.0..3.0
    arr.resize(3)
    for idx in range(3):
        arr[idx] = idx + 1.0
    assert arr.size() == 3
    # round-trip through numpy: append one value and write it back
    extended = np.append(arr.get_data(), 4.0).astype(np.float32)
    arr.set_data(extended)
    assert arr.size() == 4
@report
def testMSChromatogram():
    """Exercise MSChromatogram: copy semantics, peak access and the
    get_peaks()/set_peaks() numpy round-trips for several dtypes."""
    chrom = pyopenms.MSChromatogram()
    chrom_ = copy.copy(chrom)
    assert chrom_ == chrom
    chrom_ = copy.deepcopy(chrom)
    assert chrom_ == chrom
    chrom_ = pyopenms.MSChromatogram(chrom)
    assert chrom_ == chrom
    _testMetaInfoInterface(chrom)
    chrom.setName("chrom")
    assert chrom.getName() == "chrom"
    p = pyopenms.ChromatogramPeak()
    p.setRT(1000.0)
    p.setIntensity(200.0)
    chrom.push_back(p)
    assert chrom.size() == 1
    assert chrom[0] == p
    chrom.updateRanges()
    assert isinstance(chrom.findNearest(0.0), int)
    assert isinstance(chrom.getMin()[0], float)
    assert isinstance(chrom.getMax()[0], float)
    assert isinstance(chrom.getMinInt(), float)
    assert isinstance(chrom.getMaxInt(), float)
    assert chrom == chrom
    assert not chrom != chrom
    # get_peaks returns (RT, intensity) arrays of equal length
    mz, ii = chrom.get_peaks()
    assert len(mz) == len(ii)
    assert len(mz) == 1
    chrom.set_peaks((mz, ii))
    mz0, ii0 = chrom.get_peaks()
    assert mz0 == mz
    assert ii0 == ii
    assert int(chrom.isSorted()) in (0,1)
    # peaks added one by one are visible both via [] and get_peaks()
    chrom.clear(False)
    p = pyopenms.ChromatogramPeak()
    p.setRT(1000.0)
    p.setIntensity(200.0)
    chrom.push_back(p)
    p = pyopenms.ChromatogramPeak()
    p.setRT(2000.0)
    p.setIntensity(400.0)
    chrom.push_back(p)
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 1000.0
    assert chrom[1].getRT() == 2000.0
    assert chrom[0].getIntensity() == 200.0
    assert chrom[1].getIntensity() == 400.0
    assert mz[0] == 1000.0
    assert mz[1] == 2000.0
    assert ii[0] == 200.0
    assert ii[1] == 400.0
    # set_peaks round-trip: float64 RT with float32 intensities
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # set_peaks round-trip: float64/float64
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float64)
    data_i = np.array( [50.0, 80.0] ).astype(np.float64)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
    # set_peaks round-trip: float32/float32
    chrom.clear(False)
    data_mz = np.array( [5.0, 8.0] ).astype(np.float32)
    data_i = np.array( [50.0, 80.0] ).astype(np.float32)
    chrom.set_peaks( [data_mz,data_i] )
    mz, ii = chrom.get_peaks()
    assert chrom[0].getRT() == 5.0
    assert chrom[1].getRT() == 8.0
    assert chrom[0].getIntensity() == 50.0
    assert chrom[1].getIntensity() == 80.0
    assert mz[0] == 5.0
    assert mz[1] == 8.0
    assert ii[0] == 50.0
    assert ii[1] == 80.0
@report
def testMRMFeature():
    """Exercise MRMFeature: sub-feature and precursor-feature registries
    plus the OpenSwath score container."""
    mrmfeature = pyopenms.MRMFeature()
    f = pyopenms.Feature()
    fs = mrmfeature.getFeatures()
    assert len(fs) == 0
    mrmfeature.addFeature(f, "myFeature")
    fs = mrmfeature.getFeatures()
    assert len(fs) == 1
    assert mrmfeature.getFeature("myFeature") is not None
    # getFeatureIDs fills the supplied list in place
    slist = []
    mrmfeature.getFeatureIDs(slist)
    assert len(slist) == 1
    mrmfeature.addPrecursorFeature(f, "myFeature_Pr0")
    slist = []
    mrmfeature.getPrecursorFeatureIDs(slist)
    assert len(slist) == 1
    assert mrmfeature.getPrecursorFeature("myFeature_Pr0") is not None
    # scores round-trip through setScores/getScores
    s = mrmfeature.getScores()
    assert abs(s.yseries_score - 0.0) < 1e-4
    s.yseries_score = 4.0
    mrmfeature.setScores(s)
    s2 = mrmfeature.getScores()
    assert abs(s2.yseries_score - 4.0) < 1e-4


@report
def testConfidenceScoring():
    """Smoke-test: ConfidenceScoring can be constructed."""
    scoring = pyopenms.ConfidenceScoring()


@report
def testMRMDecoy():
    """Smoke-test MRMDecoy construction and decoy-generation entry point."""
    mrmdecoy = pyopenms.MRMDecoy()
    assert mrmdecoy is not None
    assert pyopenms.MRMDecoy().generateDecoys is not None


@report
def testMRMTransitionGroup():
    """Exercise MRMTransitionGroupCP: group id and transition registry."""
    mrmgroup = pyopenms.MRMTransitionGroupCP()
    assert mrmgroup is not None
    mrmgroup.setTransitionGroupID("this_id")
    assert mrmgroup.getTransitionGroupID() == "this_id"
    assert len(mrmgroup.getTransitions()) == 0
    mrmgroup.addTransition(pyopenms.ReactionMonitoringTransition(), "tr1")
    assert len(mrmgroup.getTransitions()) == 1


@report
def testReactionMonitoringTransition():
    """Smoke-test: ReactionMonitoringTransition can be constructed."""
    tr = pyopenms.ReactionMonitoringTransition()
@report
def testTargetedExperiment():
    """Exercise TargetedExperiment: copy semantics and get/set round-trips."""
    m = pyopenms.TargetedExperiment()
    m_ = copy.copy(m)
    assert m_ == m
    m_ = copy.deepcopy(m)
    assert m_ == m
    m_ = pyopenms.TargetedExperiment(m)
    assert m_ == m
    m.clear(True)
    m.setCVs(m.getCVs())
    targeted = m
    targeted.setCVs(targeted.getCVs())
    targeted.setTargetCVTerms(targeted.getTargetCVTerms())
    targeted.setPeptides(targeted.getPeptides())
    targeted.setProteins(targeted.getProteins())
    targeted.setTransitions(targeted.getTransitions())
    assert m == m
    assert not m != m


@report
def testTargetedExperimentHelper():
    """Exercise the TargetedExperimentHelper types: RetentionTime enums,
    Peptide and Compound charge/retention-time accessors."""
    # RTUnit / RTType enum members are constructible and addressable
    rtu = pyopenms.RetentionTime.RTUnit()
    rtu = pyopenms.RetentionTime.RTUnit.SECOND
    rtu = pyopenms.RetentionTime.RTUnit.MINUTE
    rtt = pyopenms.RetentionTime.RTType()
    rtt = pyopenms.RetentionTime.RTType.LOCAL
    rtt = pyopenms.RetentionTime.RTType.NORMALIZED
    rtt = pyopenms.RetentionTime.RTType.IRT
    rt = pyopenms.RetentionTime()
    assert rt.software_ref is not None
    assert not rt.isRTset()
    rt.setRT(5.0)
    rt.retention_time_unit = pyopenms.RetentionTime.RTUnit.SECOND
    rt.retention_time_type = pyopenms.RetentionTime.RTType.NORMALIZED
    assert rt.isRTset()
    assert rt.getRT() == 5.0
    # Peptide: charge and retention-time state
    p = pyopenms.Peptide()
    assert p.rts is not None
    assert p.id is not None
    assert p.protein_refs is not None
    assert p.evidence is not None
    assert p.sequence is not None
    assert p.mods is not None
    assert not p.hasCharge()
    p.setChargeState(5)
    assert p.hasCharge()
    assert p.getChargeState() == 5
    assert not p.hasRetentionTime()
    p.rts = [rt]
    assert p.hasRetentionTime()
    assert p.getRetentionTime() == 5.0
    assert p.getRetentionTimeUnit() == pyopenms.RetentionTime.RTUnit.SECOND
    assert p.getRetentionTimeType() == pyopenms.RetentionTime.RTType.NORMALIZED
    # Compound: same contract as Peptide
    c = pyopenms.Compound()
    assert c.rts is not None
    assert c.id is not None
    assert c.molecular_formula is not None
    assert c.smiles_string is not None
    assert c.theoretical_mass is not None
    assert not c.hasCharge()
    c.setChargeState(5)
    assert c.hasCharge()
    assert c.getChargeState() == 5
    assert not c.hasRetentionTime()
    c.rts = [rt]
    assert c.hasRetentionTime()
    assert c.getRetentionTime() == 5.0
    assert c.getRetentionTimeUnit() == pyopenms.RetentionTime.RTUnit.SECOND
    assert c.getRetentionTimeType() == pyopenms.RetentionTime.RTType.NORMALIZED


@report
def testMapAlignment():
    """Exercise MapAlignmentAlgorithmPoseClustering parameter handling and
    check the alignment entry points exist."""
    ma = pyopenms.MapAlignmentAlgorithmPoseClustering()
    assert isinstance(ma.getDefaults(), pyopenms.Param)
    assert isinstance(ma.getParameters(), pyopenms.Param)
    _testStrOutput(ma.getName())
    ma.setName(ma.getName())
    ma.getDefaults()
    ma.getParameters()
    ma.setParameters(ma.getDefaults())
    # attribute-existence checks only (no call)
    ma.setReference
    ma.align
    pyopenms.MapAlignmentTransformer.transformRetentionTimes
@report
def testMatrixDouble():
    """Exercise MatrixDouble: resize/fill, element access and the
    get_matrix / get_matrix_as_view / set_matrix numpy round-trips."""
    m = pyopenms.MatrixDouble()
    N = 90
    m.resize(N-1, N+2, 5.0)
    assert m.rows() == 89
    assert m.cols() == 92
    rows = N-1
    cols = N+2
    # element-wise copy into numpy must reproduce the fill value everywhere
    test = []
    for i in range(int(rows)):
        for j in range(int(cols)):
            test.append( m.getValue(i,j) )
    testm = np.asarray(test)
    testm = testm.reshape(rows, cols)
    assert sum(sum(testm)) == 40940.0
    assert sum(sum(testm)) == (N-1)*(N+2)*5
    matrix = m.get_matrix()
    assert sum(sum(matrix)) == 40940.0
    assert sum(sum(matrix)) == (N-1)*(N+2)*5
    matrix_view = m.get_matrix_as_view()
    assert sum(sum(matrix_view)) == 40940.0
    assert sum(sum(matrix_view)) == (N-1)*(N+2)*5
    # single-element read/write
    # NOTE(review): the next line was truncated to "ue(3, 5) == 5.0" in the
    # source; restored as the assert on the pre-write fill value.
    assert m.getValue(3, 5) == 5.0
    m.setValue(3, 5, 8.0)
    assert m.getValue(3, 5) == 8.0
    mat = m.get_matrix_as_view()
    assert mat[3, 5] == 8.0
    mat = m.get_matrix()
    assert m.getValue(3, 5) == 8.0
    assert mat[3, 5] == 8.0
    # a view writes through to the underlying matrix
    matrix_view = m.get_matrix_as_view()
    matrix_view[1, 6] = 11.0
    assert m.getValue(1, 6) == 11.0
    assert matrix_view[1, 6] == 11.0
    m.clear()
    assert m.rows() == 0
    assert m.cols() == 0
    # get_matrix() returned a copy, so it survives clear() and can be
    # re-imported via set_matrix()
    mat[3, 6] = 9.0
    m.set_matrix(mat)
    assert m.getValue(3, 5) == 8.0
    assert m.getValue(3, 6) == 9.0
@report
def testMapAlignmentIdentification():
    """Smoke-test MapAlignmentAlgorithmIdentification entry points."""
    ma = pyopenms.MapAlignmentAlgorithmIdentification()
    assert pyopenms.MapAlignmentAlgorithmIdentification().align is not None
    assert pyopenms.MapAlignmentAlgorithmIdentification().setReference is not None


@report
def testMapAlignmentTransformer():
    """Smoke-test MapAlignmentTransformer entry points."""
    ma = pyopenms.MapAlignmentTransformer()
    assert pyopenms.MapAlignmentTransformer().transformRetentionTimes is not None


@report
def testMxxxFile():
    """Round-trip an MSExperiment through MzData, MzML (file and in-memory
    buffer) and MzXML; check MzQuantML entry points exist."""
    mse = pyopenms.MSExperiment()
    s = pyopenms.MSSpectrum()
    mse.addSpectrum(s)
    fh = pyopenms.MzDataFile()
    _testProgressLogger(fh)
    fh.store("test.mzData", mse)
    fh.load("test.mzData", mse)
    fh.setOptions(fh.getOptions())
    fh = pyopenms.MzMLFile()
    _testProgressLogger(fh)
    fh.store("test.mzML", mse)
    fh.load("test.mzML", mse)
    fh.setOptions(fh.getOptions())
    # in-memory round-trip via storeBuffer/loadBuffer
    myStr = pyopenms.String()
    fh.storeBuffer(myStr, mse)
    # the serialized mzML for this experiment has a fixed size
    assert len(myStr.toString()) == 5269
    mse2 = pyopenms.MSExperiment()
    fh.loadBuffer(bytes(myStr), mse2)
    assert mse2 == mse
    assert mse2.size() == 1
    fh = pyopenms.MzXMLFile()
    _testProgressLogger(fh)
    fh.store("test.mzXML", mse)
    fh.load("test.mzXML", mse)
    fh.setOptions(fh.getOptions())
    # attribute-existence checks only (no call)
    fh = pyopenms.MzQuantMLFile()
    fh.isSemanticallyValid
    fh.load
    fh.store


@report
def testParamXMLFile():
    """Round-trip an empty Param through ParamXMLFile store/load."""
    fh = pyopenms.ParamXMLFile()
    p = pyopenms.Param()
    fh.store("test.ini", p)
    fh.load("test.ini", p)
@report
def testPeak():
    """Exercise Peak1D and Peak2D getters/setters and self-equality."""
    one_d = pyopenms.Peak1D()
    one_d.setIntensity(12.0)
    assert one_d.getIntensity() == 12.0
    one_d.setMZ(13.0)
    assert one_d.getMZ() == 13.0
    assert one_d == one_d
    assert not one_d != one_d
    # Peak2D adds a retention-time dimension
    two_d = pyopenms.Peak2D()
    assert two_d == two_d
    assert not two_d != two_d
    two_d.setIntensity(22.0)
    assert two_d.getIntensity() == 22.0
    two_d.setMZ(23.0)
    assert two_d.getMZ() == 23.0
    two_d.setRT(45.0)
    assert two_d.getRT() == 45.0
@report
def testNumpressCoder():
    """Round-trip a float list through MSNumpressCoder linear encoding and
    check that encoding into a plain str is rejected."""
    # renamed from `np`: the original local shadowed the module-level
    # numpy alias used throughout this file
    coder = pyopenms.MSNumpressCoder()
    nc = pyopenms.NumpressConfig()
    nc.np_compression = coder.NumpressCompression.LINEAR
    nc.estimate_fixed_point = True
    tmp = pyopenms.String()
    out = []
    inp = [1.0, 2.0, 3.0]
    coder.encodeNP(inp, tmp, True, nc)
    res = tmp.toString()
    assert len(res) != 0, len(res)
    assert res != "", res
    coder.decodeNP(res, out, True, nc)
    assert len(out) == 3, (out, res)
    assert out == inp, out
    # encodeNP needs a pyopenms.String output buffer; a plain str must fail
    res = ""
    try:
        coder.encodeNP(inp, res, True, nc)
        has_error = False
    except AssertionError:
        has_error = True
    assert has_error
@report
def testNumpressConfig():
    """Exercise the NumpressConfig attribute surface."""
    n = pyopenms.MSNumpressCoder()
    # renamed from `np`: the original local shadowed the module-level
    # numpy alias used throughout this file
    cfg = pyopenms.NumpressConfig()
    cfg.np_compression = n.NumpressCompression.LINEAR
    assert cfg.np_compression == n.NumpressCompression.LINEAR
    cfg.numpressFixedPoint = 4.2
    cfg.numpressErrorTolerance = 4.2
    cfg.estimate_fixed_point = True
    cfg.linear_fp_mass_acc = 4.2
    cfg.setCompression("linear")
@report
def testBase64():
    """Round-trip a float list through 64-bit and 32-bit base64 coding."""
    b = pyopenms.Base64()
    out = pyopenms.String()
    inp = [1.0, 2.0, 3.0]
    b.encode64(inp, b.ByteOrder.BYTEORDER_LITTLEENDIAN, out, False)
    res = out.toString()
    assert len(res) != 0
    assert res != ""
    # decode64 fills the supplied list in place
    convBack = []
    b.decode64(res, b.ByteOrder.BYTEORDER_LITTLEENDIAN, convBack, False)
    assert convBack == inp, convBack
    out = pyopenms.String()
    b.encode32(inp, b.ByteOrder.BYTEORDER_LITTLEENDIAN, out, False)
    res = out.toString()
    assert len(res) != 0
    assert res != ""
    convBack = []
    b.decode32(res, b.ByteOrder.BYTEORDER_LITTLEENDIAN, convBack, False)
    assert convBack == inp, convBack


@report
def testPeakFileOptions():
    """Smoke-test the PeakFileOptions accessor surface (existence + calls)."""
    pfo = pyopenms.PeakFileOptions()
    pfo.addMSLevel
    pfo.clearMSLevels()
    pfo.containsMSLevel(1)
    pfo.getCompression()
    pfo.getMSLevels()
    pfo.getMetadataOnly()
    pfo.getWriteSupplementalData()
    pfo.hasMSLevels()
    pfo.setCompression
    pfo.setMSLevels
    pfo.setMetadataOnly
    pfo.setWriteSupplementalData


@report
def testMRMMapping():
    """Map an experiment against an empty TargetedExperiment: with no
    transitions nothing maps, so the output has no chromatograms."""
    p = pyopenms.MRMMapping()
    assert p.mapExperiment is not None
    e = pyopenms.MSExperiment()
    c = pyopenms.MSChromatogram()
    e.addChromatogram(c)
    assert e.getNrChromatograms() == 1
    o = pyopenms.MSExperiment()
    t = pyopenms.TargetedExperiment()
    p.mapExperiment(e, t, o)
    assert o.getNrChromatograms() == 0


@report
def testPeakPickerMRM():
    """Smoke-test PeakPickerMRM entry point."""
    p = pyopenms.PeakPickerMRM()
    assert p.pickChromatogram is not None


@report
def testPeakPickerHiRes():
    """Smoke-test PeakPickerHiRes entry points."""
    p = pyopenms.PeakPickerHiRes()
    assert p.pick is not None
    assert p.pickExperiment is not None


@report
def testPeakTypeEstimator():
    """Smoke-test: estimateType accepts an empty spectrum."""
    pyopenms.PeakTypeEstimator().estimateType(pyopenms.MSSpectrum())
@report
def testPeptideHit():
    """Exercise PeptideHit: evidence list, score, rank and sequence."""
    ph = pyopenms.PeptideHit()
    assert ph == ph
    assert not ph != ph
    # (score, rank, charge, sequence) constructor
    ph = pyopenms.PeptideHit(1.0, 1, 0, pyopenms.AASequence.fromString("A"))
    _testMetaInfoInterface(ph)
    assert len(ph.getPeptideEvidences()) == 0
    assert ph.getPeptideEvidences() == []
    pe = pyopenms.PeptideEvidence()
    pe.setProteinAccession('B_id')
    ph.addPeptideEvidence(pe)
    assert len(ph.getPeptideEvidences()) == 1
    assert ph.getPeptideEvidences()[0].getProteinAccession() == 'B_id'
    # setPeptideEvidences replaces the whole list
    ph.setPeptideEvidences([pe,pe])
    assert len(ph.getPeptideEvidences()) == 2
    assert ph.getPeptideEvidences()[0].getProteinAccession() == 'B_id'
    assert ph.getScore() == 1.0
    assert ph.getRank() == 1
    assert ph.getSequence().toString() == "A"
    ph.setScore(2.0)
    assert ph.getScore() == 2.0
    ph.setRank(30)
    assert ph.getRank() == 30
    ph.setSequence(pyopenms.AASequence.fromString("AAA"))
    assert ph.getSequence().toString() == "AAA"
    assert ph == ph
    assert not ph != ph


@report
def testPeptideEvidence():
    """Exercise PeptideEvidence: accession, flanking residues, start/end."""
    pe = pyopenms.PeptideEvidence()
    assert pe == pe
    assert not pe != pe
    pe.setProteinAccession('B_id')
    assert pe.getProteinAccession() == "B_id"
    # flanking amino acids are set as single bytes, returned as str
    pe.setAABefore(b'A')
    assert pe.getAABefore() == 'A'
    pe.setAAAfter(b'C')
    assert pe.getAAAfter() == 'C'
    pe.setStart(5)
    assert pe.getStart() == 5
    pe.setEnd(9)
    assert pe.getEnd() == 9
    assert pe == pe
    assert not pe != pe


@report
def testPeptideIdentification():
    """Exercise PeptideIdentification: hit insertion, score metadata,
    ranking and sorting."""
    pi = pyopenms.PeptideIdentification()
    _testMetaInfoInterface(pi)
    assert pi == pi
    assert not pi != pi
    pe = pyopenms.PeptideEvidence()
    pe.setProteinAccession('B_id')
    ph = pyopenms.PeptideHit(1.0, 1, 0, pyopenms.AASequence.fromString("A"))
    ph.addPeptideEvidence(pe)
    pi.insertHit(ph)
    phx, = pi.getHits()
    assert phx == ph
    pi.setHits([ph])
    phx, = pi.getHits()
    assert phx == ph
    # getReferencingHits fills the supplied set in place
    rv = set([])
    peptide_hits = pi.getReferencingHits(pi.getHits(), rv)
    assert rv == set([])
    assert isinstance(pi.getSignificanceThreshold(), float)
    _testStrOutput(pi.getScoreType())
    pi.setScoreType("A")
    assert isinstance(pi.isHigherScoreBetter(), int)
    _testStrOutput(pi.getIdentifier())
    pi.setIdentifier("id")
    pi.assignRanks()
    pi.sort()
    assert not pi.empty()
    pi.setSignificanceThreshold(6.0)
@report
def testPolarity():
    """Every IonSource.Polarity enum member must be exposed as an int."""
    for member in ("NEGATIVE", "POLNULL", "POSITIVE"):
        assert isinstance(getattr(pyopenms.IonSource.Polarity, member), int)
@report
def testPrecursor():
    """Exercise the Precursor accessor surface."""
    pc = pyopenms.Precursor()
    pc.setMZ(123.0)
    pc.setIntensity(12.0)
    assert pc.getMZ() == 123.0
    assert pc.getIntensity() == 12.0
    pc.setActivationMethods(pc.getActivationMethods())
    pc.setActivationEnergy(6.0)
    pc.getActivationEnergy()
    pc.setIsolationWindowUpperOffset(500.0)
    pc.getIsolationWindowUpperOffset()
    pc.setIsolationWindowLowerOffset(600.0)
    pc.getIsolationWindowLowerOffset()
    pc.setCharge(2)
    pc.getCharge()
    pc.setPossibleChargeStates(pc.getPossibleChargeStates())
    pc.getUnchargedMass()
@report
def testProcessingAction():
    """Every ProcessingAction enum member must be exposed as an int."""
    members = (
        "ALIGNMENT", "BASELINE_REDUCTION", "CALIBRATION",
        "CHARGE_CALCULATION", "CHARGE_DECONVOLUTION", "CONVERSION_DTA",
        "CONVERSION_MZDATA", "CONVERSION_MZML", "CONVERSION_MZXML",
        "DATA_PROCESSING", "DEISOTOPING", "FEATURE_GROUPING",
        "FILTERING", "FORMAT_CONVERSION", "IDENTIFICATION_MAPPING",
        "NORMALIZATION", "PEAK_PICKING", "PRECURSOR_RECALCULATION",
        "QUANTITATION", "SIZE_OF_PROCESSINGACTION", "SMOOTHING",
    )
    for member in members:
        assert isinstance(getattr(pyopenms.ProcessingAction, member), int)
@report
def testProduct():
    """Exercise Product: m/z and isolation-window accessors."""
    p = pyopenms.Product()
    p.setMZ(12.0)
    p.setIsolationWindowLowerOffset(10.0)
    p.setIsolationWindowUpperOffset(15.0)
    assert p.getMZ() == 12.0
    assert p.getIsolationWindowLowerOffset() == 10.0
    assert p.getIsolationWindowUpperOffset() == 15.0
    assert p == p
    assert not p != p


@report
def testProteinHit():
    """Exercise ProteinHit accessors (accession, coverage, rank, score,
    sequence)."""
    ph = pyopenms.ProteinHit()
    assert ph == ph
    assert not ph != ph
    _testMetaInfoInterface(ph)
    ph.setAccession("A")
    ph.setCoverage(0.5)
    ph.setRank(2)
    ph.setScore(1.5)
    ph.setSequence("ABA")
    assert ph.getAccession() == ("A")
    assert ph.getCoverage() == (0.5)
    assert ph.getRank() == (2)
    assert ph.getScore() == (1.5)
    assert ph.getSequence() == ("ABA")


@report
def testProteinIdentification():
    """Exercise ProteinIdentification: hit registry and PeakMassType enum."""
    pi = pyopenms.ProteinIdentification()
    _testMetaInfoInterface(pi)
    assert pi == pi
    assert not pi != pi
    assert pi.getHits() == []
    ph = pyopenms.ProteinHit()
    pi.insertHit(ph)
    ph2, = pi.getHits()
    assert ph2 == ph
    pi.setHits([ph])
    ph2, = pi.getHits()
    assert ph2 == ph
    assert isinstance(pyopenms.ProteinIdentification.PeakMassType.MONOISOTOPIC, int)
    assert isinstance(pyopenms.ProteinIdentification.PeakMassType.AVERAGE, int)


@report
def testRichPeak():
    """Exercise RichPeak2D: meta-info, unique id and m/z, intensity, RT."""
    p2 = pyopenms.RichPeak2D()
    _testMetaInfoInterface(p2)
    _testUniqueIdInterface(p2)
    assert p2 == p2
    assert not p2 != p2
    p2.setMZ(22.0)
    p2.setIntensity(23.0)
    p2.setRT(43.0)
    assert p2.getMZ() == (22.0)
    assert p2.getIntensity() == (23.0)
    assert p2.getRT() == (43.0)
@report
def testSoftware():
    """Exercise Software name/version accessors."""
    sw = pyopenms.Software()
    sw.setName("name")
    sw.setVersion("1.0.0")
    assert sw.getName() == "name"
    assert sw.getVersion() == "1.0.0"


@report
def testSourceFile():
    """Exercise SourceFile: name, path, type and checksum accessors."""
    sf = pyopenms.SourceFile()
    sf.setNameOfFile("file.txt")
    assert sf.getNameOfFile() == "file.txt"
    sf.setPathToFile("file.txt")
    assert sf.getPathToFile() == "file.txt"
    sf.setFileType(".txt")
    assert sf.getFileType() == ".txt"
    sf.setChecksum("abcde000", pyopenms.ChecksumType.UNKNOWN_CHECKSUM)
    assert sf.getChecksum() == "abcde000"
    assert sf.getChecksumType() in (pyopenms.ChecksumType.UNKNOWN_CHECKSUM,
                                    pyopenms.ChecksumType.SHA1,
                                    pyopenms.ChecksumType.MD5)
@report
def testSpectrumSetting(s=None):
    """Exercise the SpectrumSettings accessor surface.

    Also called from testMSSpectrum with an MSSpectrum argument, hence the
    isinstance guard around unify(). The original default of
    ``s=pyopenms.SpectrumSettings()`` was a mutable default argument
    (evaluated once at definition time and shared by every no-arg call);
    replaced by a ``None`` sentinel with a fresh instance per call.
    """
    if s is None:
        s = pyopenms.SpectrumSettings()
    assert s.getType() in [ pyopenms.SpectrumSettings.SpectrumType.UNKNOWN,
                            pyopenms.SpectrumSettings.SpectrumType.PEAKS,
                            pyopenms.SpectrumSettings.SpectrumType.RAWDATA]
    assert isinstance(s.getAcquisitionInfo(), pyopenms.AcquisitionInfo)
    assert isinstance(s.getInstrumentSettings(), pyopenms.InstrumentSettings)
    assert isinstance(s.getSourceFile(), pyopenms.SourceFile)
    assert isinstance(s.getPeptideIdentifications(), list)
    assert isinstance(s.getDataProcessing(), list)
    # get/set round-trips
    s.setAcquisitionInfo(s.getAcquisitionInfo())
    s.setInstrumentSettings(s.getInstrumentSettings())
    s.setSourceFile(s.getSourceFile())
    s.setPeptideIdentifications(s.getPeptideIdentifications())
    s.setDataProcessing(s.getDataProcessing())
    s.setComment(s.getComment())
    s.setPrecursors(s.getPrecursors())
    s.setProducts(s.getProducts())
    s.setType(s.getType())
    s.setNativeID(s.getNativeID())
    s.setType(s.getType())
    # unify only applies to real SpectrumSettings, not MSSpectrum callers
    if isinstance(s, pyopenms.SpectrumSettings):
        s.unify(s)
@report
def testTransformationDescription():
    """Smoke-test TransformationDescription entry points."""
    td = pyopenms.TransformationDescription()
    assert td.getDataPoints() == []
    assert isinstance(td.apply(0.0), float)
    # attribute-existence checks only (no call)
    td.fitModel
    p = td.getModelParameters()
    td.getModelType()
    td.invert


@report
def testTransformationModels():
    """Each transformation model must evaluate three data points and
    expose its default parameters."""
    for clz in [pyopenms.TransformationModelLinear,
                pyopenms.TransformationModelBSpline,
                pyopenms.TransformationModelInterpolated,
                pyopenms.TransformationModelLowess]:
        p = pyopenms.Param()
        data = [ pyopenms.TM_DataPoint(9.0, 8.9),
                 pyopenms.TM_DataPoint(5.0, 6.0),
                 pyopenms.TM_DataPoint(8.0, 8.0) ]
        mod = clz(data, p)
        mod.evaluate(7.0)
        mod.getDefaultParameters(p)


@report
def testTransformationXMLFile():
    """Round-trip an empty TransformationDescription through XML."""
    fh = pyopenms.TransformationXMLFile()
    td = pyopenms.TransformationDescription()
    fh.store("test.transformationXML", td)
    fh.load("test.transformationXML", td, True)
    assert td.getDataPoints() == []


@report
def testIBSpectraFile():
    """Storing an empty ConsensusMap must raise a RuntimeError."""
    fh = pyopenms.IBSpectraFile()
    cmap = pyopenms.ConsensusMap()
    correctError = False
    try:
        fh.store( pyopenms.String("test.ibspectra.file"), cmap)
        # unreachable unless store() wrongly succeeds
        assert False
    except RuntimeError:
        correctError = True
    assert correctError


@report
def testSwathFile():
    """Smoke-test: SwathFile can be constructed."""
    fh = pyopenms.SwathFile()
@report
def testType():
    """Every FileType enum member must be exposed as an int."""
    members = (
        "CONSENSUSXML", "DTA", "DTA2D", "EDTA", "FASTA", "FEATUREXML",
        "GELML", "HARDKLOER", "IDXML", "INI", "KROENIK", "MASCOTXML",
        "MGF", "MS2", "MSP", "MZDATA", "MZIDENTML", "MZML", "MZXML",
        "OMSSAXML", "PEPLIST", "PEPXML", "PNG", "PROTXML",
        "SIZE_OF_TYPE", "TOPPAS", "TRAML", "TRANSFORMATIONXML", "TSV",
        "UNKNOWN", "XMASS",
    )
    for member in members:
        assert isinstance(getattr(pyopenms.FileType, member), int)
@report
def testVersion():
    """Exercise VersionInfo strings and VersionDetails parsing/ordering."""
    _testStrOutput(pyopenms.VersionInfo.getVersion())
    _testStrOutput(pyopenms.VersionInfo.getRevision())
    _testStrOutput(pyopenms.VersionInfo.getTime())
    vd = pyopenms.VersionDetails.create("19.2.1")
    assert vd.version_major == 19
    assert vd.version_minor == 2
    assert vd.version_patch == 1
    # pre-release suffix is split off into its own field
    vd = pyopenms.VersionDetails.create("19.2.1-alpha")
    assert vd.version_major == 19
    assert vd.version_minor == 2
    assert vd.version_patch == 1
    assert vd.pre_release_identifier == "alpha"
    assert vd == vd
    assert not vd < vd
    assert not vd > vd
    assert isinstance(pyopenms.version.version, str)


@report
def testInspectInfile():
    """Smoke-test InspectInfile: fresh instance has no modifications."""
    inst = pyopenms.InspectInfile()
    assert inst.getModifications is not None
    mods = inst.getModifications()
    assert len(mods) == 0


@report
def testIsotopeMarker():
    """Smoke-test IsotopeMarker: apply() on an empty spectrum."""
    inst = pyopenms.IsotopeMarker()
    ptr = inst.create()
    assert ptr.apply is not None
    res = {}
    spec = pyopenms.MSSpectrum()
    ptr.apply(res, spec)


@report
def testAttachment():
    """Exercise the QC Attachment attribute surface."""
    inst = pyopenms.Attachment()
    assert inst.name is not None
    assert inst.value is not None
    assert inst.cvRef is not None
    assert inst.cvAcc is not None
    assert inst.unitRef is not None
    assert inst.unitAcc is not None
    assert inst.binary is not None
    assert inst.qualityRef is not None
    assert inst.colTypes is not None
    assert inst.tableRows is not None
    assert inst.toXMLString is not None
    assert inst.toCSVString is not None
    inst.name = "test"
    inst.value = "test"
    inst.cvRef = "test"
    inst.cvAcc = "test"
    inst.unitRef = "test"
    inst.unitAcc = "test"
    inst.binary = "test"
    inst.qualityRef = "test"
    # table fields hold bytes
    inst.colTypes = [ b"test", b"test2"]
    inst.tableRows = [ [b"test", b"test2"], [b"otherTest"] ]
    assert inst.tableRows[1][0] == b"otherTest"
@report
def testOptimizePeakDeconvolution():
    """Smoke-test OptimizePeakDeconvolution and its helper structs."""
    inst = pyopenms.OptimizePeakDeconvolution()
    assert inst.getParameters
    assert inst.getPenalties is not None
    assert inst.setPenalties is not None
    assert inst.getCharge is not None
    assert inst.setCharge is not None
    assert inst.optimize is not None
    inst = pyopenms.PenaltyFactorsIntensity()
    assert inst.height is not None
    inst = pyopenms.OptimizePeakDeconvolution_Data()
    assert inst.peaks is not None
    assert inst.peaks is not None
    assert inst.signal is not None
    assert inst.penalties is not None
    assert inst.charge is not None
@report
def testKernelMassTrace():
    """Kernel_MassTrace must expose its full accessor/update surface;
    finally call getSize() on an empty trace."""
    trace = pyopenms.Kernel_MassTrace()
    expected_api = (
        "getSize", "getLabel", "setLabel",
        "getCentroidMZ", "getCentroidRT", "getCentroidSD",
        "getFWHM", "getTraceLength", "getFWHMborders",
        "getSmoothedIntensities", "getAverageMS1CycleTime",
        "computeSmoothedPeakArea", "computePeakArea",
        "findMaxByIntPeak", "estimateFWHM",
        "computeFwhmArea", "computeFwhmAreaSmooth",
        "getIntensity", "getMaxIntensity", "getConvexhull",
        "setCentroidSD", "setSmoothedIntensities",
        "updateSmoothedMaxRT", "updateWeightedMeanRT",
        "updateSmoothedWeightedMeanRT", "updateMedianRT",
        "updateMedianMZ", "updateMeanMZ", "updateWeightedMeanMZ",
        "updateWeightedMZsd",
    )
    for attr in expected_api:
        assert getattr(trace, attr) is not None
    s = trace.getSize()
@report
def testElutionPeakDetection():
    """ElutionPeakDetection must expose its API; smoothData() must accept
    an empty mass trace."""
    detection = pyopenms.ElutionPeakDetection()
    expected_api = (
        "detectPeaks", "filterByPeakWidth",
        "computeMassTraceNoise", "computeMassTraceSNR",
        "computeApexSNR", "findLocalExtrema", "smoothData",
    )
    for attr in expected_api:
        assert getattr(detection, attr) is not None
    trace = pyopenms.Kernel_MassTrace()
    detection.smoothData(trace, 4)
@report
def testIndexedMzMLDecoder():
    """findIndexListOffset on garbage input must raise a RuntimeError."""
    decoder = pyopenms.IndexedMzMLDecoder()
    try:
        pos = decoder.findIndexListOffset("abcde", 100)
        # raising a plain Exception here is deliberate: it is NOT caught by
        # the RuntimeError clause below, so the test fails if no error came
        raise Exception("Should raise an error")
    except RuntimeError:
        pass
def test_streampos():
    """streampos() must convert cleanly to an integer type.

    The original used the Python-2-only builtin ``long``, which raises
    NameError on Python 3; fall back to ``int`` there.
    """
    try:
        integer = long  # Python 2
    except NameError:
        integer = int   # Python 3: int subsumes long
    p = integer(pyopenms.streampos())
    assert isinstance(p, integer), "got %r" % p
def test_MapConversion():
    """Exercise MapConversion between FeatureMap, ConsensusMap and
    MSExperiment."""
    # FeatureMap -> ConsensusMap keeps the feature's RT
    feature = pyopenms.Feature()
    feature.setRT(99)
    cmap = pyopenms.ConsensusMap()
    fmap = pyopenms.FeatureMap()
    fmap.push_back(feature)
    pyopenms.MapConversion().convert(0, fmap, cmap, 1)
    assert(cmap.size() == 1)
    assert(cmap[0].getRT() == 99.0)
    # ConsensusMap -> FeatureMap round-trips the feature
    fmap = pyopenms.FeatureMap()
    pyopenms.MapConversion().convert(cmap, True, fmap)
    assert(fmap.size() == 1)
    assert(fmap[0].getRT() == 99.0)
    # MSExperiment -> ConsensusMap: one consensus feature per peak
    exp = pyopenms.MSExperiment()
    sp = pyopenms.MSSpectrum()
    peak = pyopenms.Peak1D()
    peak.setIntensity(10)
    peak.setMZ(20)
    sp.push_back(peak)
    exp.addSpectrum(sp)
    exp.addSpectrum(sp)
    cmap = pyopenms.ConsensusMap()
    pyopenms.MapConversion().convert(0, exp, cmap, 2)
    assert(cmap.size() == 2)
    assert(cmap[0].getIntensity() == 10.0)
    assert(cmap[0].getMZ() == 20.0)
def test_BSpline2d():
    """Smoke-test BSpline2d construction, evaluation and re-solving.

    NOTE(review): the ``abs(... - 5.0 < 0.01)`` assertions below
    parenthesize the comparison *inside* abs(), so any value below 5.01
    passes (abs of the boolean) — negative deviations are never caught.
    Left unchanged because the true spline values (especially
    derivative(6.0)) are unknown here; confirm intended tolerances before
    tightening to ``abs(x - 5.0) < 0.01``.
    """
    x = [1.0, 6.0, 8.0, 10.0, 15.0]
    y = [2.0, 5.0, 6.0, 12.0, 13.0]
    spline = pyopenms.BSpline2d(x,y,0, pyopenms.BoundaryCondition.BC_ZERO_ENDPOINTS, 0)
    assert spline.ok()
    assert abs(spline.eval(6.0) - 5.0 < 0.01)
    assert abs(spline.derivative(6.0) - 5.0 < 0.01)
    # re-solve against new y values and re-evaluate
    y_new = [4.0, 5.0, 6.0, 12.0, 13.0]
    spline.solve(y_new)
    assert spline.ok()
    assert abs(spline.eval(6.0) - 5.0 < 0.01)
@report
def testConsensusIDAlgorithmAverage():
    """ConsensusIDAlgorithmAverage is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmAverage().apply
@report
def testConsensusIDAlgorithmBest():
    """ConsensusIDAlgorithmBest is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmBest().apply
@report
def testConsensusIDAlgorithmIdentity():
    """ConsensusIDAlgorithmIdentity is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmIdentity().apply
@report
def testConsensusIDAlgorithmPEPIons():
    """ConsensusIDAlgorithmPEPIons is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmPEPIons().apply
@report
def testConsensusIDAlgorithmPEPMatrix():
    """ConsensusIDAlgorithmPEPMatrix is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmPEPMatrix().apply
@report
def testConsensusIDAlgorithmRanks():
    """ConsensusIDAlgorithmRanks is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmRanks().apply
@report
def testConsensusIDAlgorithmSimilarity():
    """ConsensusIDAlgorithmSimilarity is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmSimilarity().apply
@report
def testConsensusIDAlgorithmWorst():
    """ConsensusIDAlgorithmWorst is constructible and exposes apply()."""
    assert pyopenms.ConsensusIDAlgorithmWorst().apply
@report
def testDigestionEnzymeProtein():
    """DigestionEnzymeProtein is constructible from its full argument list.

    Consistency fix: assert on the constructed object, as every other
    constructor smoke test in this file does.
    """
    f = pyopenms.EmpiricalFormula()
    regex_description = ""
    psi_id = ""
    xtandem_id = ""
    comet_id = 0
    omssa_id = 0
    e = pyopenms.DigestionEnzymeProtein("testEnzyme", "K", set([]), regex_description,
                                        f, f, psi_id, xtandem_id, comet_id, omssa_id)
    assert e
@report
def testMRMAssay():
    """MRMAssay is constructible."""
    assay = pyopenms.MRMAssay()
    assert assay
@report
def testMRMIonSeries():
    """MRMIonSeries is constructible."""
    series = pyopenms.MRMIonSeries()
    assert series
@report
def testPeptideIndexing():
    """PeptideIndexing is constructible."""
    indexer = pyopenms.PeptideIndexing()
    assert indexer
@report
def testPeptideProteinResolution():
    """PeptideProteinResolution is constructible."""
    resolver = pyopenms.PeptideProteinResolution(False)
    assert resolver
@report
def testPercolatorOutfile():
    """PercolatorOutfile is constructible."""
    outfile = pyopenms.PercolatorOutfile()
    assert outfile
@report
def testHiddenMarkovModel():
    """HiddenMarkovModel / HMMState basic API.

    Fixed: four lines in the middle of this test were corrupted (truncated
    fragments such as ``m.getState(s("testState"))`` and
    ``nms.HMMState()``). They are reconstructed here as a state lookup on
    the model followed by a fresh HMMState construction.
    """
    hmm = pyopenms.HiddenMarkovModel()
    assert hmm
    assert hmm.getNumberOfStates() == 0
    ss = s("testState")
    hmm.addNewState(ss)
    assert hmm.getNumberOfStates() == 1
    # Reconstructed: look up the state just added, then build a new state.
    r = hmm.getState(s("testState"))
    assert r
    e = pyopenms.HMMState()
    assert e
    e.setName(s("somename"))
    assert e.getName() == "somename", e.getName()
    e.setHidden(True)
    assert e.isHidden()
    pre = pyopenms.HMMState()
    pre.setName(s("pre"))
    suc = pyopenms.HMMState()
    suc.setName(s("suc"))
    e.addPredecessorState(pre)
    e.addSuccessorState(suc)
    assert e.getPredecessorStates()
    assert e.getSuccessorStates()
@report
def testProteaseDB():
    """ProteaseDB lookups: Trypsin must be known by name.

    Removed two unused locals (an EmpiricalFormula and a synonyms set that
    were never passed to anything) and added an assert on the previously
    unused getEnzyme() result.
    """
    edb = pyopenms.ProteaseDB()
    assert edb.hasEnzyme(pyopenms.String("Trypsin"))
    trypsin = edb.getEnzyme(pyopenms.String("Trypsin"))
    assert trypsin is not None
    names = []
    edb.getAllNames(names)
    assert b"Trypsin" in names
@report
def testElementDB():
    """ElementDB lookups by atomic number and by symbol string."""
    edb = pyopenms.ElementDB()
    del edb
    # del + re-create: presumably guards against wrapper lifetime issues.
    edb = pyopenms.ElementDB()
    assert edb.hasElement(16)
    edb.hasElement(pyopenms.String("O"))
    e = edb.getElement(16)
    assert e.getName() == "Sulfur"
    assert e.getSymbol() == "S"
    assert e.getIsotopeDistribution()
    e2 = edb.getElement(pyopenms.String("O"))
    assert e2.getName() == "Oxygen"
    assert e2.getSymbol() == "O"
    assert e2.getIsotopeDistribution()
@report
def testDPosition():
    """DPosition1 / DPosition2 construction and element access."""
    one_d = pyopenms.DPosition1()
    one_d = pyopenms.DPosition1(1.0)
    assert one_d[0] == 1.0
    two_d = pyopenms.DPosition2()
    two_d = pyopenms.DPosition2(1.0, 2.0)
    assert two_d[0] == 1.0
    assert two_d[1] == 2.0
@report
def testResidueDB():
    """ResidueDB: residue sets and lookup of a known residue."""
    rdb = pyopenms.ResidueDB()
    del rdb
    # del + re-create: presumably guards against wrapper lifetime issues.
    rdb = pyopenms.ResidueDB()
    # At least the 20 canonical amino acids must be present.
    assert rdb.getNumberOfResidues() >= 20
    assert len(rdb.getResidueSets() ) >= 1
    el = rdb.getResidues(pyopenms.String(rdb.getResidueSets().pop()))
    assert len(el) >= 1
    assert rdb.hasResidue(s("Glycine"))
    glycine = rdb.getResidue(s("Glycine"))
    nrr = rdb.getNumberOfResidues()
@report
def testModificationsDB():
    """ModificationsDB searches and lookups by name and by delta mass.

    Removed two accidentally duplicated statement groups: a repeated
    ``getNumberOfModifications``/``getModification(1)`` pair and a repeated
    N-terminal "NIC" search.
    """
    mdb = pyopenms.ModificationsDB()
    del mdb
    # del + re-create: presumably guards against wrapper lifetime issues.
    mdb = pyopenms.ModificationsDB()
    assert mdb.getNumberOfModifications() > 1
    m = mdb.getModification(1)
    assert m is not None
    # Name-based searches with different terminal specificities.
    mods = set([])
    mdb.searchModifications(mods, s("Phosphorylation"), s("T"), pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert len(mods) == 1
    mods = set([])
    mdb.searchModifications(mods, s("NIC"), s("T"), pyopenms.ResidueModification.TermSpecificity.N_TERM)
    assert len(mods) == 1
    mods = set([])
    mdb.searchModifications(mods, s("Acetyl"), s("T"), pyopenms.ResidueModification.TermSpecificity.N_TERM)
    assert len(mods) == 1
    assert list(mods)[0].getFullId() == "Acetyl (N-term)"
    m = mdb.getModification(s("Carboxymethyl (C)"), "", pyopenms.ResidueModification.TermSpecificity.NUMBER_OF_TERM_SPECIFICITY)
    assert m.getFullId() == "Carboxymethyl (C)"
    m = mdb.getModification(s("Phosphorylation"), s("S"), pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m.getId() == "Phospho"
    # Search-modification list contains residue-qualified names only.
    mods = []
    m = mdb.getAllSearchModifications(mods)
    assert len(mods) > 100
    assert b"Phospho (S)" in mods
    assert b"Sulfo (S)" in mods
    assert not (b"Phospho" in mods)
    # Delta-mass lookups: ~+80 Da on T -> Phospho, ~+16 Da on M -> Oxidation.
    m = mdb.getBestModificationByDiffMonoMass(80.0, 1.0, "T", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Phospho"
    assert m.getFullName() == "Phosphorylation"
    assert m.getUniModAccession() == "UniMod:21"
    m = mdb.getBestModificationByDiffMonoMass(80, 100, "T", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Phospho"
    assert m.getFullName() == "Phosphorylation"
    assert m.getUniModAccession() == "UniMod:21"
    m = mdb.getBestModificationByDiffMonoMass(16, 1.0, "M", pyopenms.ResidueModification.TermSpecificity.ANYWHERE)
    assert m is not None
    assert m.getId() == "Oxidation", m.getId()
    assert m.getFullName() == "Oxidation or Hydroxylation", m.getFullName()
    assert m.getUniModAccession() == "UniMod:35"
@report
def testRNaseDB():
    """RNaseDB knows RNase_T1 by name and by its cleavage regex."""
    db = pyopenms.RNaseDB()
    enzyme_names = []
    db.getAllNames(enzyme_names)
    t1 = db.getEnzyme("RNase_T1")
    assert t1.getRegEx() == u'(?<=G)'
    assert t1.getThreePrimeGain() == u'p'
    assert db.hasRegEx(u'(?<=G)')
    assert db.hasEnzyme("RNase_T1")
@report
def testRibonucleotideDB():
    """RibonucleotideDB: look up unmodified uridine by its one-letter code."""
    r = pyopenms.RibonucleotideDB()
    uridine = r.getRibonucleotide(b"U")
    assert uridine.getName() == u'uridine'
    assert uridine.getCode() == u'U'
    assert uridine.getFormula().toString() == u'C9H12N2O6'
    assert uridine.isModified() == False
@report
def testRibonucleotide():
    """Ribonucleotide getters mirror their setters; default is unmodified."""
    ribo = pyopenms.Ribonucleotide()
    assert not ribo.isModified()
    ribo.setHTMLCode("test")
    assert ribo.getHTMLCode() == "test"
    ribo.setOrigin(b"A")
    assert ribo.getOrigin() == "A"
    ribo.setNewCode(b"A")
    assert ribo.getNewCode() == "A"
@report
def testRNaseDigestion():
    """RNase_T1 digestion of a 9-mer oligo yields three fragments."""
    dig = pyopenms.RNaseDigestion()
    dig.setEnzyme("RNase_T1")
    assert dig.getEnzymeName() == "RNase_T1"
    oligo = pyopenms.NASequence.fromString("pAUGUCGCAG");
    result = []
    # digest() fills `result` in place.
    dig.digest(oligo, result)
    assert len(result) == 3
@report
def testNASequence():
    """NASequence parsing, formulas, modification flags, fragment masses.

    Fixed: the two ``seq_formula.toString() == ...`` lines were bare
    expressions whose results were silently discarded, so the formula
    checks never actually ran. They are now real assertions (expected
    strings taken verbatim from the original lines).
    """
    oligo = pyopenms.NASequence.fromString("pAUGUCGCAG")
    assert oligo.size() == 9
    seq_formula = oligo.getFormula()
    assert seq_formula.toString() == u'C86H108N35O64P9'
    oligo_mod = pyopenms.NASequence.fromString("A[m1A][Gm]A")
    seq_formula = oligo_mod.getFormula()
    assert seq_formula.toString() == u'C42H53N20O23P3'
    # The sequence is iterable.
    for r in oligo:
        pass
    assert oligo_mod[1].isModified()
    charge = 2
    # Smoke-test the fragment-specific overloads (w ions, charge 2).
    oligo_mod.getMonoWeight(pyopenms.NASequence.NASFragmentType.WIon, charge)
    oligo_mod.getFormula(pyopenms.NASequence.NASFragmentType.WIon, charge)
@report
def testExperimentalDesign():
    """Load a 4-plex fractionated design from TSV and check its counts."""
    f = pyopenms.ExperimentalDesignFile()
    fourplex_fractionated_design = pyopenms.ExperimentalDesign()
    # The TSV fixture lives next to this test file.
    ed_dirname = os.path.dirname(os.path.abspath(__file__))
    ed_filename = os.path.join(ed_dirname, "ExperimentalDesign_input_2.tsv").encode()
    fourplex_fractionated_design = pyopenms.ExperimentalDesignFile.load(ed_filename, False)
    assert fourplex_fractionated_design.getNumberOfSamples() == 8
    assert fourplex_fractionated_design.getNumberOfFractions() == 3
    assert fourplex_fractionated_design.getNumberOfLabels() == 4
    assert fourplex_fractionated_design.getNumberOfMSFiles() == 6
    assert fourplex_fractionated_design.getNumberOfFractionGroups() == 2
    assert fourplex_fractionated_design.getSample(1, 1) == 1
    assert fourplex_fractionated_design.getSample(2, 4) == 8
    assert fourplex_fractionated_design.isFractionated()
    assert fourplex_fractionated_design.sameNrOfMSFilesPerFraction()
@report
def testString():
    """pyopenms.String round-trips for ASCII, UTF-8, UTF-16 and Latin-9.

    Fixed: one corrupted line in the MSSpectrum native-ID round-trip
    (only the fragment ``äh")`` survived); restored to
    ``assert(r == u"bläh")``.
    """
    pystr = pyopenms.String()
    pystr = pyopenms.String("blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String("blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String(u"blah")
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String(pystr)
    assert (pystr.toString() == "blah")
    assert (len(pystr.toString()) == 4)
    cstr = pystr.c_str()
    print(cstr)
    print(pystr)
    print(pystr.toString())
    assert (pystr.toString() == "blah")
    pystr = pyopenms.String("bläh")
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String("bläh")
    pystr = pyopenms.String(u"bläh")
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(pystr)
    assert (pystr.toString() == u"bläh")
    cstr = pystr.c_str()
    print(cstr)
    print(pystr)
    print(pystr.toString().encode("utf8"))
    # Four code points, five UTF-8 bytes (ä encodes to two bytes).
    assert len(pystr.toString()) == 4
    assert len(pystr.c_str()) == 5
    print(pystr)
    print(pystr.toString().encode("utf8"))
    pystr1 = pyopenms.String("bläh")
    pystr2 = pyopenms.String("bläh")
    assert(pystr1 == pystr2)
    pystr1 = pyopenms.String(u"bläh")
    pystr2 = pyopenms.String(u"bläh")
    assert(pystr1 == pystr2)
    ustr = u"bläh"
    pystr = pyopenms.String(ustr)
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(ustr.encode("utf8"))
    assert (pystr.toString() == u"bläh")
    pystr = pyopenms.String(ustr.encode("iso8859_15"))
    assert (pystr.c_str().decode("iso8859_15") == u"bläh")
    pystr = pyopenms.String(ustr.encode("utf16"))
    assert (pystr.c_str().decode("utf16") == u"bläh")
    # toString() must reject byte content that is not valid UTF-8.
    pystr = pyopenms.String(ustr.encode("iso8859_15"))
    didThrow = False
    try:
        pystr.toString()
    except UnicodeDecodeError:
        didThrow = True
    assert didThrow
    pystr = pyopenms.String(ustr.encode("utf16"))
    didThrow = False
    try:
        pystr.toString()
    except UnicodeDecodeError:
        didThrow = True
    assert didThrow
    ustr = u"bläh"
    s = pyopenms.MSSpectrum()
    s.setNativeID(ustr)
    r = s.getNativeID()
    assert(r == u"bläh")
    s.setNativeID(ustr.encode("utf8"))
    r = s.getNativeID()
    assert(r == u"bläh")
    s.setNativeID(ustr.encode("utf16"))
    r = s.getNativeID()
    s.setNativeID(ustr.encode("iso8859_15"))
    r = s.getNativeID()
    assert(r.decode("iso8859_15") == u"bläh")
| true | true |
f73adb8ad71550654c63d4f0f2a53eb404ed330e | 2,019 | py | Python | gammapy/datasets/tests/test_datasets.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 155 | 2015-02-25T12:38:02.000Z | 2022-03-13T17:54:30.000Z | gammapy/datasets/tests/test_datasets.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 3,131 | 2015-01-06T15:36:23.000Z | 2022-03-31T17:30:57.000Z | gammapy/datasets/tests/test_datasets.py | Rishank2610/gammapy | 3cd64fdb2c53c8e5c697a9b85ef8d0486bff0b76 | [
"BSD-3-Clause"
] | 158 | 2015-03-16T20:36:44.000Z | 2022-03-30T16:05:37.000Z | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import pytest
from numpy.testing import assert_allclose
from gammapy.datasets import Datasets
from gammapy.modeling.tests.test_fit import MyDataset
@pytest.fixture(scope="session")
def datasets():
    """Session-scoped fixture: a Datasets container holding two MyDataset
    instances named "test-1" and "test-2"."""
    return Datasets([MyDataset(name="test-1"), MyDataset(name="test-2")])
def test_datasets_init(datasets):
    """Datasets accepts both a plain Python list of Dataset objects and an
    existing Datasets instance as constructor input."""
    for source in (list(datasets), datasets):
        Datasets(source)
def test_datasets_types(datasets):
    # Both fixture members are MyDataset, so the flag must be True.
    assert datasets.is_all_same_type
def test_datasets_likelihood(datasets):
    # stat_sum() aggregates the fit statistic over both member datasets.
    likelihood = datasets.stat_sum()
    assert_allclose(likelihood, 14472200.0002)
def test_datasets_str(datasets):
    # The string representation must at least mention the class name.
    assert "Datasets" in str(datasets)
def test_datasets_getitem(datasets):
    """Name-based indexing returns the dataset carrying that name."""
    for name in ("test-1", "test-2"):
        assert datasets[name].name == name
def test_names(datasets):
    # `names` reflects the order the datasets were added in the fixture.
    assert datasets.names == ["test-1", "test-2"]
def test_Datasets_mutation():
    """insert/extend/remove/append/pop behave list-like and keep names unique."""
    dat = MyDataset(name="test-1")
    dats = Datasets([MyDataset(name="test-2"), MyDataset(name="test-3")])
    dats2 = Datasets([MyDataset(name="test-4"), MyDataset(name="test-5")])
    # insert at the front, extend with another Datasets.
    dats.insert(0, dat)
    assert dats.names == ["test-1", "test-2", "test-3"]
    dats.extend(dats2)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4", "test-5"]
    # remove / re-append / pop shuffle membership as a plain list would.
    dat3 = dats[3]
    dats.remove(dats[3])
    assert dats.names == ["test-1", "test-2", "test-3", "test-5"]
    dats.append(dat3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-5", "test-4"]
    dats.pop(3)
    assert dats.names == ["test-1", "test-2", "test-3", "test-4"]
    # Re-adding an already-present name must be rejected by every mutator.
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.append(dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.insert(0, dat)
    with pytest.raises(ValueError, match="Dataset names must be unique"):
        dats.extend(dats2)
| 29.691176 | 75 | 0.678554 |
import pytest
from numpy.testing import assert_allclose
from gammapy.datasets import Datasets
from gammapy.modeling.tests.test_fit import MyDataset
@pytest.fixture(scope="session")
def datasets():
return Datasets([MyDataset(name="test-1"), MyDataset(name="test-2")])
def test_datasets_init(datasets):
Datasets(list(datasets))
Datasets(datasets)
def test_datasets_types(datasets):
assert datasets.is_all_same_type
def test_datasets_likelihood(datasets):
likelihood = datasets.stat_sum()
assert_allclose(likelihood, 14472200.0002)
def test_datasets_str(datasets):
assert "Datasets" in str(datasets)
def test_datasets_getitem(datasets):
assert datasets["test-1"].name == "test-1"
assert datasets["test-2"].name == "test-2"
def test_names(datasets):
assert datasets.names == ["test-1", "test-2"]
def test_Datasets_mutation():
dat = MyDataset(name="test-1")
dats = Datasets([MyDataset(name="test-2"), MyDataset(name="test-3")])
dats2 = Datasets([MyDataset(name="test-4"), MyDataset(name="test-5")])
dats.insert(0, dat)
assert dats.names == ["test-1", "test-2", "test-3"]
dats.extend(dats2)
assert dats.names == ["test-1", "test-2", "test-3", "test-4", "test-5"]
dat3 = dats[3]
dats.remove(dats[3])
assert dats.names == ["test-1", "test-2", "test-3", "test-5"]
dats.append(dat3)
assert dats.names == ["test-1", "test-2", "test-3", "test-5", "test-4"]
dats.pop(3)
assert dats.names == ["test-1", "test-2", "test-3", "test-4"]
with pytest.raises(ValueError, match="Dataset names must be unique"):
dats.append(dat)
with pytest.raises(ValueError, match="Dataset names must be unique"):
dats.insert(0, dat)
with pytest.raises(ValueError, match="Dataset names must be unique"):
dats.extend(dats2)
| true | true |
f73adc78f1eeb493cb04a5690be0ceee688064e3 | 12,032 | py | Python | watcher/common/clients.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 64 | 2015-10-18T02:57:24.000Z | 2022-01-13T11:27:51.000Z | watcher/common/clients.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | null | null | null | watcher/common/clients.py | ajaytikoo/watcher | 6dbac1f6ae7f3e10dfdcef5721fa4af7af54e159 | [
"Apache-2.0"
] | 35 | 2015-12-25T13:53:21.000Z | 2021-07-19T15:50:16.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from cinderclient import client as ciclient
from glanceclient import client as glclient
from gnocchiclient import client as gnclient
from ironicclient import client as irclient
from keystoneauth1 import adapter as ka_adapter
from keystoneauth1 import loading as ka_loading
from keystoneclient import client as keyclient
from monascaclient import client as monclient
from neutronclient.neutron import client as netclient
from novaclient import api_versions as nova_api_versions
from novaclient import client as nvclient
from watcher.common import exception
try:
from ceilometerclient import client as ceclient
HAS_CEILCLIENT = True
except ImportError:
HAS_CEILCLIENT = False
CONF = cfg.CONF
_CLIENTS_AUTH_GROUP = 'watcher_clients_auth'
# NOTE(mriedem): This is the minimum required version of the nova API for
# watcher features to work. If new features are added which require new
# versions, they should perform version discovery and be backward compatible
# for at least one release before raising the minimum required version.
MIN_NOVA_API_VERSION = '2.56'
def check_min_nova_api_version(config_version):
    """Validate that *config_version* meets watcher's minimum nova API version.

    :param config_version: The configured [nova_client]/api_version value
    :raises: ValueError if the configured version is less than the required
             minimum
    """
    configured = nova_api_versions.APIVersion(config_version)
    required = nova_api_versions.APIVersion(MIN_NOVA_API_VERSION)
    if configured < required:
        raise ValueError('Invalid nova_client.api_version %s. %s or '
                         'greater is required.' % (config_version,
                                                   MIN_NOVA_API_VERSION))
class OpenStackClients(object):
    """Convenience class to create and cache client instances."""
    # Each accessor below builds its client lazily on first call, using the
    # shared keystone session plus the per-service *_client configuration
    # options, and caches the result on the instance.

    def __init__(self):
        self.reset_clients()

    def reset_clients(self):
        # Drop every cached client/session so the next accessor call
        # rebuilds it from the current configuration.
        self._session = None
        self._keystone = None
        self._nova = None
        self._glance = None
        self._gnocchi = None
        self._cinder = None
        self._ceilometer = None
        self._monasca = None
        self._neutron = None
        self._ironic = None
        self._placement = None

    def _get_keystone_session(self):
        # Build the auth plugin and session from the [watcher_clients_auth]
        # configuration section.
        auth = ka_loading.load_auth_from_conf_options(CONF,
                                                      _CLIENTS_AUTH_GROUP)
        sess = ka_loading.load_session_from_conf_options(CONF,
                                                         _CLIENTS_AUTH_GROUP,
                                                         auth=auth)
        return sess

    @property
    def auth_url(self):
        return self.keystone().auth_url

    @property
    def session(self):
        # Single keystone session shared by all clients of this instance.
        if not self._session:
            self._session = self._get_keystone_session()
        return self._session

    def _get_client_option(self, client, option):
        # Read CONF.<client>_client.<option>, e.g. CONF.nova_client.api_version.
        return getattr(getattr(CONF, '%s_client' % client), option)

    @exception.wrap_keystone_exception
    def keystone(self):
        if self._keystone:
            return self._keystone
        keystone_interface = self._get_client_option('keystone',
                                                     'interface')
        keystone_region_name = self._get_client_option('keystone',
                                                       'region_name')
        self._keystone = keyclient.Client(
            interface=keystone_interface,
            region_name=keystone_region_name,
            session=self.session)
        return self._keystone

    @exception.wrap_keystone_exception
    def nova(self):
        if self._nova:
            return self._nova
        novaclient_version = self._get_client_option('nova', 'api_version')
        # Fail early if the configured version is below the watcher minimum.
        check_min_nova_api_version(novaclient_version)
        nova_endpoint_type = self._get_client_option('nova', 'endpoint_type')
        nova_region_name = self._get_client_option('nova', 'region_name')
        self._nova = nvclient.Client(novaclient_version,
                                     endpoint_type=nova_endpoint_type,
                                     region_name=nova_region_name,
                                     session=self.session)
        return self._nova

    @exception.wrap_keystone_exception
    def glance(self):
        if self._glance:
            return self._glance
        glanceclient_version = self._get_client_option('glance', 'api_version')
        glance_endpoint_type = self._get_client_option('glance',
                                                       'endpoint_type')
        glance_region_name = self._get_client_option('glance', 'region_name')
        self._glance = glclient.Client(glanceclient_version,
                                       interface=glance_endpoint_type,
                                       region_name=glance_region_name,
                                       session=self.session)
        return self._glance

    @exception.wrap_keystone_exception
    def gnocchi(self):
        if self._gnocchi:
            return self._gnocchi
        gnocchiclient_version = self._get_client_option('gnocchi',
                                                        'api_version')
        gnocchiclient_interface = self._get_client_option('gnocchi',
                                                          'endpoint_type')
        gnocchiclient_region_name = self._get_client_option('gnocchi',
                                                            'region_name')
        # gnocchiclient takes interface/region via adapter_options rather
        # than as top-level keyword arguments.
        adapter_options = {
            "interface": gnocchiclient_interface,
            "region_name": gnocchiclient_region_name
        }
        self._gnocchi = gnclient.Client(gnocchiclient_version,
                                        adapter_options=adapter_options,
                                        session=self.session)
        return self._gnocchi

    @exception.wrap_keystone_exception
    def cinder(self):
        if self._cinder:
            return self._cinder
        cinderclient_version = self._get_client_option('cinder', 'api_version')
        cinder_endpoint_type = self._get_client_option('cinder',
                                                       'endpoint_type')
        cinder_region_name = self._get_client_option('cinder', 'region_name')
        self._cinder = ciclient.Client(cinderclient_version,
                                       endpoint_type=cinder_endpoint_type,
                                       region_name=cinder_region_name,
                                       session=self.session)
        return self._cinder

    @exception.wrap_keystone_exception
    def ceilometer(self):
        # NOTE(review): assumes the optional ceilometerclient import at the
        # top of the file succeeded (HAS_CEILCLIENT); otherwise `ceclient`
        # is undefined and this raises NameError.
        if self._ceilometer:
            return self._ceilometer
        ceilometerclient_version = self._get_client_option('ceilometer',
                                                           'api_version')
        ceilometer_endpoint_type = self._get_client_option('ceilometer',
                                                           'endpoint_type')
        ceilometer_region_name = self._get_client_option('ceilometer',
                                                         'region_name')
        self._ceilometer = ceclient.get_client(
            ceilometerclient_version,
            endpoint_type=ceilometer_endpoint_type,
            region_name=ceilometer_region_name,
            session=self.session)
        return self._ceilometer

    @exception.wrap_keystone_exception
    def monasca(self):
        if self._monasca:
            return self._monasca
        monascaclient_version = self._get_client_option(
            'monasca', 'api_version')
        monascaclient_interface = self._get_client_option(
            'monasca', 'interface')
        monascaclient_region = self._get_client_option(
            'monasca', 'region_name')
        # Unlike the other clients, this one is handed an explicit token,
        # endpoint and raw auth settings rather than the keystone session.
        token = self.session.get_token()
        watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP)
        service_type = 'monitoring'
        monasca_kwargs = {
            'auth_url': watcher_clients_auth_config.auth_url,
            'cert_file': watcher_clients_auth_config.certfile,
            'insecure': watcher_clients_auth_config.insecure,
            'key_file': watcher_clients_auth_config.keyfile,
            'keystone_timeout': watcher_clients_auth_config.timeout,
            'os_cacert': watcher_clients_auth_config.cafile,
            'service_type': service_type,
            'token': token,
            'username': watcher_clients_auth_config.username,
            'password': watcher_clients_auth_config.password,
        }
        endpoint = self.session.get_endpoint(service_type=service_type,
                                             interface=monascaclient_interface,
                                             region_name=monascaclient_region)
        self._monasca = monclient.Client(
            monascaclient_version, endpoint, **monasca_kwargs)
        return self._monasca

    @exception.wrap_keystone_exception
    def neutron(self):
        if self._neutron:
            return self._neutron
        neutronclient_version = self._get_client_option('neutron',
                                                        'api_version')
        neutron_endpoint_type = self._get_client_option('neutron',
                                                        'endpoint_type')
        neutron_region_name = self._get_client_option('neutron', 'region_name')
        self._neutron = netclient.Client(neutronclient_version,
                                         endpoint_type=neutron_endpoint_type,
                                         region_name=neutron_region_name,
                                         session=self.session)
        self._neutron.format = 'json'
        return self._neutron

    @exception.wrap_keystone_exception
    def ironic(self):
        if self._ironic:
            return self._ironic
        ironicclient_version = self._get_client_option('ironic', 'api_version')
        endpoint_type = self._get_client_option('ironic', 'endpoint_type')
        ironic_region_name = self._get_client_option('ironic', 'region_name')
        self._ironic = irclient.get_client(ironicclient_version,
                                           interface=endpoint_type,
                                           region_name=ironic_region_name,
                                           session=self.session)
        return self._ironic

    @exception.wrap_keystone_exception
    def placement(self):
        if self._placement:
            return self._placement
        placement_version = self._get_client_option('placement',
                                                    'api_version')
        placement_interface = self._get_client_option('placement',
                                                      'interface')
        placement_region_name = self._get_client_option('placement',
                                                        'region_name')
        # Set accept header on every request to ensure we notify placement
        # service of our response body media type preferences.
        headers = {'accept': 'application/json'}
        self._placement = ka_adapter.Adapter(
            session=self.session,
            service_type='placement',
            default_microversion=placement_version,
            interface=placement_interface,
            region_name=placement_region_name,
            additional_headers=headers)
        return self._placement
| 41.347079 | 79 | 0.604222 |
from oslo_config import cfg
from cinderclient import client as ciclient
from glanceclient import client as glclient
from gnocchiclient import client as gnclient
from ironicclient import client as irclient
from keystoneauth1 import adapter as ka_adapter
from keystoneauth1 import loading as ka_loading
from keystoneclient import client as keyclient
from monascaclient import client as monclient
from neutronclient.neutron import client as netclient
from novaclient import api_versions as nova_api_versions
from novaclient import client as nvclient
from watcher.common import exception
try:
from ceilometerclient import client as ceclient
HAS_CEILCLIENT = True
except ImportError:
HAS_CEILCLIENT = False
CONF = cfg.CONF
_CLIENTS_AUTH_GROUP = 'watcher_clients_auth'
MIN_NOVA_API_VERSION = '2.56'
def check_min_nova_api_version(config_version):
min_required = nova_api_versions.APIVersion(MIN_NOVA_API_VERSION)
if nova_api_versions.APIVersion(config_version) < min_required:
raise ValueError('Invalid nova_client.api_version %s. %s or '
'greater is required.' % (config_version,
MIN_NOVA_API_VERSION))
class OpenStackClients(object):
def __init__(self):
self.reset_clients()
def reset_clients(self):
self._session = None
self._keystone = None
self._nova = None
self._glance = None
self._gnocchi = None
self._cinder = None
self._ceilometer = None
self._monasca = None
self._neutron = None
self._ironic = None
self._placement = None
def _get_keystone_session(self):
auth = ka_loading.load_auth_from_conf_options(CONF,
_CLIENTS_AUTH_GROUP)
sess = ka_loading.load_session_from_conf_options(CONF,
_CLIENTS_AUTH_GROUP,
auth=auth)
return sess
@property
def auth_url(self):
return self.keystone().auth_url
@property
def session(self):
if not self._session:
self._session = self._get_keystone_session()
return self._session
def _get_client_option(self, client, option):
return getattr(getattr(CONF, '%s_client' % client), option)
@exception.wrap_keystone_exception
def keystone(self):
if self._keystone:
return self._keystone
keystone_interface = self._get_client_option('keystone',
'interface')
keystone_region_name = self._get_client_option('keystone',
'region_name')
self._keystone = keyclient.Client(
interface=keystone_interface,
region_name=keystone_region_name,
session=self.session)
return self._keystone
@exception.wrap_keystone_exception
def nova(self):
if self._nova:
return self._nova
novaclient_version = self._get_client_option('nova', 'api_version')
check_min_nova_api_version(novaclient_version)
nova_endpoint_type = self._get_client_option('nova', 'endpoint_type')
nova_region_name = self._get_client_option('nova', 'region_name')
self._nova = nvclient.Client(novaclient_version,
endpoint_type=nova_endpoint_type,
region_name=nova_region_name,
session=self.session)
return self._nova
@exception.wrap_keystone_exception
def glance(self):
if self._glance:
return self._glance
glanceclient_version = self._get_client_option('glance', 'api_version')
glance_endpoint_type = self._get_client_option('glance',
'endpoint_type')
glance_region_name = self._get_client_option('glance', 'region_name')
self._glance = glclient.Client(glanceclient_version,
interface=glance_endpoint_type,
region_name=glance_region_name,
session=self.session)
return self._glance
@exception.wrap_keystone_exception
def gnocchi(self):
if self._gnocchi:
return self._gnocchi
gnocchiclient_version = self._get_client_option('gnocchi',
'api_version')
gnocchiclient_interface = self._get_client_option('gnocchi',
'endpoint_type')
gnocchiclient_region_name = self._get_client_option('gnocchi',
'region_name')
adapter_options = {
"interface": gnocchiclient_interface,
"region_name": gnocchiclient_region_name
}
self._gnocchi = gnclient.Client(gnocchiclient_version,
adapter_options=adapter_options,
session=self.session)
return self._gnocchi
@exception.wrap_keystone_exception
def cinder(self):
if self._cinder:
return self._cinder
cinderclient_version = self._get_client_option('cinder', 'api_version')
cinder_endpoint_type = self._get_client_option('cinder',
'endpoint_type')
cinder_region_name = self._get_client_option('cinder', 'region_name')
self._cinder = ciclient.Client(cinderclient_version,
endpoint_type=cinder_endpoint_type,
region_name=cinder_region_name,
session=self.session)
return self._cinder
@exception.wrap_keystone_exception
def ceilometer(self):
if self._ceilometer:
return self._ceilometer
ceilometerclient_version = self._get_client_option('ceilometer',
'api_version')
ceilometer_endpoint_type = self._get_client_option('ceilometer',
'endpoint_type')
ceilometer_region_name = self._get_client_option('ceilometer',
'region_name')
self._ceilometer = ceclient.get_client(
ceilometerclient_version,
endpoint_type=ceilometer_endpoint_type,
region_name=ceilometer_region_name,
session=self.session)
return self._ceilometer
@exception.wrap_keystone_exception
def monasca(self):
if self._monasca:
return self._monasca
monascaclient_version = self._get_client_option(
'monasca', 'api_version')
monascaclient_interface = self._get_client_option(
'monasca', 'interface')
monascaclient_region = self._get_client_option(
'monasca', 'region_name')
token = self.session.get_token()
watcher_clients_auth_config = CONF.get(_CLIENTS_AUTH_GROUP)
service_type = 'monitoring'
monasca_kwargs = {
'auth_url': watcher_clients_auth_config.auth_url,
'cert_file': watcher_clients_auth_config.certfile,
'insecure': watcher_clients_auth_config.insecure,
'key_file': watcher_clients_auth_config.keyfile,
'keystone_timeout': watcher_clients_auth_config.timeout,
'os_cacert': watcher_clients_auth_config.cafile,
'service_type': service_type,
'token': token,
'username': watcher_clients_auth_config.username,
'password': watcher_clients_auth_config.password,
}
endpoint = self.session.get_endpoint(service_type=service_type,
interface=monascaclient_interface,
region_name=monascaclient_region)
self._monasca = monclient.Client(
monascaclient_version, endpoint, **monasca_kwargs)
return self._monasca
@exception.wrap_keystone_exception
def neutron(self):
if self._neutron:
return self._neutron
neutronclient_version = self._get_client_option('neutron',
'api_version')
neutron_endpoint_type = self._get_client_option('neutron',
'endpoint_type')
neutron_region_name = self._get_client_option('neutron', 'region_name')
self._neutron = netclient.Client(neutronclient_version,
endpoint_type=neutron_endpoint_type,
region_name=neutron_region_name,
session=self.session)
self._neutron.format = 'json'
return self._neutron
@exception.wrap_keystone_exception
def ironic(self):
if self._ironic:
return self._ironic
ironicclient_version = self._get_client_option('ironic', 'api_version')
endpoint_type = self._get_client_option('ironic', 'endpoint_type')
ironic_region_name = self._get_client_option('ironic', 'region_name')
self._ironic = irclient.get_client(ironicclient_version,
interface=endpoint_type,
region_name=ironic_region_name,
session=self.session)
return self._ironic
@exception.wrap_keystone_exception
def placement(self):
if self._placement:
return self._placement
placement_version = self._get_client_option('placement',
'api_version')
placement_interface = self._get_client_option('placement',
'interface')
placement_region_name = self._get_client_option('placement',
'region_name')
headers = {'accept': 'application/json'}
self._placement = ka_adapter.Adapter(
session=self.session,
service_type='placement',
default_microversion=placement_version,
interface=placement_interface,
region_name=placement_region_name,
additional_headers=headers)
return self._placement
| true | true |
f73add7dc1323902898553e9c6afdc1ebb9324f6 | 38,119 | py | Python | Font Info/Vertical Metrics Manager.py | KatjaSchimmel/Glyphs-Scripts | 0632d810c7849797405ac958c142ff313e2cb4c1 | [
"Apache-2.0"
] | 283 | 2015-01-07T12:35:35.000Z | 2022-03-29T06:10:44.000Z | Font Info/Vertical Metrics Manager.py | KatjaSchimmel/Glyphs-Scripts | 0632d810c7849797405ac958c142ff313e2cb4c1 | [
"Apache-2.0"
] | 203 | 2015-01-26T18:43:08.000Z | 2022-03-04T01:47:58.000Z | Font Info/Vertical Metrics Manager.py | KatjaSchimmel/Glyphs-Scripts | 0632d810c7849797405ac958c142ff313e2cb4c1 | [
"Apache-2.0"
] | 96 | 2015-01-19T20:58:03.000Z | 2022-03-29T06:10:56.000Z | #MenuTitle: Vertical Metrics Manager
# -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
try:
from builtins import str
except Exception as e:
print("Warning: 'future' module not installed. Run 'sudo pip install future' in Terminal.")
__doc__="""
Manage and sync ascender, descender and linegap values for hhea, OS/2 sTypo and OS/2 usWin.
"""
import vanilla
def cleanInt(numberString):
    """Strip everything but digits and sign characters from numberString, return an int.

    Non-numeric characters are dropped, e.g. "x: 127 units" -> 127, "-42pt" -> -42.
    Raises ValueError if no digit characters remain (same as the original behavior).
    """
    # Fix: the original called unicode(), which is a NameError on Python 3 even
    # though the file declares py2/py3 compatibility via __future__/builtins.
    # str() behaves equivalently here on both interpreters (character iteration only).
    digits = "".join(char for char in str(numberString) if char in "1234567890+-")
    # round() before int(): keeps py2 (round -> float) and py3 (round -> int) results identical.
    return int(round(float(digits)))
def roundUpByValue(x, roundBy):
    """Round x away from zero to the next multiple of roundBy; zero stays zero."""
    if not x:
        # avoid division by zero further down
        return 0
    magnitude = abs(x)
    rounded = (magnitude // roundBy) * roundBy
    if magnitude % roundBy:
        # x is not an exact multiple: step one increment further from zero
        rounded += roundBy
    return int(rounded if x > 0 else -rounded)
class VerticalMetricsManager( object ):
def __init__( self ):
    """Build the Vertical Metrics Manager floating window, wire up callbacks, and load stored preferences."""
    # Window 'self.w':
    windowWidth = 330
    windowHeight = 410
    windowWidthResize = 100 # user can resize width by this value
    windowHeightResize = 0 # user can resize height by this value
    self.w = vanilla.FloatingWindow(
        ( windowWidth, windowHeight ), # default window size
        "Vertical Metrics Manager", # window title
        minSize = ( windowWidth, windowHeight ), # minimum size (for resizing)
        maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ), # maximum size (for resizing)
        autosaveName = "com.mekkablue.VerticalMetricsManager.mainwindow" # stores last window position and size
    )
    # UI elements:
    linePos, inset, lineHeight = 12, 15, 22
    self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Manage and sync hhea, typo and win values.", sizeStyle='small', selectable=True )
    linePos += lineHeight
    # Column headers for the three value columns:
    self.w.titleAscent = vanilla.TextBox( (inset+70, linePos+4, 70, 14), u"Ascender", sizeStyle='small', selectable=True )
    self.w.titleDescent = vanilla.TextBox( (inset+140, linePos+4, 70, 14), u"Descender", sizeStyle='small', selectable=True )
    self.w.titleLineGap = vanilla.TextBox( (inset+210, linePos+4, 70, 14), u"Line Gap", sizeStyle='small', selectable=True )
    linePos += lineHeight
    # OS/2 usWin row (no line gap field exists in this table):
    self.w.titleWin = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 usWin", sizeStyle='small', selectable=True )
    self.w.winAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.winAsc.getNSTextField().setToolTip_("OS/2 usWinAscent. Should be the maximum height in your font. Expect clipping or rendering artefacts beyond this point.")
    self.w.winDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.winDesc.getNSTextField().setToolTip_("OS/2 usWinDescent (unsigned integer). Should be the maximum depth in your font, like the lowest descender you have. Expect clipping or rendering artefacts beyond this point.")
    self.w.winGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=None, sizeStyle='small', readOnly=True, placeholder=u"n/a" )
    self.w.winGap.getNSTextField().setToolTip_("OS/2 usWinLineGap does not exist, hence greyed out here.")
    self.w.winUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
    self.w.winUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 usWin values in the fields to the left. Takes the measurement settings below into account, except for the Limit options.")
    linePos += lineHeight+4
    # OS/2 sTypo row (visually bracketed together with the hhea row below):
    self.w.parenTypo = vanilla.TextBox( (inset-12, linePos+5, 15, 20), u"┏", sizeStyle='small', selectable=False )
    self.w.titleTypo = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 sTypo", sizeStyle='small', selectable=True )
    self.w.typoAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.typoAsc.getNSTextField().setToolTip_("OS/2 sTypoAscender (positive value), should be the same as hheaAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in DTP and office apps and together with the line gap value, also in browsers.")
    self.w.typoDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.typoDesc.getNSTextField().setToolTip_("OS/2 sTypoDescender (negative value), should be the same as hheaDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
    self.w.typoGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.typoGap.getNSTextField().setToolTip_("OS/2 sTypoLineGap (positive value), should be the same as hheaLineGap. Should be either zero or a value for padding between lines that makes sense visually. Office apps insert this distance between the lines, browsers add half on top and half below each line, also for determining text object boundaries.")
    self.w.typoUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
    self.w.typoUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 sTypo values in the fields to the left. Takes the measurement settings below into account.")
    linePos += lineHeight
    # hhea row:
    self.w.parenConnect = vanilla.TextBox( (inset-12, linePos-int(lineHeight/2)+4, 15, 20), u"┃", sizeStyle='small', selectable=False )
    self.w.parenHhea = vanilla.TextBox( (inset-12, linePos+3, 15, 20), u"┗", sizeStyle='small', selectable=False )
    self.w.titleHhea = vanilla.TextBox( (inset, linePos+3, 70, 14), u"hhea", sizeStyle='small', selectable=True )
    self.w.hheaAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.hheaAsc.getNSTextField().setToolTip_("hheaAscender (positive value), should be the same as OS/2 sTypoAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in Mac office apps and together with the line gap value, also in Mac browsers.")
    self.w.hheaDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.hheaDesc.getNSTextField().setToolTip_("hheaDescender (negative value), should be the same as OS/2 sTypoDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
    self.w.hheaGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
    self.w.hheaGap.getNSTextField().setToolTip_("hheaLineGap (positive value), should be the same as OS/2 sTypoLineGap. Should be either zero or a value for padding between lines that makes sense visually. Mac office apps insert this distance between the lines, Mac browsers add half on top and half below each line, also for determining text object boundaries.")
    self.w.hheaUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
    self.w.hheaUpdate.getNSButton().setToolTip_("Will recalculate the hhea values in the fields to the left. Takes the measurement settings below into account.")
    linePos += lineHeight
    # fsSelection bit 7 checkbox:
    self.w.useTypoMetrics = vanilla.CheckBox( (inset+70, linePos, -inset, 20), u"Use Typo Metrics (fsSelection bit 7)", value=True, callback=self.SavePreferences, sizeStyle='small' )
    self.w.useTypoMetrics.getNSButton().setToolTip_("Should ALWAYS BE ON. Only uncheck if you really know what you are doing. If unchecked, line behaviour will be not consistent between apps and browsers because some apps prefer win values to sTypo values for determining line distances.")
    self.w.useTypoMetricsUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
    self.w.useTypoMetricsUpdate.getNSButton().setToolTip_("Will reset the checkbox to the left to ON, because it should ALWAYS be on. Strongly recommended.")
    linePos += lineHeight*1.5
    # Measurement options:
    self.w.descriptionMeasurements = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Taking Measurements (see tooltips for info):", sizeStyle='small', selectable=True )
    linePos += lineHeight
    self.w.round = vanilla.CheckBox( (inset, linePos, 70, 20), u"Round by:", value=True, callback=self.SavePreferences, sizeStyle='small' )
    self.w.round.getNSButton().setToolTip_("Turn on if you want your values rounded. Recommended.")
    self.w.roundValue = vanilla.EditText( (inset+75, linePos, 60, 19), "10", callback=self.SavePreferences, sizeStyle='small' )
    self.w.roundValue.getNSTextField().setToolTip_("All value calculations will be rounded up to the next multiple of this value. Recommended: 10.")
    linePos += lineHeight
    self.w.includeAllMasters = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Include all masters (otherwise current master only)", value=True, callback=self.SavePreferences, sizeStyle='small' )
    self.w.includeAllMasters.getNSButton().setToolTip_("If checked, all masters will be measured. If unchecked, only the current master will be measured. Since vertical metrics should be the same throughout all masters, it also makes sense to measure on all masters.")
    linePos += lineHeight
    self.w.respectMarkToBaseOffset = vanilla.CheckBox( (inset, linePos, -inset, 20), "Include mark-to-base offset for OS/2 usWin", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.respectMarkToBaseOffset.getNSButton().setToolTip_("If checked will calculate the maximum possible height that can be reached with top-anchored marks, and the lowest depth with bottom-anchored marks, and use those values for the OS/2 usWin values. Strongly recommended for making fonts work on Windows if they rely on mark-to-base positioning (e.g. Arabic). Respects the ‘Limit to Script’ setting.")
    linePos += lineHeight
    self.w.ignoreNonExporting = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Ignore non-exporting glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.ignoreNonExporting.getNSButton().setToolTip_("If checked, glyphs that do not export will be excluded from measuring. Recommended. (Ignored for calculating the OS/2 usWin values.)")
    linePos += lineHeight
    self.w.preferSelectedGlyphs = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Limit to selected glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.preferSelectedGlyphs.getNSButton().setToolTip_("If checked, only the current glyphs will be measured. Can be combined with the other Limit options. May make sense if you want your metrics to be e.g. Latin-CE-centric.")
    linePos += lineHeight
    # Script limitation with a refreshable popup:
    self.w.preferScript = vanilla.CheckBox( (inset, linePos, inset+110, 20), u"Limit to script:", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.preferScript.getNSButton().setToolTip_("If checked, only measures glyphs belonging to the selected writing system. Can be combined with the other Limit options. (Ignored for calculating the OS/2 usWin values, but respected for mark-to-base calculation.)")
    self.w.preferScriptPopup = vanilla.PopUpButton( (inset+115, linePos+1, -inset-25, 17), (u"latin", u"greek"), sizeStyle='small', callback=self.SavePreferences )
    self.w.preferScriptPopup.getNSPopUpButton().setToolTip_("Choose a writing system ('script') you want the measurements to be limited to. May make sense to ignore other scripts if the font is intended only for e.g. Cyrillic. Does not apply to OS/2 usWin")
    self.w.preferScriptUpdate = vanilla.SquareButton( (-inset-20, linePos+1, -inset, 18), u"↺", sizeStyle='small', callback=self.update )
    self.w.preferScriptUpdate.getNSButton().setToolTip_("Update the script popup to the left with all scripts (writing systems) found in the current font.")
    linePos += lineHeight
    # Category limitation with a refreshable popup:
    self.w.preferCategory = vanilla.CheckBox( (inset, linePos, inset+110, 20), u"Limit to category:", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.preferCategory.getNSButton().setToolTip_("If checked, only measures glyphs belonging to the selected glyph category. Can be combined with the other Limit options. (Ignored for calculating the OS/2 usWin values.)")
    self.w.preferCategoryPopup = vanilla.PopUpButton( (inset+115, linePos+1, -inset-25, 17), (u"Letter", u"Number"), sizeStyle='small', callback=self.SavePreferences )
    self.w.preferCategoryPopup.getNSPopUpButton().setToolTip_("Choose a glyph category you want the measurements to be limited to. It may make sense to limit only to Letter.")
    self.w.preferCategoryUpdate = vanilla.SquareButton( (-inset-20, linePos+1, -inset, 18), u"↺", sizeStyle='small', callback=self.update )
    self.w.preferCategoryUpdate.getNSButton().setToolTip_("Update the category popup to the left with all glyph categories found in the current font.")
    linePos += lineHeight
    self.w.allOpenFonts = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"⚠️ Read out and apply to ALL open fonts", value=False, callback=self.SavePreferences, sizeStyle='small' )
    self.w.allOpenFonts.getNSButton().setToolTip_(u"If activated, does not only measure the frontmost font, but all open fonts. Careful: when you press the Apply button, will also apply it to all open fonts. Useful if you have all font files for a font family open.")
    linePos += lineHeight
    # Run Button:
    self.w.helpButton = vanilla.HelpButton((inset-2, -20-inset, 21, -inset+2), callback=self.openURL )
    self.w.helpButton.getNSButton().setToolTip_("Opens the Vertical Metrics tutorial (highly recommended) in your web browser.")
    self.w.runButton = vanilla.Button( (-120-inset, -20-inset, -inset, -inset), "Apply to Font", sizeStyle='regular', callback=self.VerticalMetricsManagerMain )
    self.w.runButton.getNSButton().setToolTip_("Insert the OS/2, hhea and fsSelection values above as custom parameters in the font. The number values will be inserted into each master. Blank values will delete the respective parameters.")
    self.w.setDefaultButton( self.w.runButton )
    # Load Settings:
    if not self.LoadPreferences():
        print("Note: 'Vertical Metrics Manager' could not load preferences. Will resort to defaults")
    # Open window and focus on it:
    self.w.open()
    self.w.makeKey()
def updateUI(self, sender=None):
    """Sync controls that depend on the 'all open fonts' checkbox state."""
    applyToAllFonts = self.w.allOpenFonts.get()
    # measuring all fonts implies all masters, so the checkbox is disabled then:
    self.w.includeAllMasters.enable(not applyToAllFonts)
    pluralSuffix = "s" if applyToAllFonts else ""
    self.w.runButton.setTitle("Apply to Font%s" % pluralSuffix)
def SavePreferences( self, sender ):
    """Write the current UI state into the Glyphs preferences. Returns True on success, False on any error."""
    prefix = "com.mekkablue.VerticalMetricsManager."
    try:
        # Checkbox/popup-style controls are stored with their raw .get() value:
        for key, control in (
            ("allOpenFonts", self.w.allOpenFonts),
            ("preferSelectedGlyphs", self.w.preferSelectedGlyphs),
            ("preferCategory", self.w.preferCategory),
            ("preferScript", self.w.preferScript),
            ("ignoreNonExporting", self.w.ignoreNonExporting),
            ("includeAllMasters", self.w.includeAllMasters),
            ("respectMarkToBaseOffset", self.w.respectMarkToBaseOffset),
            ("round", self.w.round),
            ("roundValue", self.w.roundValue),
            ("useTypoMetrics", self.w.useTypoMetrics),
        ):
            Glyphs.defaults[prefix + key] = control.get()
        # Numeric entry fields are stored as integers read off the NSTextField:
        for key, control in (
            ("hheaGap", self.w.hheaGap),
            ("hheaDesc", self.w.hheaDesc),
            ("hheaAsc", self.w.hheaAsc),
            ("typoGap", self.w.typoGap),
            ("typoDesc", self.w.typoDesc),
            ("typoAsc", self.w.typoAsc),
            ("winDesc", self.w.winDesc),
            ("winAsc", self.w.winAsc),
        ):
            Glyphs.defaults[prefix + key] = int(control.getNSTextField().integerValue())
        self.updateUI()
    except:
        import traceback
        print(traceback.format_exc())
        return False
    return True
def LoadPreferences( self ):
    """Register fallback defaults, then push stored preferences into the UI. Returns True on success."""
    prefix = "com.mekkablue.VerticalMetricsManager."
    try:
        # Register fallback values so every key is guaranteed to exist:
        for key, fallback in (
            ("allOpenFonts", 0),
            ("preferSelectedGlyphs", 0),
            ("preferCategory", 0),
            ("preferScript", 0),
            ("ignoreNonExporting", 1),
            ("includeAllMasters", 1),
            ("respectMarkToBaseOffset", 0),
            ("round", 1),
            ("roundValue", 10),
            ("useTypoMetrics", 1),
            ("hheaGap", 0),
            ("hheaDesc", 0),
            ("hheaAsc", 0),
            ("typoGap", 0),
            ("typoDesc", 0),
            ("typoAsc", 0),
            ("winDesc", 0),
            ("winAsc", 0),
        ):
            Glyphs.registerDefault(prefix + key, fallback)
        # Checkbox-style controls take the stored value as-is
        # (widget attribute names match the preference keys):
        for key in (
            "allOpenFonts", "preferSelectedGlyphs", "preferCategory",
            "preferScript", "ignoreNonExporting", "includeAllMasters",
            "respectMarkToBaseOffset", "round", "roundValue", "useTypoMetrics",
        ):
            getattr(self.w, key).set(Glyphs.defaults[prefix + key])
        # Numeric entry fields are displayed as integer strings:
        for key in (
            "hheaGap", "hheaDesc", "hheaAsc",
            "typoGap", "typoDesc", "typoAsc",
            "winDesc", "winAsc",
        ):
            getattr(self.w, key).set("%i" % Glyphs.defaults[prefix + key])
        self.updateUI()
    except:
        import traceback
        print(traceback.format_exc())
        return False
    return True
def openURL( self, sender ):
    """Open the Vertical Metrics tutorial when the help button was pressed."""
    targetURL = None
    if sender == self.w.helpButton:
        targetURL = "https://glyphsapp.com/tutorials/vertical-metrics"
    if not targetURL:
        return
    import webbrowser
    webbrowser.open(targetURL)
def update(self, sender=None):
    """Recalculate values for whichever ↺ button was pressed, store them in the
    preferences, and refresh the UI.

    sender determines the branch: winUpdate, useTypoMetricsUpdate,
    hheaUpdate/typoUpdate, preferScriptUpdate, or preferCategoryUpdate.
    """
    Glyphs.clearLog() # clears macro window log

    # update settings to the latest user input:
    if not self.SavePreferences( self ):
        print("Note: 'Vertical Metrics Manager' could not write preferences.")

    frontmostFont = Glyphs.font
    allOpenFonts = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.allOpenFonts"]
    if allOpenFonts:
        theseFonts = Glyphs.fonts
    else:
        theseFonts = (frontmostFont,) # iterable tuple of frontmost font only

    print("\nVertical Metrics Manager\nUpdating values for:\n")
    for i, thisFont in enumerate(theseFonts):
        print("%i. %s:"%(i+1, thisFont.familyName))
        if thisFont.filepath:
            print(thisFont.filepath)
        else:
            print("⚠️ The font file has not been saved yet.")
        print()

    ignoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
    includeAllMasters = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.includeAllMasters"]
    shouldRound = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.round"]
    roundValue = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.roundValue"])
    respectMarkToBaseOffset = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.respectMarkToBaseOffset"]
    shouldLimitToScript = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferScript"]
    selectedScript = self.w.preferScriptPopup.getTitle()

    # win measurements:
    if sender == self.w.winUpdate:
        print("Determining OS/2 usWin values:\n")
        lowest, highest = 0.0, 0.0
        lowestGlyph, highestGlyph = None, None
        # respectMarkToBaseOffset:
        # NOTE(review): lowestBottomAnchor starts at 1.0, not 0.0 — looks like it is
        # meant so a bottom anchor at exactly y=0 is still recorded; confirm intent.
        highestTopAnchor, lowestBottomAnchor = 0.0, 1.0
        highestTopAnchorGlyph, lowestBottomAnchorGlyph = None, None
        largestTopMark, largestBottomMark = 0.0, 0.0
        largestTopMarkGlyph, largestBottomMarkGlyph = None, None
        fontReport = ""
        for i,thisFont in enumerate(theseFonts):
            if allOpenFonts:
                fontReport = "%i. %s, " % (i+1, thisFont.familyName)
            currentMaster = thisFont.selectedFontMaster
            for thisGlyph in thisFont.glyphs:
                if thisGlyph.export or not ignoreNonExporting:
                    scriptCheckOK = not shouldLimitToScript or thisGlyph.script == selectedScript # needed for respectMarkToBaseOffset
                    for thisLayer in thisGlyph.layers:
                        belongsToCurrentMaster = thisLayer.associatedFontMaster() == currentMaster
                        if belongsToCurrentMaster or includeAllMasters or allOpenFonts:
                            if thisLayer.isSpecialLayer or thisLayer.isMasterLayer:
                                lowestPointInLayer = thisLayer.bounds.origin.y
                                highestPointInLayer = lowestPointInLayer + thisLayer.bounds.size.height
                                if lowestPointInLayer < lowest:
                                    lowest = lowestPointInLayer
                                    lowestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                if highestPointInLayer > highest:
                                    highest = highestPointInLayer
                                    highestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                # respectMarkToBaseOffset: collect anchor extremes and mark spans
                                if respectMarkToBaseOffset and scriptCheckOK:
                                    if thisGlyph.category == "Mark":
                                        # marks: measure how far the outline reaches beyond the attachment anchor
                                        topAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="_top"]
                                        if topAnchors:
                                            topAnchor = topAnchors[0]
                                            topSpan = highestPointInLayer - topAnchor.y
                                            if topSpan > largestTopMark:
                                                largestTopMark = topSpan
                                                largestTopMarkGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                        bottomAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="_bottom"]
                                        if bottomAnchors:
                                            bottomAnchor = bottomAnchors[0]
                                            bottomSpan = abs(lowestPointInLayer - bottomAnchor.y)
                                            if bottomSpan > largestBottomMark:
                                                largestBottomMark = bottomSpan
                                                largestBottomMarkGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                    else:
                                        # bases: track the extreme positions of attachment anchors
                                        topAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="top"]
                                        if topAnchors:
                                            topAnchor = topAnchors[0]
                                            if topAnchor.y > highestTopAnchor:
                                                highestTopAnchor = topAnchor.y
                                                highestTopAnchorGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                        bottomAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="bottom"]
                                        if bottomAnchors:
                                            bottomAnchor = bottomAnchors[0]
                                            if bottomAnchor.y < lowestBottomAnchor:
                                                lowestBottomAnchor = bottomAnchor.y
                                                lowestBottomAnchorGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)

        print("Highest relevant glyph:")
        print("- %s (%i)" % (highestGlyph, highest))
        print()
        print("Lowest relevant glyph:")
        print("- %s (%i)" % (lowestGlyph, lowest))
        print()

        if respectMarkToBaseOffset:
            highestMarkToBase = highestTopAnchor+largestTopMark
            lowestMarkToBase = lowestBottomAnchor-largestBottomMark
            print("Highest top anchor:")
            print("- %s (%i)" % (highestTopAnchorGlyph, highestTopAnchor))
            print("Largest top mark span (_top to top edge):")
            print("- %s (%i)" % (largestTopMarkGlyph, largestTopMark))
            print("Highest possible mark-to-base: %i + %i = %i" % (highestTopAnchor, largestTopMark, highestMarkToBase))
            print()
            print("Lowest bottom anchor:")
            print("- %s (%i)" % (lowestBottomAnchorGlyph, lowestBottomAnchor))
            print("Largest bottom mark span (_bottom to bottom edge):")
            print("- %s (%i)" % (largestBottomMarkGlyph, largestBottomMark))
            print("Lowest possible mark-to-base: %i - %i = %i" % (lowestBottomAnchor, largestBottomMark, lowestMarkToBase))
            print()
            # widen the extremes if stacked mark-to-base exceeds the outline bounds:
            if lowestMarkToBase < lowest:
                lowest = lowestMarkToBase
            if highestMarkToBase > highest:
                highest = highestMarkToBase

        if shouldRound:
            highest = roundUpByValue(highest,roundValue)
            lowest = roundUpByValue(lowest,roundValue)

        winAsc = int(highest)
        winDesc = abs(int(lowest)) # usWinDescent is an unsigned integer
        print("Calculated values:")
        print("- usWinAscent: %s" % winAsc)
        print("- usWinDescent: %s" % winDesc)
        print()
        Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"] = winAsc
        Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"] = winDesc

    # Use Typo Metrics checkbox
    elif sender == self.w.useTypoMetricsUpdate:
        print("Use Typo Metrics (fsSelection bit 7) should always be YES.")
        Glyphs.defaults["com.mekkablue.VerticalMetricsManager.useTypoMetrics"] = 1

    # hhea and typo popups:
    elif sender in (self.w.hheaUpdate, self.w.typoUpdate):
        if sender == self.w.hheaUpdate:
            name = "hhea"
        else:
            name = "OS/2 sTypo"
        print("Determining %s values:\n" % name)

        lowest, highest = 0.0, 0.0
        lowestGlyph, highestGlyph = None, None
        shouldLimitToCategory = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferCategory"]
        shouldLimitToScript = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferScript"]
        shouldLimitToSelectedGlyphs = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferSelectedGlyphs"]
        selectedCategory = self.w.preferCategoryPopup.getTitle()
        selectedScript = self.w.preferScriptPopup.getTitle()

        if shouldLimitToSelectedGlyphs:
            selectedGlyphNames = [l.parent.name for l in frontmostFont.selectedLayers]
            if not selectedGlyphNames:
                # nothing selected: silently drop the limitation and reflect that in the UI
                print(u"⚠️ Ignoring limitation to selected glyphs because no glyphs are selected (in frontmost font).")
                shouldLimitToSelectedGlyphs = False
                Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferSelectedGlyphs"] = shouldLimitToSelectedGlyphs
                self.LoadPreferences()
        else:
            selectedGlyphNames = ()

        for i,thisFont in enumerate(theseFonts):
            if allOpenFonts:
                fontReport = "%i. %s, " % (i+1, thisFont.familyName)
            else:
                fontReport = ""
            currentMaster = thisFont.selectedFontMaster
            # ascender & descender calculation:
            for thisGlyph in thisFont.glyphs:
                exportCheckOK = not ignoreNonExporting or thisGlyph.export
                categoryCheckOK = not shouldLimitToCategory or thisGlyph.category == selectedCategory
                scriptCheckOK = not shouldLimitToScript or thisGlyph.script == selectedScript
                selectedCheckOK = not shouldLimitToSelectedGlyphs or thisGlyph.name in selectedGlyphNames
                if exportCheckOK and categoryCheckOK and scriptCheckOK and selectedCheckOK:
                    for thisLayer in thisGlyph.layers:
                        belongsToCurrentMaster = thisLayer.associatedFontMaster() == currentMaster
                        if belongsToCurrentMaster or includeAllMasters or allOpenFonts:
                            if thisLayer.isSpecialLayer or thisLayer.isMasterLayer:
                                lowestPointInLayer = thisLayer.bounds.origin.y
                                highestPointInLayer = lowestPointInLayer + thisLayer.bounds.size.height
                                if lowestPointInLayer < lowest:
                                    lowest = lowestPointInLayer
                                    lowestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
                                if highestPointInLayer > highest:
                                    highest = highestPointInLayer
                                    highestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)

        print("Highest relevant glyph:")
        print("- %s (%i)" % (highestGlyph, highest))
        print()
        print("Lowest relevant glyph:")
        print("- %s (%i)" % (lowestGlyph, lowest))
        print()

        if shouldRound:
            highest = roundUpByValue(highest,roundValue)
            lowest = roundUpByValue(lowest,roundValue)
        asc = int(highest)
        desc = int(lowest)

        # line gap calculation:
        xHeight = 0
        for thisFont in theseFonts:
            # determine highest x-height:
            for thisMaster in thisFont.masters:
                measuredX = thisMaster.xHeight
                if measuredX >= thisMaster.capHeight: # all caps font
                    measuredX = thisMaster.capHeight/2
                if measuredX > xHeight:
                    # FIX: keep the (possibly all-caps-adjusted) measuredX;
                    # the original assigned thisMaster.xHeight here, discarding the adjustment.
                    xHeight = measuredX
        if shouldRound:
            xHeight = roundUpByValue(xHeight, roundValue)

        # calculate linegap, based on highest x-height and calculated asc/desc values:
        #
        # TODO: verify
        # LineGap >= (yMax - yMin) - (Ascender - Descender
        # (source: https://docs.microsoft.com/en-us/typography/opentype/spec/recom)
        #
        idealLineSpan = abs(xHeight * 2.2)
        if shouldRound:
            idealLineSpan = roundUpByValue(idealLineSpan, roundValue)
        actualLineSpan = abs(asc)+abs(desc)
        if idealLineSpan > actualLineSpan:
            gap = idealLineSpan - actualLineSpan
            if shouldRound:
                gap = roundUpByValue(gap, roundValue)
        else:
            gap = 0

        print("Calculated values:")
        print("- %s Ascender: %i" % (name, asc))
        print("- %s Descender: %i" % (name, desc))
        print("- %s LineGap: %i" % (name, gap))
        print()

        if sender == self.w.hheaUpdate:
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"] = asc
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"] = desc
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"] = gap
        else:
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"] = asc
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"] = desc
            Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"] = gap

    # Updating "Limit to Script" popup:
    elif sender == self.w.preferScriptUpdate:
        scripts = []
        shouldIgnoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
        for thisGlyph in frontmostFont.glyphs:
            inclusionCheckOK = thisGlyph.export or not shouldIgnoreNonExporting
            if inclusionCheckOK and thisGlyph.script and not thisGlyph.script in scripts:
                scripts.append(thisGlyph.script)
        if scripts:
            self.w.preferScriptPopup.setItems(scripts)
            print(u"✅ Found scripts:\n%s" % ", ".join(scripts))
        else:
            msg = u"Found no glyphs belonging to any script in the frontmost font. Please double check."
            print("⚠️ %s"%msg)
            Message(title="Error Determining Scripts", message="Cannot determine list of scripts. %s"%msg, OKButton=None)

    # Updating "Limit to Category" popup:
    elif sender == self.w.preferCategoryUpdate:
        categories = []
        shouldIgnoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
        # FIX: scan the frontmost font, like the script branch above does;
        # the original iterated the stale loop variable thisFont (last font of theseFonts).
        for thisGlyph in frontmostFont.glyphs:
            inclusionCheckOK = thisGlyph.export or not shouldIgnoreNonExporting
            if inclusionCheckOK and not thisGlyph.category in categories:
                categories.append(thisGlyph.category)
        if categories:
            self.w.preferCategoryPopup.setItems(categories)
            print(u"✅ Found categories:\n%s" % ", ".join(categories))
        else:
            msg = u"Found no glyphs belonging to any category in the current font. Please double check."
            print("⚠️ %s"%msg)
            Message(title="Error Determining Categories", message="Cannot determine list of categories. %s"%msg, OKButton=None)

    # push the freshly stored values back into the UI and log them:
    self.LoadPreferences()

    print("hheaGap", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"])
    print("hheaDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"])
    print("hheaAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"])
    print("typoGap", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"])
    print("typoDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"])
    print("typoAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"])
    print("winDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"])
    print("winAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"])
def VerticalMetricsManagerMain( self, sender ):
	"""Apply button callback: write the metric values currently stored in the
	preferences into the font(s) as per-master custom parameters
	(typo/hhea/win ascender, descender, line gap), and set the font-level
	'Use Typo Metrics' parameter. A blank/invalid value deletes the
	corresponding parameter from every master.

	Args:
		sender: the vanilla button that triggered the callback (unused).
	"""
	try:
		Glyphs.clearLog() # clears macro window log
		print("Vertical Metrics Manager: setting parameters\n")
		# update settings to the latest user input:
		if not self.SavePreferences( self ):
			print("Note: 'Vertical Metrics Manager' could not write preferences.\n")
		# Read the values back from the stored preferences.
		# int() raises here if a field was left blank/non-numeric; the outer
		# except then reports it in the Macro Window.
		typoAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"])
		typoDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"])
		typoGap = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"])
		hheaAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"])
		hheaDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"])
		hheaGap = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"])
		winDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"])
		winAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"])
		# Custom parameter name -> value to write into each master:
		verticalMetricDict = {
			"typoAscender": typoAsc,
			"typoDescender": typoDesc,
			"typoLineGap": typoGap,
			"hheaAscender": hheaAsc,
			"hheaDescender": hheaDesc,
			"hheaLineGap": hheaGap,
			"winDescent": winDesc,
			"winAscent": winAsc,
		}
		allOpenFonts = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.allOpenFonts"]
		if allOpenFonts:
			theseFonts = Glyphs.fonts
		else:
			theseFonts = (Glyphs.font,) # iterable tuple of frontmost font only
		for i, thisFont in enumerate(theseFonts):
			print("\n\n🔠 %s%s:"%(
				"%i. "%(i+1) if allOpenFonts else "",
				thisFont.familyName,
			))
			if thisFont.filepath:
				print("📄 %s" % thisFont.filepath)
			else:
				print("⚠️ The font file has not been saved yet.")
			for verticalMetricName in sorted(verticalMetricDict.keys()):
				try:
					metricValue = int( verticalMetricDict[verticalMetricName] )
					print(u"🔢 %s: %i" % (verticalMetricName, metricValue))
					for thisMaster in thisFont.masters:
						thisMaster.customParameters[verticalMetricName] = metricValue
						print(u"  ✅ Master %s: custom parameter set." % thisMaster.name)
				except Exception:
					# FIX: was a bare 'except:' — narrowed to Exception so that
					# KeyboardInterrupt/SystemExit are not silently swallowed.
					# No valid value: remove the parameter from all masters instead.
					print(u"❌ %s: No valid value found. Deleting parameters:" % verticalMetricName)
					for thisMaster in thisFont.masters:
						if thisMaster.customParameters[verticalMetricName]:
							del thisMaster.customParameters[verticalMetricName]
							print(u"  ⚠️ Master %s: custom parameter removed." % thisMaster.name)
						else:
							print(u"  ❎ Master %s: no custom parameter found." % thisMaster.name)
			# Font-level 'Use Typo Metrics' (OS/2 fsSelection bit 7):
			useTypoMetrics = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.useTypoMetrics"]
			print(u"*️⃣ Use Typo Metrics (fsSelection bit 7)")
			if useTypoMetrics:
				thisFont.customParameters["Use Typo Metrics"] = True
				print(u"  ✅ Set Use Typo Metrics parameter to YES.")
			else:
				thisFont.customParameters["Use Typo Metrics"] = False
				print(u"  ⁉️ Set Use Typo Metrics parameter to NO. This is not recommended. Are you sure?")
		# Floating notification:
		Glyphs.showNotification(
			u"Vertical Metrics Set",
			u"Set vertical metrics in %i font%s. Detailed report in Macro Window." % (
				len(theseFonts),
				"" if len(theseFonts)==1 else "s",
			),
		)
	except Exception as e:
		# brings macro window to front and reports error:
		Glyphs.showMacroWindow()
		print("Vertical Metrics Manager Error: %s" % e)
		import traceback
		print(traceback.format_exc())
# Launch the dialog; the window opens itself at the end of __init__().
VerticalMetricsManager()
# Python 2/3 compatibility: new-style division, print function, unicode literals.
from __future__ import division, print_function, unicode_literals
try:
	# The 'future' package backports the Python-3 str type to Python 2.
	from builtins import str
except Exception as e:
	print("Warning: 'future' module not installed. Run 'sudo pip install future' in Terminal.")
# Script description shown in the Glyphs Scripts menu:
__doc__="""
Manage and sync ascender, descender and linegap values for hhea, OS/2 sTypo and OS/2 usWin.
"""
# vanilla: the UI toolkit used for the floating window below.
import vanilla
def cleanInt(numberString):
	"""Extract an int from arbitrary text by keeping only digit and sign characters.

	Only the characters "1234567890+-" survive the filtering; everything else
	(including a decimal point) is dropped, so "12.7" becomes 127, and
	"-15 units" becomes -15. The filtered text is parsed as a float, rounded,
	and returned as an int.

	Args:
		numberString: any object; it is converted to text first.

	Returns:
		int: the rounded value of the filtered text.

	Raises:
		ValueError: if no parseable number remains after filtering.
	"""
	# FIX: str() instead of unicode() — unicode is undefined under Python 3,
	# which this script otherwise supports (see the builtins.str shim on import).
	text = str(numberString)
	exportString = "".join(char for char in text if char in "1234567890+-")
	return int(round(float(exportString)))
def roundUpByValue(x, roundBy):
	"""Round x away from zero to the next multiple of roundBy.

	Exact multiples are returned unchanged; zero stays zero. The sign of x
	is preserved, so -12 rounded by 10 gives -20.

	Args:
		x: the number to round (int or float).
		roundBy: the step to round to.

	Returns:
		int: the rounded value.
	"""
	if x == 0:
		# Nothing to round, and avoids a division by zero when taking the sign.
		return 0
	sign = 1 if x > 0 else -1
	magnitude = abs(x)
	multiples = magnitude // roundBy
	if magnitude % roundBy:
		# Not an exact multiple: bump up to the next one.
		multiples += 1
	return int(multiples * roundBy * sign)
class VerticalMetricsManager( object ):
def __init__( self ):
	"""Build the Vertical Metrics Manager floating window and load stored preferences."""
	# Window geometry (resizable horizontally only):
	windowWidth = 330
	windowHeight = 410
	windowWidthResize = 100
	windowHeightResize = 0
	self.w = vanilla.FloatingWindow(
		( windowWidth, windowHeight ),
		"Vertical Metrics Manager",
		minSize = ( windowWidth, windowHeight ),
		maxSize = ( windowWidth + windowWidthResize, windowHeight + windowHeightResize ),
		autosaveName = "com.mekkablue.VerticalMetricsManager.mainwindow"
	)
	# Running layout cursor (linePos) plus margins and row height:
	linePos, inset, lineHeight = 12, 15, 22
	self.w.descriptionText = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Manage and sync hhea, typo and win values.", sizeStyle='small', selectable=True )
	linePos += lineHeight
	# Column headers for the three value columns:
	self.w.titleAscent = vanilla.TextBox( (inset+70, linePos+4, 70, 14), u"Ascender", sizeStyle='small', selectable=True )
	self.w.titleDescent = vanilla.TextBox( (inset+140, linePos+4, 70, 14), u"Descender", sizeStyle='small', selectable=True )
	self.w.titleLineGap = vanilla.TextBox( (inset+210, linePos+4, 70, 14), u"Line Gap", sizeStyle='small', selectable=True )
	linePos += lineHeight
	# Row: OS/2 usWin values (no line gap exists for usWin, hence read-only field):
	self.w.titleWin = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 usWin", sizeStyle='small', selectable=True )
	self.w.winAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.winAsc.getNSTextField().setToolTip_("OS/2 usWinAscent. Should be the maximum height in your font. Expect clipping or rendering artefacts beyond this point.")
	self.w.winDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.winDesc.getNSTextField().setToolTip_("OS/2 usWinDescent (unsigned integer). Should be the maximum depth in your font, like the lowest descender you have. Expect clipping or rendering artefacts beyond this point.")
	self.w.winGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=None, sizeStyle='small', readOnly=True, placeholder=u"n/a" )
	self.w.winGap.getNSTextField().setToolTip_("OS/2 usWinLineGap does not exist, hence greyed out here.")
	self.w.winUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
	self.w.winUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 usWin values in the fields to the left. Takes the measurement settings below into account, except for the Limit options.")
	linePos += lineHeight+4
	# Row: OS/2 sTypo values (bracket glyphs visually link them with hhea below):
	self.w.parenTypo = vanilla.TextBox( (inset-12, linePos+5, 15, 20), u"┏", sizeStyle='small', selectable=False )
	self.w.titleTypo = vanilla.TextBox( (inset, linePos+3, 70, 14), u"OS/2 sTypo", sizeStyle='small', selectable=True )
	self.w.typoAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.typoAsc.getNSTextField().setToolTip_("OS/2 sTypoAscender (positive value), should be the same as hheaAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in DTP and office apps and together with the line gap value, also in browsers.")
	self.w.typoDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.typoDesc.getNSTextField().setToolTip_("OS/2 sTypoDescender (negative value), should be the same as hheaDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
	self.w.typoGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.typoGap.getNSTextField().setToolTip_("OS/2 sTypoLineGap (positive value), should be the same as hheaLineGap. Should be either zero or a value for padding between lines that makes sense visually. Office apps insert this distance between the lines, browsers add half on top and half below each line, also for determining text object boundaries.")
	self.w.typoUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
	self.w.typoUpdate.getNSButton().setToolTip_("Will recalculate the OS/2 sTypo values in the fields to the left. Takes the measurement settings below into account.")
	linePos += lineHeight
	# Row: hhea values (should mirror sTypo; bracket glyphs indicate that):
	self.w.parenConnect = vanilla.TextBox( (inset-12, linePos-int(lineHeight/2)+4, 15, 20), u"┃", sizeStyle='small', selectable=False )
	self.w.parenHhea = vanilla.TextBox( (inset-12, linePos+3, 15, 20), u"┗", sizeStyle='small', selectable=False )
	self.w.titleHhea = vanilla.TextBox( (inset, linePos+3, 70, 14), u"hhea", sizeStyle='small', selectable=True )
	self.w.hheaAsc = vanilla.EditText( (inset+70, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.hheaAsc.getNSTextField().setToolTip_("hheaAscender (positive value), should be the same as OS/2 sTypoAscender. Should be the maximum height of the glyphs relevant for horizontal text setting in your font, like the highest accented uppercase letter, typically Aring or Ohungarumlaut. Used for first baseline offset in Mac office apps and together with the line gap value, also in Mac browsers.")
	self.w.hheaDesc = vanilla.EditText( (inset+140, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.hheaDesc.getNSTextField().setToolTip_("hheaDescender (negative value), should be the same as OS/2 sTypoDescender. Should be the maximum depth of the glyphs relevant for horizontal text setting in your font, like the lowest descender or bottom accent, typically Gcommaccent, Ccedilla, or one of the lowercase descenders (gjpqy). Together with the line gap value, used for line distance calculation in office apps and browsers.")
	self.w.hheaGap = vanilla.EditText( (inset+210, linePos, 65, 19), "", callback=self.SavePreferences, sizeStyle='small' )
	self.w.hheaGap.getNSTextField().setToolTip_("hheaLineGap (positive value), should be the same as OS/2 sTypoLineGap. Should be either zero or a value for padding between lines that makes sense visually. Mac office apps insert this distance between the lines, Mac browsers add half on top and half below each line, also for determining text object boundaries.")
	self.w.hheaUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
	self.w.hheaUpdate.getNSButton().setToolTip_("Will recalculate the hhea values in the fields to the left. Takes the measurement settings below into account.")
	linePos += lineHeight
	# Row: Use Typo Metrics checkbox (fsSelection bit 7):
	self.w.useTypoMetrics = vanilla.CheckBox( (inset+70, linePos, -inset, 20), u"Use Typo Metrics (fsSelection bit 7)", value=True, callback=self.SavePreferences, sizeStyle='small' )
	self.w.useTypoMetrics.getNSButton().setToolTip_("Should ALWAYS BE ON. Only uncheck if you really know what you are doing. If unchecked, line behaviour will be not consistent between apps and browsers because some apps prefer win values to sTypo values for determining line distances.")
	self.w.useTypoMetricsUpdate = vanilla.SquareButton( (inset+280, linePos, 20, 19), u"↺", sizeStyle='small', callback=self.update )
	self.w.useTypoMetricsUpdate.getNSButton().setToolTip_("Will reset the checkbox to the left to ON, because it should ALWAYS be on. Strongly recommended.")
	linePos += lineHeight*1.5
	# Section: measurement options controlling what the ↺ buttons measure:
	self.w.descriptionMeasurements = vanilla.TextBox( (inset, linePos+2, -inset, 14), u"Taking Measurements (see tooltips for info):", sizeStyle='small', selectable=True )
	linePos += lineHeight
	self.w.round = vanilla.CheckBox( (inset, linePos, 70, 20), u"Round by:", value=True, callback=self.SavePreferences, sizeStyle='small' )
	self.w.round.getNSButton().setToolTip_("Turn on if you want your values rounded. Recommended.")
	self.w.roundValue = vanilla.EditText( (inset+75, linePos, 60, 19), "10", callback=self.SavePreferences, sizeStyle='small' )
	self.w.roundValue.getNSTextField().setToolTip_("All value calculations will be rounded up to the next multiple of this value. Recommended: 10.")
	linePos += lineHeight
	self.w.includeAllMasters = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Include all masters (otherwise current master only)", value=True, callback=self.SavePreferences, sizeStyle='small' )
	self.w.includeAllMasters.getNSButton().setToolTip_("If checked, all masters will be measured. If unchecked, only the current master will be measured. Since vertical metrics should be the same throughout all masters, it also makes sense to measure on all masters.")
	linePos += lineHeight
	self.w.respectMarkToBaseOffset = vanilla.CheckBox( (inset, linePos, -inset, 20), "Include mark-to-base offset for OS/2 usWin", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.respectMarkToBaseOffset.getNSButton().setToolTip_("If checked will calculate the maximum possible height that can be reached with top-anchored marks, and the lowest depth with bottom-anchored marks, and use those values for the OS/2 usWin values. Strongly recommended for making fonts work on Windows if they rely on mark-to-base positioning (e.g. Arabic). Respects the ‘Limit to Script’ setting.")
	linePos += lineHeight
	self.w.ignoreNonExporting = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Ignore non-exporting glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.ignoreNonExporting.getNSButton().setToolTip_("If checked, glyphs that do not export will be excluded from measuring. Recommended. (Ignored for calculating the OS/2 usWin values.)")
	linePos += lineHeight
	# "Limit to ..." filters; the popups get populated by their ↺ buttons:
	self.w.preferSelectedGlyphs = vanilla.CheckBox( (inset, linePos, -inset, 20), u"Limit to selected glyphs", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.preferSelectedGlyphs.getNSButton().setToolTip_("If checked, only the current glyphs will be measured. Can be combined with the other Limit options. May make sense if you want your metrics to be e.g. Latin-CE-centric.")
	linePos += lineHeight
	self.w.preferScript = vanilla.CheckBox( (inset, linePos, inset+110, 20), u"Limit to script:", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.preferScript.getNSButton().setToolTip_("If checked, only measures glyphs belonging to the selected writing system. Can be combined with the other Limit options. (Ignored for calculating the OS/2 usWin values, but respected for mark-to-base calculation.)")
	self.w.preferScriptPopup = vanilla.PopUpButton( (inset+115, linePos+1, -inset-25, 17), (u"latin", u"greek"), sizeStyle='small', callback=self.SavePreferences )
	self.w.preferScriptPopup.getNSPopUpButton().setToolTip_("Choose a writing system ('script') you want the measurements to be limited to. May make sense to ignore other scripts if the font is intended only for e.g. Cyrillic. Does not apply to OS/2 usWin")
	self.w.preferScriptUpdate = vanilla.SquareButton( (-inset-20, linePos+1, -inset, 18), u"↺", sizeStyle='small', callback=self.update )
	self.w.preferScriptUpdate.getNSButton().setToolTip_("Update the script popup to the left with all scripts (writing systems) found in the current font.")
	linePos += lineHeight
	self.w.preferCategory = vanilla.CheckBox( (inset, linePos, inset+110, 20), u"Limit to category:", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.preferCategory.getNSButton().setToolTip_("If checked, only measures glyphs belonging to the selected glyph category. Can be combined with the other Limit options. (Ignored for calculating the OS/2 usWin values.)")
	self.w.preferCategoryPopup = vanilla.PopUpButton( (inset+115, linePos+1, -inset-25, 17), (u"Letter", u"Number"), sizeStyle='small', callback=self.SavePreferences )
	self.w.preferCategoryPopup.getNSPopUpButton().setToolTip_("Choose a glyph category you want the measurements to be limited to. It may make sense to limit only to Letter.")
	self.w.preferCategoryUpdate = vanilla.SquareButton( (-inset-20, linePos+1, -inset, 18), u"↺", sizeStyle='small', callback=self.update )
	self.w.preferCategoryUpdate.getNSButton().setToolTip_("Update the category popup to the left with all glyph categories found in the current font.")
	linePos += lineHeight
	self.w.allOpenFonts = vanilla.CheckBox( (inset, linePos-1, -inset, 20), u"⚠️ Read out and apply to ALL open fonts", value=False, callback=self.SavePreferences, sizeStyle='small' )
	self.w.allOpenFonts.getNSButton().setToolTip_(u"If activated, does not only measure the frontmost font, but all open fonts. Careful: when you press the Apply button, will also apply it to all open fonts. Useful if you have all font files for a font family open.")
	linePos += lineHeight
	# Bottom bar: help and run buttons:
	self.w.helpButton = vanilla.HelpButton((inset-2, -20-inset, 21, -inset+2), callback=self.openURL )
	self.w.helpButton.getNSButton().setToolTip_("Opens the Vertical Metrics tutorial (highly recommended) in your web browser.")
	self.w.runButton = vanilla.Button( (-120-inset, -20-inset, -inset, -inset), "Apply to Font", sizeStyle='regular', callback=self.VerticalMetricsManagerMain )
	self.w.runButton.getNSButton().setToolTip_("Insert the OS/2, hhea and fsSelection values above as custom parameters in the font. The number values will be inserted into each master. Blank values will delete the respective parameters.")
	self.w.setDefaultButton( self.w.runButton )
	# Restore last-used settings, then show the window:
	if not self.LoadPreferences():
		print("Note: 'Vertical Metrics Manager' could not load preferences. Will resort to defaults")
	self.w.open()
	self.w.makeKey()
def updateUI(self, sender=None):
	"""Sync dependent UI state: disable the per-master option when all
	open fonts are targeted, and pluralize the run button title."""
	targetsAllFonts = self.w.allOpenFonts.get()
	# Measuring all open fonts implies all masters, so the option is moot:
	self.w.includeAllMasters.enable(not targetsAllFonts)
	buttonTitle = "Apply to Fonts" if targetsAllFonts else "Apply to Font"
	self.w.runButton.setTitle(buttonTitle)
def SavePreferences( self, sender ):
	"""Persist every UI control value into Glyphs.defaults.

	Returns:
		bool: True on success, False if any value could not be stored
		(the traceback is printed to the Macro Window).
	"""
	prefix = "com.mekkablue.VerticalMetricsManager."
	try:
		# Checkboxes, popups and the round value: store the raw .get() value.
		for key in (
			"allOpenFonts", "preferSelectedGlyphs", "preferCategory", "preferScript",
			"ignoreNonExporting", "includeAllMasters", "respectMarkToBaseOffset",
			"round", "roundValue", "useTypoMetrics",
		):
			Glyphs.defaults[prefix + key] = getattr(self.w, key).get()
		# Metric text fields: store as integers.
		for key in (
			"hheaGap", "hheaDesc", "hheaAsc",
			"typoGap", "typoDesc", "typoAsc",
			"winDesc", "winAsc",
		):
			Glyphs.defaults[prefix + key] = int(getattr(self.w, key).getNSTextField().integerValue())
		self.updateUI()
	except:
		import traceback
		print(traceback.format_exc())
		return False
	return True
def LoadPreferences( self ):
	"""Register fallback defaults and push the stored values into the UI.

	Returns:
		bool: True on success, False if anything went wrong (the traceback
		is printed to the Macro Window).
	"""
	prefix = "com.mekkablue.VerticalMetricsManager."
	try:
		# Register fallbacks so Glyphs.defaults never returns None:
		for key, fallback in (
			("allOpenFonts", 0),
			("preferSelectedGlyphs", 0),
			("preferCategory", 0),
			("preferScript", 0),
			("ignoreNonExporting", 1),
			("includeAllMasters", 1),
			("respectMarkToBaseOffset", 0),
			("round", 1),
			("roundValue", 10),
			("useTypoMetrics", 1),
			("hheaGap", 0),
			("hheaDesc", 0),
			("hheaAsc", 0),
			("typoGap", 0),
			("typoDesc", 0),
			("typoAsc", 0),
			("winDesc", 0),
			("winAsc", 0),
		):
			Glyphs.registerDefault(prefix + key, fallback)
		# Checkboxes, popups and the round value take the stored value directly:
		for key in (
			"allOpenFonts", "preferSelectedGlyphs", "preferCategory", "preferScript",
			"ignoreNonExporting", "includeAllMasters", "respectMarkToBaseOffset",
			"round", "roundValue", "useTypoMetrics",
		):
			getattr(self.w, key).set( Glyphs.defaults[prefix + key] )
		# Metric text fields are displayed as integer strings:
		for key in (
			"hheaGap", "hheaDesc", "hheaAsc",
			"typoGap", "typoDesc", "typoAsc",
			"winDesc", "winAsc",
		):
			getattr(self.w, key).set( "%i" % Glyphs.defaults[prefix + key] )
		self.updateUI()
	except:
		import traceback
		print(traceback.format_exc())
		return False
	return True
def openURL( self, sender ):
	"""Help button callback: open the Vertical Metrics tutorial in the browser."""
	if sender == self.w.helpButton:
		import webbrowser
		webbrowser.open( "https://glyphsapp.com/tutorials/vertical-metrics" )
def update(self, sender=None):
	"""Callback for all ↺ buttons: measures the font(s) and writes the
	recalculated values into Glyphs.defaults, then reloads the UI.

	Which branch runs depends on `sender`: winUpdate (usWin values),
	useTypoMetricsUpdate (reset checkbox to ON), hheaUpdate/typoUpdate
	(asc/desc/gap from measured bounds), preferScriptUpdate /
	preferCategoryUpdate (repopulate the Limit popups).
	"""
	Glyphs.clearLog()
	# Flush current UI state into prefs before measuring:
	if not self.SavePreferences( self ):
		print("Note: 'Vertical Metrics Manager' could not write preferences.")
	frontmostFont = Glyphs.font
	allOpenFonts = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.allOpenFonts"]
	if allOpenFonts:
		theseFonts = Glyphs.fonts
	else:
		theseFonts = (frontmostFont,)
	# NOTE(review): theseFamilyNames is assigned but never used below.
	theseFamilyNames = [f.familyName for f in theseFonts]
	print("\nVertical Metrics Manager\nUpdating values for:\n")
	for i, thisFont in enumerate(theseFonts):
		print("%i. %s:"%(i+1, thisFont.familyName))
		if thisFont.filepath:
			print(thisFont.filepath)
		else:
			print("⚠️ The font file has not been saved yet.")
	print()
	# Measurement settings shared by the branches below:
	ignoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
	includeAllMasters = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.includeAllMasters"]
	shouldRound = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.round"]
	roundValue = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.roundValue"])
	respectMarkToBaseOffset = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.respectMarkToBaseOffset"]
	shouldLimitToScript = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferScript"]
	selectedScript = self.w.preferScriptPopup.getTitle()
	# Recalculating OS/2 usWin values:
	if sender == self.w.winUpdate:
		print("Determining OS/2 usWin values:\n")
		lowest, highest = 0.0, 0.0
		lowestGlyph, highestGlyph = None, None
		# NOTE(review): lowestBottomAnchor starts at 1.0 (not 0.0) — presumably
		# so an anchor exactly at y=0 still registers as "lowest"; confirm intent.
		highestTopAnchor, lowestBottomAnchor = 0.0, 1.0
		highestTopAnchorGlyph, lowestBottomAnchorGlyph = None, None
		largestTopMark, largestBottomMark = 0.0, 0.0
		largestTopMarkGlyph, largestBottomMarkGlyph = None, None
		fontReport = ""
		for i,thisFont in enumerate(theseFonts):
			if allOpenFonts:
				# Prefix glyph reports with the font they came from:
				fontReport = "%i. %s, " % (i+1, thisFont.familyName)
			currentMaster = thisFont.selectedFontMaster
			for thisGlyph in thisFont.glyphs:
				if thisGlyph.export or not ignoreNonExporting:
					# Script limit only affects the mark-to-base part of usWin:
					scriptCheckOK = not shouldLimitToScript or thisGlyph.script == selectedScript
					for thisLayer in thisGlyph.layers:
						belongsToCurrentMaster = thisLayer.associatedFontMaster() == currentMaster
						if belongsToCurrentMaster or includeAllMasters or allOpenFonts:
							# Only measure master layers and special (e.g. brace/bracket) layers:
							if thisLayer.isSpecialLayer or thisLayer.isMasterLayer:
								lowestPointInLayer = thisLayer.bounds.origin.y
								highestPointInLayer = lowestPointInLayer + thisLayer.bounds.size.height
								if lowestPointInLayer < lowest:
									lowest = lowestPointInLayer
									lowestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
								if highestPointInLayer > highest:
									highest = highestPointInLayer
									highestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
								# Optionally account for stacked mark-to-base positioning:
								if respectMarkToBaseOffset and scriptCheckOK:
									if thisGlyph.category == "Mark":
										# Marks: record how far the outline extends beyond its _top/_bottom anchor.
										topAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="_top"]
										if topAnchors:
											topAnchor = topAnchors[0]
											topSpan = highestPointInLayer - topAnchor.y
											if topSpan > largestTopMark:
												largestTopMark = topSpan
												largestTopMarkGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
										bottomAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="_bottom"]
										if bottomAnchors:
											bottomAnchor = bottomAnchors[0]
											bottomSpan = abs(lowestPointInLayer - bottomAnchor.y)
											if bottomSpan > largestBottomMark:
												largestBottomMark = bottomSpan
												largestBottomMarkGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
									else:
										# Bases: record the extreme top/bottom anchor positions.
										topAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="top"]
										if topAnchors:
											topAnchor = topAnchors[0]
											if topAnchor.y > highestTopAnchor:
												highestTopAnchor = topAnchor.y
												highestTopAnchorGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
										bottomAnchors = [a for a in thisLayer.anchorsTraversingComponents() if a.name=="bottom"]
										if bottomAnchors:
											bottomAnchor = bottomAnchors[0]
											if bottomAnchor.y < lowestBottomAnchor:
												lowestBottomAnchor = bottomAnchor.y
												lowestBottomAnchorGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
		print("Highest relevant glyph:")
		print("- %s (%i)" % (highestGlyph, highest))
		print()
		print("Lowest relevant glyph:")
		print("- %s (%i)" % (lowestGlyph, lowest))
		print()
		if respectMarkToBaseOffset:
			# Extend the extremes by the worst-case anchored mark:
			highestMarkToBase = highestTopAnchor+largestTopMark
			lowestMarkToBase = lowestBottomAnchor-largestBottomMark
			print("Highest top anchor:")
			print("- %s (%i)" % (highestTopAnchorGlyph, highestTopAnchor))
			print("Largest top mark span (_top to top edge):")
			print("- %s (%i)" % (largestTopMarkGlyph, largestTopMark))
			print("Highest possible mark-to-base: %i + %i = %i" % (highestTopAnchor, largestTopMark, highestMarkToBase))
			print()
			print("Lowest bottom anchor:")
			print("- %s (%i)" % (lowestBottomAnchorGlyph, lowestBottomAnchor))
			print("Largest bottom mark span (_bottom to bottom edge):")
			print("- %s (%i)" % (largestBottomMarkGlyph, largestBottomMark))
			print("Lowest possible mark-to-base: %i - %i = %i" % (lowestBottomAnchor, largestBottomMark, lowestMarkToBase))
			print()
			if lowestMarkToBase < lowest:
				lowest = lowestMarkToBase
			if highestMarkToBase > highest:
				highest = highestMarkToBase
		if shouldRound:
			highest = roundUpByValue(highest,roundValue)
			lowest = roundUpByValue(lowest,roundValue)
		# usWinDescent is stored as an unsigned value, hence abs():
		winAsc = int(highest)
		winDesc = abs(int(lowest))
		print("Calculated values:")
		print("- usWinAscent: %s" % winAsc)
		print("- usWinDescent: %s" % winDesc)
		print()
		# Results land in defaults; the final LoadPreferences() refreshes the UI.
		Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"] = winAsc
		Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"] = winDesc
	# Resetting the Use Typo Metrics checkbox:
	elif sender == self.w.useTypoMetricsUpdate:
		print("Use Typo Metrics (fsSelection bit 7) should always be YES.")
		Glyphs.defaults["com.mekkablue.VerticalMetricsManager.useTypoMetrics"] = 1
	# Recalculating hhea or OS/2 sTypo values (same measurement, different target):
	elif sender in (self.w.hheaUpdate, self.w.typoUpdate):
		if sender == self.w.hheaUpdate:
			name = "hhea"
		else:
			name = "OS/2 sTypo"
		print("Determining %s values:\n" % name)
		lowest, highest = 0.0, 0.0
		lowestGlyph, highestGlyph = None, None
		shouldLimitToCategory = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferCategory"]
		shouldLimitToScript = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferScript"]
		shouldLimitToSelectedGlyphs = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferSelectedGlyphs"]
		selectedCategory = self.w.preferCategoryPopup.getTitle()
		selectedScript = self.w.preferScriptPopup.getTitle()
		if shouldLimitToSelectedGlyphs:
			# Selection is always taken from the frontmost font:
			selectedGlyphNames = [l.parent.name for l in frontmostFont.selectedLayers]
			if not selectedGlyphNames:
				# Nothing selected: silently drop the limitation and sync UI.
				print(u"⚠️ Ignoring limitation to selected glyphs because no glyphs are selected (in frontmost font).")
				shouldLimitToSelectedGlyphs = False
				Glyphs.defaults["com.mekkablue.VerticalMetricsManager.preferSelectedGlyphs"] = shouldLimitToSelectedGlyphs
				self.LoadPreferences()
		else:
			selectedGlyphNames = ()
		for i,thisFont in enumerate(theseFonts):
			if allOpenFonts:
				fontReport = "%i. %s, " % (i+1, thisFont.familyName)
			else:
				fontReport = ""
			currentMaster = thisFont.selectedFontMaster
			for thisGlyph in thisFont.glyphs:
				# All four Limit filters must pass for the glyph to be measured:
				exportCheckOK = not ignoreNonExporting or thisGlyph.export
				categoryCheckOK = not shouldLimitToCategory or thisGlyph.category == selectedCategory
				scriptCheckOK = not shouldLimitToScript or thisGlyph.script == selectedScript
				selectedCheckOK = not shouldLimitToSelectedGlyphs or thisGlyph.name in selectedGlyphNames
				if exportCheckOK and categoryCheckOK and scriptCheckOK and selectedCheckOK:
					for thisLayer in thisGlyph.layers:
						belongsToCurrentMaster = thisLayer.associatedFontMaster() == currentMaster
						if belongsToCurrentMaster or includeAllMasters or allOpenFonts:
							if thisLayer.isSpecialLayer or thisLayer.isMasterLayer:
								lowestPointInLayer = thisLayer.bounds.origin.y
								highestPointInLayer = lowestPointInLayer + thisLayer.bounds.size.height
								if lowestPointInLayer < lowest:
									lowest = lowestPointInLayer
									lowestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
								if highestPointInLayer > highest:
									highest = highestPointInLayer
									highestGlyph = "%s%s, layer: %s" % (fontReport, thisGlyph.name, thisLayer.name)
		print("Highest relevant glyph:")
		print("- %s (%i)" % (highestGlyph, highest))
		print()
		print("Lowest relevant glyph:")
		print("- %s (%i)" % (lowestGlyph, lowest))
		print()
		if shouldRound:
			highest = roundUpByValue(highest,roundValue)
			lowest = roundUpByValue(lowest,roundValue)
		asc = int(highest)
		desc = int(lowest)
		# Determine the largest plausible x-height across all masters; an
		# x-height >= cap height is treated as broken and replaced by half
		# the cap height:
		xHeight = 0
		for thisFont in theseFonts:
			for thisMaster in thisFont.masters:
				measuredX = thisMaster.xHeight
				if measuredX >= thisMaster.capHeight:
					measuredX = thisMaster.capHeight/2
				if measuredX > xHeight:
					xHeight = thisMaster.xHeight
		if shouldRound:
			xHeight = roundUpByValue(xHeight, roundValue)
		# Line gap: pad asc+desc up to roughly 2.2× the x-height:
		idealLineSpan = abs(xHeight * 2.2)
		if shouldRound:
			idealLineSpan = roundUpByValue(idealLineSpan, roundValue)
		actualLineSpan = abs(asc)+abs(desc)
		if idealLineSpan > actualLineSpan:
			gap = idealLineSpan - actualLineSpan
			if shouldRound:
				gap = roundUpByValue(gap, roundValue)
		else:
			gap = 0
		print("Calculated values:")
		print("- %s Ascender: %i" % (name, asc))
		print("- %s Descender: %i" % (name, desc))
		print("- %s LineGap: %i" % (name, gap))
		print()
		# Store under hhea or typo keys depending on which button was pressed:
		if sender == self.w.hheaUpdate:
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"] = asc
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"] = desc
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"] = gap
		else:
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"] = asc
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"] = desc
			Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"] = gap
	# Updating "Limit to Script" popup with scripts found in the frontmost font:
	elif sender == self.w.preferScriptUpdate:
		scripts = []
		shouldIgnoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
		for thisGlyph in frontmostFont.glyphs:
			inclusionCheckOK = thisGlyph.export or not shouldIgnoreNonExporting
			if inclusionCheckOK and thisGlyph.script and not thisGlyph.script in scripts:
				scripts.append(thisGlyph.script)
		if scripts:
			self.w.preferScriptPopup.setItems(scripts)
			print(u"✅ Found scripts:\n%s" % ", ".join(scripts))
		else:
			msg = u"Found no glyphs belonging to any script in the frontmost font. Please double check."
			print("⚠️ %s"%msg)
			Message(title="Error Determining Scripts", message="Cannot determine list of scripts. %s"%msg, OKButton=None)
	# Updating "Limit to Category" popup:
	elif sender == self.w.preferCategoryUpdate:
		categories = []
		shouldIgnoreNonExporting = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.ignoreNonExporting"]
		# NOTE(review): iterates thisFont (a leftover loop variable from the
		# report loop above) while the script branch uses frontmostFont —
		# confirm whether frontmostFont was intended here as well.
		for thisGlyph in thisFont.glyphs:
			inclusionCheckOK = thisGlyph.export or not shouldIgnoreNonExporting
			if inclusionCheckOK and not thisGlyph.category in categories:
				categories.append(thisGlyph.category)
		if categories:
			self.w.preferCategoryPopup.setItems(categories)
			print(u"✅ Found categories:\n%s" % ", ".join(categories))
		else:
			msg = u"Found no glyphs belonging to any category in the current font. Please double check."
			print("⚠️ %s"%msg)
			Message(title="Error Determining Categories", message="Cannot determine list of categories. %s"%msg, OKButton=None)
	# Push the freshly computed defaults back into the UI and log them:
	self.LoadPreferences()
	print("hheaGap", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"])
	print("hheaDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"])
	print("hheaAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"])
	print("typoGap", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"])
	print("typoDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"])
	print("typoAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"])
	print("winDesc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"])
	print("winAsc", Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"])
def VerticalMetricsManagerMain( self, sender ):
	"""Apply the dialog's vertical metrics to fonts.

	Reads the eight metric values (typo/hhea ascender, descender, line gap;
	win ascent/descent) from Glyphs.defaults, writes each as a custom
	parameter on every master of the target font(s), sets the font-level
	"Use Typo Metrics" parameter, and posts a notification. Any metric
	whose stored value cannot be coerced to int has its parameter removed
	from all masters instead.
	"""
	try:
		Glyphs.clearLog()
		print("Vertical Metrics Manager: setting parameters\n")
		# NOTE(review): passes self twice — SavePreferences appears to expect
		# a sender argument; confirm against the rest of the class.
		if not self.SavePreferences( self ):
			print("Note: 'Vertical Metrics Manager' could not write preferences.\n")
		# Pull the eight values the dialog stored in the app-wide defaults.
		typoAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoAsc"])
		typoDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoDesc"])
		typoGap = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.typoGap"])
		hheaAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaAsc"])
		hheaDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaDesc"])
		hheaGap = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.hheaGap"])
		winDesc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winDesc"])
		winAsc = int(Glyphs.defaults["com.mekkablue.VerticalMetricsManager.winAsc"])
		# Custom-parameter name -> value to write on each master.
		verticalMetricDict = {
			"typoAscender": typoAsc,
			"typoDescender": typoDesc,
			"typoLineGap": typoGap,
			"hheaAscender": hheaAsc,
			"hheaDescender": hheaDesc,
			"hheaLineGap": hheaGap,
			"winDescent": winDesc,
			"winAscent": winAsc,
		}
		# Target either every open font or just the frontmost one.
		allOpenFonts = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.allOpenFonts"]
		if allOpenFonts:
			theseFonts = Glyphs.fonts
		else:
			theseFonts = (Glyphs.font,)
		for i, thisFont in enumerate(theseFonts):
			print("\n\n🔠 %s%s:"%(
				"%i. "%(i+1) if allOpenFonts else "",
				thisFont.familyName,
			))
			if thisFont.filepath:
				print("📄 %s" % thisFont.filepath)
			else:
				print("⚠️ The font file has not been saved yet.")
			for verticalMetricName in sorted(verticalMetricDict.keys()):
				try:
					# int() may raise for empty/non-numeric entries; handled below.
					metricValue = int( verticalMetricDict[verticalMetricName] )
					print(u"🔢 %s: %i" % (verticalMetricName, metricValue))
					for thisMaster in thisFont.masters:
						thisMaster.customParameters[verticalMetricName] = metricValue
						print(u" ✅ Master %s: custom parameter set." % thisMaster.name)
				except:
					# No usable value: scrub the parameter from every master.
					print(u"❌ %s: No valid value found. Deleting parameters:" % verticalMetricName)
					for thisMaster in thisFont.masters:
						if thisMaster.customParameters[verticalMetricName]:
							del thisMaster.customParameters[verticalMetricName]
							print(u" ⚠️ Master %s: custom parameter removed." % thisMaster.name)
						else:
							print(u" ❎ Master %s: no custom parameter found." % thisMaster.name)
			# OS/2 fsSelection bit 7 toggle, stored as a font-level parameter.
			useTypoMetrics = Glyphs.defaults["com.mekkablue.VerticalMetricsManager.useTypoMetrics"]
			print(u"*️⃣ Use Typo Metrics (fsSelection bit 7)")
			if useTypoMetrics:
				thisFont.customParameters["Use Typo Metrics"] = True
				print(u" ✅ Set Use Typo Metrics parameter to YES.")
			else:
				thisFont.customParameters["Use Typo Metrics"] = False
				print(u" ⁉️ Set Use Typo Metrics parameter to NO. This is not recommended. Are you sure?")
		Glyphs.showNotification(
			u"Vertical Metrics Set",
			u"Set vertical metrics in %i font%s. Detailed report in Macro Window." % (
				len(theseFonts),
				"" if len(theseFonts)==1 else "s",
			),
		)
	except Exception as e:
		# Surface the traceback in the Macro Window rather than failing silently.
		Glyphs.showMacroWindow()
		print("Vertical Metrics Manager Error: %s" % e)
		import traceback
		print(traceback.format_exc())
# Instantiate the script class, which builds and shows its window.
VerticalMetricsManager()
f73adf1bc3e952e9c8b790f0056b84b1b0be2d37 | 30,413 | py | Python | sublimeText3/Packages/SublimeCodeIntel/libs/koXMLTreeService.py | MoAnsir/dot_file_2017 | 5f67ef8f430416c82322ab7e7e001548936454ff | [
"MIT"
] | 2 | 2018-04-24T10:02:26.000Z | 2019-06-02T13:53:31.000Z | Data/Packages/SublimeCodeIntel/libs/koXMLTreeService.py | Maxize/Sublime_Text_3 | be620476b49f9a6ce2ca2cfe825c4e142e7e82b9 | [
"Apache-2.0"
] | 1 | 2016-02-10T09:50:09.000Z | 2016-02-10T09:50:09.000Z | Packages/SublimeCodeIntel/libs/koXMLTreeService.py | prisis/sublime-text-packages | 99ae8a5496613e27a75e5bd91723549b21476e60 | [
"MIT"
] | 2 | 2019-04-11T04:13:02.000Z | 2019-06-02T13:53:33.000Z | # ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
import time
import re
from cElementTree import TreeBuilder, XMLParser, Element
import logging
log = logging.getLogger("koXMLTreeService")
# log.setLevel(logging.INFO)
class recollector:
    """Registry of named regular expressions.

    Each added pattern may reference previously registered patterns with
    %(name)s interpolation; the expanded source is kept in ``regs`` and the
    compiled pattern in ``res``.
    """

    def __init__(self):
        self.res = {}   # name -> compiled pattern object
        self.regs = {}  # name -> expanded pattern source string

    def add(self, name, reg, mods=None):
        """Expand *reg* against already-registered sources, remember it,
        and compile it (with flags *mods* when truthy) to validate it."""
        expanded = reg % self.regs
        self.regs[name] = expanded
        if mods:
            compiled = re.compile(expanded, mods)
        else:
            compiled = re.compile(expanded)
        self.res[name] = compiled
# Shared registry of the XML lexing regexes used by the helpers below.
collector = recollector()
a = collector.add
# XML 1.0 grammar fragments: whitespace, name characters, attribute values.
a("S", "[ \\n\\t\\r]+")
a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]")
a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]")
a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*")
a("AttValSE", "\"[^<\"]*\"|'[^<']*'")
# name = "value" pairs inside a start tag's attribute text.
a("attrfinderRE", "(?:[\n \t]*)(%(Name)s)(?:%(S)s)?=(?:%(S)s)?(%(AttValSE)s)")
# xmlns / xmlns:prefix namespace declarations.
a("namespaces",
  'xmlns(?::(?P<prefix>\w+))?=(?P<ns>(?:")([^"]*?)(?:")|(?:\')([^\']*?)(?:\'))', re.S | re.U)
# A (possibly unterminated) start tag: optional prefix, local name, raw attribute blob.
a("tagpart",
  '(?:<(?![?!-/>\s]))((?:(?P<prefix>[^\s/>]+):)?(?P<name>[^:\s/>]+)?)(?:\s+(?P<data>[^/<>]*))?', re.S | re.U)
a("tags", '<!--.*?-->|%(tagpart)s(?:/)?>', re.S | re.U)
a("alltags", '<!--.*?-->|(<[^\[!>?-].*?>)', re.S | re.U)
a("QuoteSE", "\"[^\"]*\"|'[^']*'")
# <!DOCTYPE name PUBLIC|SYSTEM "data1" ["data2"] ...> declaration.
a("DOCTYPE",
  r'<!DOCTYPE\s+(?P<type>\S+)\s+(?P<ident>PUBLIC|SYSTEM)\s+(?P<data1>%(QuoteSE)s)\s*(?P<data2>%(QuoteSE)s)?\s*(?:\[|>)', re.S)
def getdoctype(text):
    """Return the document's doctype as a 4-tuple, or None.

    The tuple is (root element name, "PUBLIC"|"SYSTEM", public id, system id);
    for SYSTEM doctypes the public id slot is the empty string.
    """
    match = collector.res["DOCTYPE"].search(text)
    if match is None:
        return None
    groups = match.groupdict()
    # [1:-1] strips the surrounding quote characters.
    for key in ('data1', 'data2'):
        if groups[key]:
            groups[key] = groups[key][1:-1]
    if groups['ident'] == 'PUBLIC':
        return (groups['type'], groups['ident'], groups['data1'], groups['data2'])
    return (groups['type'], groups['ident'], "", groups['data1'])
def getattrs(text):
    """Parse ``name="value"`` pairs out of a start tag's attribute text.

    Returns a dict mapping attribute names to their unquoted values; an
    attribute whose value group is empty maps to the empty string.
    """
    pairs = collector.res["attrfinderRE"].findall(text)
    # value[1:-1] drops the surrounding quotes captured by the regex.
    return {name: (value[1:-1] if value else "") for name, value in pairs}
def currentTag(text):
    """Locate the first start tag in *text*.

    Returns (prefix, local name, attribute dict, match offset), or None
    when no start tag can be found.
    """
    match = collector.res["tagpart"].search(text)
    if match is None:
        return None
    parts = match.groupdict()
    attrs = getattrs(parts['data']) if parts['data'] else {}
    return (parts['prefix'], parts['name'], attrs, match.start(0))
def elementFromTag(tree, tag, parent=None):
    """Build an Element from a currentTag() tuple and register it on *tree*.

    *tag* is (prefix, name, attrs, offset). Namespace resolution order:
    the tree's known prefix map, an xmlns:prefix attribute on this tag,
    a default xmlns attribute, then the parent's namespace. Consumed
    xmlns declarations are removed from the attribute dict and recorded
    in tree.prefixmap. The new element is appended to *parent* (when
    given) and to the tree's nodes/nodemap/tags bookkeeping.
    """
    tagName = tag[1]
    if not tagName:
        tagName = ""
    ns = None
    if tag[0]:
        if tag[0] in tree.prefixmap:
            ns = tree.prefixmap[tag[0]]
        else:
            nsattr = "xmlns:%s" % tag[0]
            if nsattr in tag[2]:
                # Consume the declaration so it is not kept as an attribute.
                ns = tag[2][nsattr]
                del tag[2][nsattr]
                tree.prefixmap[tag[0]] = ns
    elif "xmlns" in tag[2]:
        ns = tag[2]["xmlns"]
        del tag[2]["xmlns"]
    elif parent is not None:
        ns = parent.ns
    # (removed dead assignment `localName = tag`: it bound the whole tuple
    # and was never read)
    if ns:
        # Clark notation: {namespace-uri}localname
        tagName = "{%s}%s" % (ns, tagName)
    elem = Element(tagName, tag[2])
    try:
        # err_info is (line, col, offset) of where parsing stopped.
        elem.start = tree.err_info
        elem.end = None
    except:
        # will happen when parsing with an unpatched cElementTree
        pass
    if parent is not None:
        parent.append(elem)
    tree.nodemap[elem] = parent
    tree.nodes.append(elem)
    if elem.ns is not None:
        if elem.ns not in tree.tags:
            tree.tags[elem.ns] = {}
        tree.tags[elem.ns][elem.localName] = elem
    return elem
def elementFromText(tree, text, parent=None):
    """Parse the first start tag in *text* into an Element on *tree*.

    Returns the new Element (attached to *parent* when given), or None
    when *text* contains no recognizable start tag.
    """
    tag = currentTag(text)
    if tag is None:
        return None
    return elementFromTag(tree, tag, parent)
class iterparse:
    """iterparse that catches syntax errors so we can still handle any
    events that happen prior to the syntax error"""

    def __init__(self, content, events=("start", "end", "start-ns", "end-ns")):
        self.content = content
        self._events = events
        self.err = None       # first SyntaxError raised by the parser, if any
        self.err_info = None  # (line, column, byte index) at that error
        self.root = None      # root element once close() succeeds

    def __iter__(self):
        # NOTE(review): relies on cElementTree's non-public
        # XMLParser._setevents hook to collect events into a plain list.
        events = []
        b = TreeBuilder()
        p = XMLParser(b)
        p._setevents(events, self._events)
        try:
            p.feed(self.content)
        except SyntaxError as e:
            # Remember where parsing broke; events gathered so far are
            # still yielded below.
            self.err = e
            self.err_info = (
                p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
        for event in events:
            yield event
        del events[:]
        try:
            self.root = p.close()
        except SyntaxError as e:
            # if we had a previous syntax error, keep it
            if not self.err:
                self.err = e
                self.err_info = (
                    p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
        # Flush any events produced while closing the parser.
        for event in events:
            yield event
def bisect_left_nodes_start(a, x, lo=0, hi=None):
    """A version of bisect.bisect_left which compares nodes based on their
    start position.

    *x* is a (line, col) pair; each node's ``.start`` is compared by its
    first two fields. A node with no start position, or an exact match,
    ends the search at that index immediately.
    """
    if hi is None:
        hi = len(a)
    while lo < hi:
        middle = (lo + hi) // 2
        position = a[middle].start
        if position is None:
            return middle
        if position[:2] == x:
            return middle
        if position[:2] < x:
            lo = middle + 1
        else:
            hi = middle
    return lo
class XMLDocument(object):
    """A position-aware parse of an XML document.

    Tolerates broken/partial XML: keeps the flat node list, parent links,
    namespace maps and doctype gathered before the first syntax error,
    and can locate the element at a given line/column for editor use.
    """

    def __init__(self, content=None):
        self.content = content
        self.reset()
        if self.content:
            self.getDoctype()

    def getDoctype(self):
        """Extract the <!DOCTYPE> declaration and cache its ids."""
        self.doctype = getdoctype(self.content)
        if self.doctype:
            # doctype is (name, PUBLIC|SYSTEM, publicId, systemId)
            self.publicId = self.doctype[2]
            self.systemId = self.doctype[3]

    def reset(self):
        """Clear all parse state so the document can be (re)parsed."""
        self.doctype = None
        self.publicId = None
        self.systemId = None
        self.err = None
        self.err_info = None  # (line, col, byte offset) of the first parse error
        self.root = None
        self.current = None  # deepest open element when parsing stopped
        self._rootnodes = []
        self.nodes = []  # flat list of all nodes
        self.tags = {}  # { namespace_uri: { tag_local_name: elem, ...} , ...}
        self.nodemap = {}  # {child_elem: parent_elem, ... }
        self.namespaces = []  # flat list of namespace uri's
        self.nsmap = {}  # { "http:/...": "xslt", ... }
        self.prefixmap = {}  # { "xslt": "http://.....", ... }

    def getRoots(self):
        # return a list of all nodes that have no parent
        if not self._rootnodes:
            self._rootnodes = [
                node for node in self.nodemap if self.nodemap[node] is None]
        return self._rootnodes

    def namespace(self, elem):
        """Namespace URI in effect for *elem* (falls back to the default ns)."""
        # print "%s:%s xmlns[%s]"%(self.prefix(elem),elem.localName,elem.ns)
        if hasattr(elem, "ns") and elem.ns:
            return elem.ns
        return self.nsmap.get("")

    def parent(self, elem):
        """Parent element of *elem*, or None for roots/unknown nodes."""
        return self.nodemap.get(elem)

    def qname(self, name):
        """Turn a Clark-notation tag ({uri}local) into prefix:local form."""
        if name and name[0] == '{':
            ns, ln = name[1:].split('}')
            prefix = self.nsmap.get(ns)
            if prefix:
                return "%s:%s" % (prefix, ln)
            return ln
        return name

    def isAncestorOf(self, node, child):
        """ Return true if child is a descendant of node """
        currentParent = self.parent(child)
        while currentParent != child and currentParent is not None:
            if node == currentParent:
                return True
            currentParent = self.parent(currentParent)
        return False

    def locateNode(self, line, col):
        """Return the element containing editor position (line, col)."""
        # nodes are 1-indexed, so we need to switch our indexing scheme
        line += 1
        # first look for the last node to start at or before the current
        # position
        idx = bisect_left_nodes_start(self.nodes, (line, col))-1
        if idx < 0:
            if self.nodes:
                return self.nodes[0]
            return None
        assert idx < len(self.nodes)
        node = self.nodes[idx]
        # that was easy. Now we may need to move up the parent chain
        # from this node if we are past the end of a node but before
        # the beginning of another, e.g. <foo><bar>asd</bar>|</foo>
        # -- the right node is foo, but the current value of node is 'bar'
        startPos = node.start[:2]
        # NOTE(review): a slice of None would already have raised above,
        # so this None check looks unreachable — kept as in the original.
        if startPos is None:  # if we're in a partial node, that's it
            return node
        if startPos[:2] == (line, col):  # if it's an exact match, that's it
            return node
        while node is not None:
            while node.end:
                # move up the parent chain until you get a parent
                # whose end is after the current location
                last_line, last_col = node.end[:2]
                if (last_line, last_col) < (line, col):
                    node = self.parent(node)
                    if node is None:
                        return node
                    continue
                break
            if node is not None and not node.end:
                # check it's parents and see if they have end markers
                pnode = self.parent(node)
                while pnode:
                    if pnode.end:
                        last_line, last_col = pnode.end[:2]
                        if (last_line, last_col) < (line, col):
                            node = pnode
                            break
                    pnode = self.parent(pnode)
                if node.end:
                    continue
            break
        return node

    def prefixFromNS(self, ns):
        """Prefix registered for namespace URI *ns* ('' for the default ns)."""
        if self.prefixmap.get("") == ns:
            return ""
        prefix = self.nsmap.get(ns)
        if not prefix:
            # fall back to the root element's namespace prefix
            prefix = self.nsmap.get(self.root.ns)
        return prefix

    def prefix(self, elem):
        """Namespace prefix for *elem*, or '' when it has no namespace."""
        if not hasattr(elem, "ns") or not elem.ns:
            return ""
        return self.prefixFromNS(elem.ns)

    def tagname(self, elem):
        """prefix:localName form of *elem*'s tag (no prefix when default ns)."""
        prefix = self.prefix(elem)
        if prefix:
            return "%s:%s" % (prefix, elem.localName)
        return elem.localName

    # matches a full end tag such as </prefix:name> or </name>
    _endtagRe = re.compile(r"(</(\w+:)?\w+>)", re.U)

    def parse(self, content=None):
        """Parse *content* (or the stored content), tolerating errors.

        Populates nodes/nodemap/tags/namespace maps from the event stream;
        on a syntax error, end_error() synthesizes an element for the
        partially typed tag at the error position.
        """
        self.reset()
        self.content = content
        if content:
            # first, find the doctype decl
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        elstack = [None]
        self.current = None
        tags = {}          # NOTE(review): unused local, kept as-is
        last_pos_ok = None  # NOTE(review): unused local, kept as-is
        iter = iterparse(self.content)
        for event, elem in iter:
            if event == "start":
                self.nodemap[elem] = self.current
                self.nodes.append(elem)
                if elem.ns not in self.tags:
                    self.tags[elem.ns] = {}
                self.tags[elem.ns][elem.localName] = elem
                elstack.append(elem)
                self.current = elem
            elif event == "end":
                if elem.end:
                    try:
                        pos = elem.end[2]
                        # put the end location at the end of the end tag
                        m = self._endtagRe.match(self.content[pos:])
                        if m and m.groups():
                            pos = pos + m.end(1)
                        if pos > 0:
                            # we want to be after the ">"
                            diff = pos - elem.end[2] + 1
                            elem.end = (elem.end[
                                0], elem.end[1] + diff, pos)
                    except IndexError as e:
                        # XXX FIXME BUG 56337
                        log.exception(e)
                        pass
                node = elstack.pop()
                if elstack[-1] is None:
                    # closed a top-level element
                    self._rootnodes.append(node)
                self.current = elstack[-1]
            elif event == "start-ns":
                # elem is a (prefix, uri) pair here
                self.namespaces.append(elem)
                self.prefixmap[elem[0]] = elem[1]
                self.nsmap[elem[1]] = elem[0]
            elif event == "end-ns":
                self.namespaces.pop()
        self.root = iter.root
        self.err = iter.err
        self.err_info = iter.err_info
        # set the root if we can
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        self.end_error(self.content)
        # if we still do not have a root, do it
        # now, as we should have a node
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        # release content
        self.content = None

    def end_error(self, content):
        """Synthesize an element for the partial tag at the error position."""
        if not self.err_info:
            return
        if not content:
            raise Exception("No content?")
        # create an element for the last part of the parse
        parent = self.current
        if self.err_info[2] >= 0:
            start = self.err_info[2]
        else:
            # slower: derive the byte offset from (line, col)
            p = 0
            for i in range(self.err_info[0] - 1):
                # use re.search("\r|\n|\r\n")
                p = content.find("\n", p + 1)
            start = p + self.err_info[1] + 1
        end = content.find("<", start+1)
        if end <= start:
            end = len(content)
        # fixup the start position
        start = content.rfind(">", 0, start) + 1
        if start >= end:
            return
        current = currentTag(content[start:end])
        if not current:
            return
        # fix error info to point at the partial tag itself
        start = start+current[3]
        line = content.count('\n', 0, start)
        col = start - content.rfind('\n', 0, start)
        self.err_info = (line, col, start)
        self.current = elem = elementFromTag(self, current, parent)

    def dump(self):
        """Print a diagnostic summary of the parse to stdout."""
        print("error ", self.err)
        print("error_info ", self.err_info)
        print("%d nodes created" % len(self.nodemap))
        print("doctype ", self.doctype)
        print("publicId ", self.publicId)
        print("systemId ", self.systemId)
        print(self.prefixmap)
        print(self.nsmap)
        print("root ", self.root)
        if self.root:
            print("root tag ", self.root.tag)
            print("root ns ", self.root.ns)
            print("root localName ", self.root.localName)
            print("root start ", self.root.start)
            print("root end ", self.root.end)
        print("tree.current ", self.current)
import HTMLTreeParser
class HTMLDocument(XMLDocument):
    """XMLDocument variant parsed with the tolerant HTMLTreeParser, so
    non-well-formed HTML still produces a usable tree."""

    def parse(self, content=None):
        """Parse *content* (or the previously stored content) into a tree."""
        if content:
            self.reset()
            self.content = content
            # pick up any <!DOCTYPE ...> declaration first
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        parser = HTMLTreeParser.Parser(HTMLTreeParser.HTMLTreeBuilder())
        parser.feed(content)
        self.root = parser.close()
        # mirror the builder's bookkeeping onto this document
        self.nodes = parser._builder.nodes
        self.nodemap = parser._builder.nodemap
        self._rootnodes = parser._builder._rootnodes
        self.current = parser._builder.current
class TreeService:
    """Parses XML/HTML content into documents and caches them per URI."""

    __treeMap = {}  # uri -> parsed document (class-level, shared cache)

    def __init__(self):
        pass

    def treeFromCache(self, uri):
        """Return the cached tree for *uri*, or None on a cache miss."""
        if uri in self.__treeMap:
            return self.__treeMap[uri]
        return None

    def getTreeForURI(self, uri, content=None):
        """Return a parsed tree for *uri*/*content*.

        A cache hit without fresh content is returned as-is; supplying
        content always reparses. Without content the file at *uri* is
        read (errors tolerated, yielding an empty tree). Content not
        starting with '<?xml' is treated as HTML.
        """
        if not uri and not content:
            return None
        tree = None
        if uri and uri in self.__treeMap:
            tree = self.__treeMap[uri]
            if not content:
                return tree
        if tree is None:
            if not content:
                # no content supplied: read it from disk
                try:
                    f = open(uri, 'r')
                    content = f.read(-1)
                    f.close()
                except IOError as e:
                    # ignore file errors and build an empty tree
                    content = ""
            if not content.startswith("<?xml"):
                tree = HTMLDocument()
            if tree is None:
                tree = XMLDocument()
        if content:
            tree.parse(content)
        if uri:
            self.__treeMap[uri] = tree
        return tree

    def getTreeForContent(self, content):
        """Parse *content* without associating it with any URI."""
        return self.getTreeForURI(None, content)
__treeservice = None  # lazily created module-wide TreeService singleton


def getService():
    """Return the shared TreeService instance, creating it on first use."""
    global __treeservice
    if __treeservice is None:
        __treeservice = TreeService()
    return __treeservice
if __name__ == "__main__":
import sys
# basic logging configuration
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
bigfile = "/Users/shanec/main/Apps/Komodo-devel/test/bigfile.xml"
fn = "/Users/shanec/main/Apps/Komodo-devel/src/samples/xslt_sample.xsl"
from elementtree.ElementTree import tostring
if 0:
# fn = "/Users/shanec/main/Apps/Komodo-devel/src/install/wix/feature-
# core.wxs"
t1 = time.clock()
tree = getService().getTreeForURI(bigfile)
t2 = time.clock()
print("cElementTree took ", (t2-t1))
tree.dump()
if 0:
f = open(bigfile, 'r')
content = f.read(-1)
f.close()
t1 = time.clock()
tree = HTMLDocument()
tree.parse(content)
t2 = time.clock()
print("HTMLBuilder took ", (t2-t1))
if 0:
print(currentTag("<xsl"))
print(currentTag("<xsl:"))
print(currentTag("<xsl:tag"))
print(currentTag("text><xsl:tag"))
# print nodemap
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<HEAD>
<TITLE>Mozilla Cross-Reference</TITLE>
<link HREF=http://www.activestate.com/global.css rel="stylesheet" type="text/css">
</HEAD>
<BODY BGCOLOR="#FFFFFF" TEXT="#000000"
LINK="#0000EE" VLINK="#551A8B" ALINK="#FF0000">
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td>
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td width="145"><a href=http://www.activestate.com/index.html><img src=http://www.activestate.com/img/Main_Logo_Border.gif width="167" height="66" border="0" alt="ActiveState Tool Corp."></a></td>
<td bgcolor="#000000" colspan=2 width="90%" align="center"><img src=http://www.activestate.com/img/Main_Banner.gif alt="Programming for the People."></td>
</tr>
</table>
<table width="100%" bgcolor="#000000" border="0" cellpadding="0" cellspacing="0">
<tr>
<td width="600">
<table width="600" border="0" cellpadding="0" cellspacing="3">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Products/index.html>Products</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Support/index.html>Support</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Corporate/index.html>About Us</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Contact_Us.html>Contact</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Site_Map.html>Site Map</a></td>
</tr>
</table>
</td>
<td class="mainnav" width="100%">
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100%"> </td>
<td class="mainnav" bgcolor="#000000" width="3"> </td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
<I>$treename</I>
<P>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<HTML>
<BODY>
<FORM><FIELDSET ><SELECT class=""><OPTGROUP >
"""
tree = getService().getTreeForContent(html)
tree = getService().getTreeForURI("newfile.txt", "")
tree = getService().getTreeForURI("newfile.txt", "<html>")
tree = getService().getTreeForURI("newfile.txt", "<html> <")
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
tree = getService().getTreeForURI("newfile.txt", "<table></table>\n\n\n\n")
node = tree.locateNode(2, 0)
assert node is None, "locateNode returned incorrect node"
node = tree.locateNode(0, 7)
assert node is not None, "locateNode returned incorrect node"
sys.exit(0)
xml = """
<c1><c2 a1="1" a2='1' a3='val'><e1 /><e2 f1="1" f2 = '33' /><c3 a='1'>blah</c3></c2 > </"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xxmlns="xyz" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="Class">
<html> <xsl:apply-imports/>
<xsl:
<xsl:apply-templates select="Order"/>
</html>
</xsl:template>
"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup><
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup> <
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current == tree.root, "current element is incorrect"
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:"""
tree = getService().getTreeForContent(xml)
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
assert tree.current.localName == "", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html """
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "html", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/><xsl:template"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "", "current element is incorrect"
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><body><p><ul><li><li><li></ul></body>
"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
html = """<!DOCTYPE h:html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<h:html xmlns:h='urn:test'"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
# from cElementTree import Element
# tag = u"{urn:test}test"
# print tag
# e = Element(tag, {})
# print e.localName
# print e.tag
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This sample XML file shows you ... -->
<Class>
<Order Name="TINAMIFORMES">
<Family Name="TINAMIDAE">
<Species attr="value">content.</Species>
<![CDATA[
This is a CDATA section
]]>
</Family>
</Order>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert len(tree.root[0][0][0]) == 0, "bad parent/child relationship"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<body
<!-- a comment -->
<title>
</title>
</html>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "body", "current element is incorrect"
assert tree.parent(
tree.current).localName == "html", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html
<body
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
| 35.281903 | 206 | 0.567553 |
import time
import re
from cElementTree import TreeBuilder, XMLParser, Element
import logging
log = logging.getLogger("koXMLTreeService")
class recollector:
def __init__(self):
self.res = {}
self.regs = {}
def add(self, name, reg, mods=None):
self.regs[name] = reg % self.regs
if mods:
self.res[name] = re.compile(self.regs[
name], mods)
else:
self.res[name] = re.compile(self.regs[
name])
collector = recollector()
a = collector.add
a("S", "[ \\n\\t\\r]+")
a("NameStrt", "[A-Za-z_:]|[^\\x00-\\x7F]")
a("NameChar", "[A-Za-z0-9_:.-]|[^\\x00-\\x7F]")
a("Name", "(?:%(NameStrt)s)(?:%(NameChar)s)*")
a("AttValSE", "\"[^<\"]*\"|'[^<']*'")
a("attrfinderRE", "(?:[\n \t]*)(%(Name)s)(?:%(S)s)?=(?:%(S)s)?(%(AttValSE)s)")
a("namespaces",
'xmlns(?::(?P<prefix>\w+))?=(?P<ns>(?:")([^"]*?)(?:")|(?:\')([^\']*?)(?:\'))', re.S | re.U)
a("tagpart",
'(?:<(?![?!-/>\s]))((?:(?P<prefix>[^\s/>]+):)?(?P<name>[^:\s/>]+)?)(?:\s+(?P<data>[^/<>]*))?', re.S | re.U)
a("tags", '<!--.*?-->|%(tagpart)s(?:/)?>', re.S | re.U)
a("alltags", '<!--.*?-->|(<[^\[!>?-].*?>)', re.S | re.U)
a("QuoteSE", "\"[^\"]*\"|'[^']*'")
a("DOCTYPE",
r'<!DOCTYPE\s+(?P<type>\S+)\s+(?P<ident>PUBLIC|SYSTEM)\s+(?P<data1>%(QuoteSE)s)\s*(?P<data2>%(QuoteSE)s)?\s*(?:\[|>)', re.S)
def getdoctype(text):
doctype = None
regex = collector.res["DOCTYPE"]
m = regex.search(text)
if m:
m = m.groupdict()
# [1:-1] is to strip quotes
if m['data1']:
m['data1'] = m['data1'][1:-1]
if m['data2']:
m['data2'] = m['data2'][1:-1]
if m['ident'] == 'PUBLIC':
doctype = (m['type'], m['ident'], m['data1'], m['data2'])
else:
doctype = (m['type'], m['ident'], "", m['data1'])
return doctype
def getattrs(text):
attrs = {}
regex = collector.res["attrfinderRE"]
match = regex.findall(text)
for a in match:
if a[1]:
attrs[a[0]] = a[1][1:-1]
else:
attrs[a[0]] = ""
return attrs
def currentTag(text):
m = collector.res["tagpart"].search(text)
if not m:
return None
td = m.groupdict()
ad = {}
if td['data']:
ad.update(getattrs(td['data']))
return (td['prefix'], td['name'], ad, m.start(0))
def elementFromTag(tree, tag, parent=None):
tagName = tag[1]
if not tagName:
tagName = ""
ns = None
if tag[0]:
if tag[0] in tree.prefixmap:
ns = tree.prefixmap[tag[0]]
else:
nsattr = "xmlns:%s" % tag[0]
if nsattr in tag[2]:
ns = tag[2][nsattr]
del tag[2][nsattr]
tree.prefixmap[tag[0]] = ns
elif "xmlns" in tag[2]:
ns = tag[2]["xmlns"]
del tag[2]["xmlns"]
elif parent is not None:
ns = parent.ns
localName = tag
if ns:
tagName = "{%s}%s" % (ns, tagName)
elem = Element(tagName, tag[2])
try:
elem.start = tree.err_info
elem.end = None
except:
# will happen when parsing with cElementTree
pass
# print elem.localName
if parent is not None:
parent.append(elem)
tree.nodemap[elem] = parent
tree.nodes.append(elem)
if elem.ns is not None:
if elem.ns not in tree.tags:
tree.tags[elem.ns] = {}
tree.tags[elem.ns][elem.localName] = elem
return elem
def elementFromText(tree, text, parent=None):
current = currentTag(text)
if current:
return elementFromTag(tree, current, parent)
return None
class iterparse:
def __init__(self, content, events=("start", "end", "start-ns", "end-ns")):
self.content = content
self._events = events
self.err = None
self.err_info = None
self.root = None
def __iter__(self):
events = []
b = TreeBuilder()
p = XMLParser(b)
p._setevents(events, self._events)
try:
p.feed(self.content)
except SyntaxError as e:
self.err = e
self.err_info = (
p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
for event in events:
yield event
del events[:]
try:
self.root = p.close()
except SyntaxError as e:
# if we had a previous syntax error, keep it
if not self.err:
self.err = e
self.err_info = (
p.CurrentLineNumber, p.CurrentColumnNumber, p.CurrentByteIndex)
for event in events:
yield event
def bisect_left_nodes_start(a, x, lo=0, hi=None):
    """Binary-search *a* (nodes ordered by ``.start``) for position *x*.

    *x* is a ``(line, col)`` pair; each node's ``.start`` is compared by its
    first two fields.  Returns the index of an exact match, the index of
    the first probed node whose ``.start`` is unset, or the left insertion
    point (like ``bisect.bisect_left``).
    """
    if hi is None:
        hi = len(a)
    while lo < hi:
        mid = (lo + hi) // 2
        start = a[mid].start
        if start is None:
            return mid
        key = start[:2]
        if key == x:
            return mid
        elif key < x:
            lo = mid + 1
        else:
            hi = mid
    return lo
class XMLDocument(object):
    """Error-tolerant XML parse tree with position bookkeeping.

    Wraps :class:`iterparse` and keeps flat maps of nodes, namespaces and
    parent links so callers can locate the element at a given (line, col)
    even when the document is incomplete or syntactically broken.
    """
    def __init__(self, content=None):
        self.content = content
        self.reset()
        if self.content:
            self.getDoctype()
    def getDoctype(self):
        """Extract the doctype declaration (and its public/system ids) from content."""
        self.doctype = getdoctype(self.content)
        if self.doctype:
            self.publicId = self.doctype[2]
            self.systemId = self.doctype[3]
    def reset(self):
        """Clear all parse state so the document can be (re)parsed."""
        self.doctype = None
        self.publicId = None
        self.systemId = None
        self.err = None
        self.err_info = None
        self.root = None
        self.current = None
        self._rootnodes = []
        self.nodes = []  # flat list of all nodes
        self.tags = {}  # { namespace_uri: { tag_local_name: elem, ...} , ...}
        self.nodemap = {}  # {child_elem: parent_elem, ... }
        self.namespaces = []  # flat list of namespace uri's
        self.nsmap = {}  # { "http:/...": "xslt", ... }
        self.prefixmap = {}  # { "xslt": "http://.....", ... }
    def getRoots(self):
        # return a list of all nodes that have no parent
        if not self._rootnodes:
            self._rootnodes = [
                node for node in self.nodemap if self.nodemap[node] is None]
        return self._rootnodes
    def namespace(self, elem):
        """Return the namespace uri of *elem*, falling back to the default ns."""
        # print "%s:%s xmlns[%s]"%(self.prefix(elem),elem.localName,elem.ns)
        if hasattr(elem, "ns") and elem.ns:
            return elem.ns
        # NOTE(review): nsmap maps uri -> prefix elsewhere, so .get("") looks
        # like it keys on an empty *uri* — confirm the intended fallback.
        return self.nsmap.get("")
    def parent(self, elem):
        """Return the parent element of *elem*, or None for roots/unknown nodes."""
        return self.nodemap.get(elem)
    def qname(self, name):
        """Turn a Clark-notation name '{uri}local' into 'prefix:local' when possible."""
        if name and name[0] == '{':
            ns, ln = name[1:].split('}')
            prefix = self.nsmap.get(ns)
            if prefix:
                return "%s:%s" % (prefix, ln)
            return ln
        return name
    def isAncestorOf(self, node, child):
        """Return True when *node* appears on *child*'s parent chain."""
        # print "asking if %r is an ancestor of %r" %( node, child)
        currentParent = self.parent(child)
        while currentParent != child and currentParent is not None:
            # print "\tparent =", currentParent
            if node == currentParent:
                # print "-->is a parent"
                return True
            currentParent = self.parent(currentParent)
        # print "-->isn't a parent"
        return False
    def locateNode(self, line, col):
        """Return the element that contains buffer position (*line*, *col*)."""
        # nodes are 1-indexed, so we need to switch our indexing scheme
        line += 1
        # first look for the last node to start at or before the current
        # position
        idx = bisect_left_nodes_start(self.nodes, (line, col))-1
        if idx < 0:
            if self.nodes:
                return self.nodes[0]
            return None
        assert idx < len(self.nodes)
        node = self.nodes[idx]
        # that was easy. Now we may need to move up the parent chain
        # from this node if we are past the end of a node but before
        # the beginning of another, e.g. <foo><bar>asd</bar>|</foo>
        # -- the right node is foo, but the current value of node is 'bar'
        startPos = node.start[:2]
        # NOTE(review): startPos comes from slicing, so it is never None here
        # unless node.start itself supports a None slice — likely dead check.
        if startPos is None:  # if we're in a partial node, that's it
            return node
        if startPos[:2] == (line, col):  # if it's an exact match, that's it
            return node
        # if idx == 0: return node # if we're at the toplevel, so be it
        while node is not None:
            while node.end:
                # move up the parent chain until you get a parent
                # whose end is after the current location
                last_line, last_col = node.end[:2]
                if (last_line, last_col) < (line, col):
                    node = self.parent(node)
                    if node is None:
                        return node
                    continue
                break
            if node is not None and not node.end:
                # check it's parents and see if they have end markers
                pnode = self.parent(node)
                while pnode:
                    if pnode.end:
                        last_line, last_col = pnode.end[:2]
                        if (last_line, last_col) < (line, col):
                            node = pnode
                            break
                    pnode = self.parent(pnode)
                if node.end:
                    continue
            break
        return node
    def prefixFromNS(self, ns):
        """Return the prefix registered for namespace uri *ns* ('' for default)."""
        if self.prefixmap.get("") == ns:
            return ""
        prefix = self.nsmap.get(ns)
        if not prefix:
            # fall back to the root element's namespace prefix
            prefix = self.nsmap.get(self.root.ns)
        return prefix
    def prefix(self, elem):
        """Return the namespace prefix for *elem*, or '' when it has no namespace."""
        if not hasattr(elem, "ns") or not elem.ns:
            return ""
        return self.prefixFromNS(elem.ns)
    def tagname(self, elem):
        """Return *elem*'s display name, 'prefix:local' when it is namespaced."""
        prefix = self.prefix(elem)
        if prefix:
            return "%s:%s" % (prefix, elem.localName)
        return elem.localName
    # matches a complete end tag, optionally prefixed, e.g. </xsl:template>
    _endtagRe = re.compile(r"(</(\w+:)?\w+>)", re.U)
    def parse(self, content=None):
        """Parse *content* (or previously stored content), tolerating errors.

        Populates nodes/nodemap/tags/namespaces, sets ``root`` and
        ``current``, records any syntax error in ``err``/``err_info`` and
        synthesizes a partial element for the broken tail via end_error().
        """
        self.reset()
        self.content = content
        if content:
            # first, find the doctype decl
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        elstack = [None]
        self.current = None
        tags = {}
        last_pos_ok = None
        iter = iterparse(self.content)
        for event, elem in iter:
            if event == "start":
                # print "%r %r %d %d %d" % (event, elem, elem.start[0],
                # elem.start[1], elem.start[2])
                self.nodemap[elem] = self.current
                self.nodes.append(elem)
                if elem.ns not in self.tags:
                    self.tags[elem.ns] = {}
                self.tags[elem.ns][elem.localName] = elem
                elstack.append(elem)
                self.current = elem
            elif event == "end":
                # print "%r %r %r %r" % (event, elem, elem.start, elem.end)
                if elem.end:
                    try:
                        pos = elem.end[2]
                        # print " len %d pos %d" % (len(self.content), pos)
                        # put the end location at the end of the end tag
                        m = self._endtagRe.match(self.content[pos:])
                        if m and m.groups():
                            pos = pos + m.end(1)
                        if pos > 0:
                            # we want to be after the ">"
                            diff = pos - elem.end[2] + 1
                            elem.end = (elem.end[
                                0], elem.end[1] + diff, pos)
                    except IndexError as e:
                        # XXX FIXME BUG 56337
                        log.exception(e)
                        pass
                node = elstack.pop()
                if elstack[-1] is None:
                    # closed a top-level element
                    self._rootnodes.append(node)
                self.current = elstack[-1]
            elif event == "start-ns":
                self.namespaces.append(elem)
                self.prefixmap[elem[0]] = elem[1]
                self.nsmap[elem[1]] = elem[0]
            elif event == "end-ns":
                self.namespaces.pop()
        self.root = iter.root
        self.err = iter.err
        self.err_info = iter.err_info
        # set the root if we can
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        self.end_error(self.content)
        # if we still do not have a root, do it
        # now, as we should have a node
        if self.root is None and self.nodes:
            self.root = self.nodes[0]
        # release content
        self.content = None
    def end_error(self, content):
        """Build a partial element for the unparsable tail recorded in err_info."""
        if not self.err_info:
            return
        if not content:
            raise Exception("No content?")
        # create an element for the last part of the parse
        parent = self.current
        if self.err_info[2] >= 0:
            start = self.err_info[2]
        else:
            # slower
            # print self.err_info
            p = 0
            for i in range(self.err_info[0] - 1):
                # use re.search("\r|\n|\r\n")
                p = content.find("\n", p + 1)
            start = p + self.err_info[1] + 1
        end = content.find("<", start+1)
        if end <= start:
            end = len(content)
        # fixup the start position
        start = content.rfind(">", 0, start) + 1
        if start >= end:
            return
        # print self.err_info
        # print content[start:end]
        current = currentTag(content[start:end])
        if not current:
            return
        # print "%s:%s %r %d" % current
        # fix error info
        start = start+current[3]
        line = content.count('\n', 0, start)
        col = start - content.rfind('\n', 0, start)
        self.err_info = (line, col, start)
        self.current = elem = elementFromTag(self, current, parent)
    def dump(self):
        """Print a debugging summary of the parse state to stdout."""
        print("error ", self.err)
        print("error_info ", self.err_info)
        print("%d nodes created" % len(self.nodemap))
        print("doctype ", self.doctype)
        print("publicId ", self.publicId)
        print("systemId ", self.systemId)
        print(self.prefixmap)
        print(self.nsmap)
        print("root ", self.root)
        if self.root:
            print("root tag ", self.root.tag)
            print("root ns ", self.root.ns)
            print("root localName ", self.root.localName)
            print("root start ", self.root.start)
            print("root end ", self.root.end)
        print("tree.current ", self.current)
import HTMLTreeParser
class HTMLDocument(XMLDocument):
    """XMLDocument variant parsed with the tolerant HTML tree builder."""

    def parse(self, content=None):
        """Parse *content* (or the previously stored content) as HTML.

        Populates root/nodes/nodemap/current from the HTMLTreeParser
        builder.  Raises when neither *content* nor ``self.content`` is
        available.
        """
        if content:
            self.reset()
            self.content = content
            # first, find the doctype decl
            self.getDoctype()
        elif not self.content:
            raise Exception("no content to parse")
        p = HTMLTreeParser.Parser(HTMLTreeParser.HTMLTreeBuilder())
        # feed the stored content: the `content` argument itself may be
        # None when the caller relies on previously assigned self.content
        p.feed(self.content)
        self.root = p.close()
        self.nodes = p._builder.nodes
        self.nodemap = p._builder.nodemap
        self._rootnodes = p._builder._rootnodes
        self.current = p._builder.current
class TreeService:
    """Parse-tree factory with a per-process cache keyed by URI."""

    __treeMap = {}  # map uri to elementtree

    def __init__(self):
        pass

    def treeFromCache(self, uri):
        """Return the cached tree for *uri*, or None when absent."""
        if uri in self.__treeMap:
            # print "tree cache hit for [%s]"%uri
            return self.__treeMap[uri]
        return None

    def getTreeForURI(self, uri, content=None):
        """Return a parsed document for *uri* and/or *content*.

        A cached tree is reused when *uri* is known and no fresh *content*
        is supplied.  Otherwise the content (read from *uri* when missing;
        file errors yield an empty tree) is parsed as HTML, or as XML when
        it starts with "<?xml", and the result is cached under *uri*.
        """
        if not uri and not content:
            return None
        tree = None
        if uri and uri in self.__treeMap:
            tree = self.__treeMap[uri]
            # print "tree cache hit for [%s]"%uri
            if not content:
                return tree
        if not tree:
            if not content:
                # get the content from disk; `with` guarantees the handle
                # is closed even if read() fails
                try:
                    with open(uri, 'r') as f:
                        content = f.read()
                except IOError:
                    # ignore file errors and return an empty tree
                    content = ""
            if not content.startswith("<?xml"):
                tree = HTMLDocument()
            if not tree:
                tree = XMLDocument()
        if content:
            tree.parse(content)
        if uri:
            self.__treeMap[uri] = tree
        return tree

    def getTreeForContent(self, content):
        """Parse *content* without touching the URI cache."""
        return self.getTreeForURI(None, content)
__treeservice = None

def getService():
    """Return the lazily created, process-wide TreeService singleton."""
    global __treeservice
    if __treeservice is None:
        __treeservice = TreeService()
    return __treeservice
if __name__ == "__main__":
import sys
# basic logging configuration
console = logging.StreamHandler()
console.setLevel(logging.INFO)
# set a format which is simpler for console use
formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')
# tell the handler to use this format
console.setFormatter(formatter)
# add the handler to the root logger
logging.getLogger('').addHandler(console)
bigfile = "/Users/shanec/main/Apps/Komodo-devel/test/bigfile.xml"
fn = "/Users/shanec/main/Apps/Komodo-devel/src/samples/xslt_sample.xsl"
from elementtree.ElementTree import tostring
if 0:
# fn = "/Users/shanec/main/Apps/Komodo-devel/src/install/wix/feature-
# core.wxs"
t1 = time.clock()
tree = getService().getTreeForURI(bigfile)
t2 = time.clock()
print("cElementTree took ", (t2-t1))
tree.dump()
if 0:
f = open(bigfile, 'r')
content = f.read(-1)
f.close()
t1 = time.clock()
tree = HTMLDocument()
tree.parse(content)
t2 = time.clock()
print("HTMLBuilder took ", (t2-t1))
if 0:
print(currentTag("<xsl"))
print(currentTag("<xsl:"))
print(currentTag("<xsl:tag"))
print(currentTag("text><xsl:tag"))
# print nodemap
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN" "http://www.w3.org/TR/xhtml11/DTD/xhtml11.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<HEAD>
<TITLE>Mozilla Cross-Reference</TITLE>
<link HREF=http://www.activestate.com/global.css rel="stylesheet" type="text/css">
</HEAD>
<BODY BGCOLOR="#FFFFFF" TEXT="#000000"
LINK="#0000EE" VLINK="#551A8B" ALINK="#FF0000">
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td>
<table width="100%" border="0" cellspacing="0" cellpadding="0">
<tr>
<td width="145"><a href=http://www.activestate.com/index.html><img src=http://www.activestate.com/img/Main_Logo_Border.gif width="167" height="66" border="0" alt="ActiveState Tool Corp."></a></td>
<td bgcolor="#000000" colspan=2 width="90%" align="center"><img src=http://www.activestate.com/img/Main_Banner.gif alt="Programming for the People."></td>
</tr>
</table>
<table width="100%" bgcolor="#000000" border="0" cellpadding="0" cellspacing="0">
<tr>
<td width="600">
<table width="600" border="0" cellpadding="0" cellspacing="3">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Products/index.html>Products</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Support/index.html>Support</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Corporate/index.html>About Us</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Contact_Us.html>Contact</a></td>
<td class="mainnav" bgcolor="#C2B266" width="100" align="center"><a href=http://www.activestate.com/Site_Map.html>Site Map</a></td>
</tr>
</table>
</td>
<td class="mainnav" width="100%">
<table width="100%" border="0" cellpadding="0" cellspacing="0">
<tr>
<td class="mainnav" bgcolor="#C2B266" width="100%"> </td>
<td class="mainnav" bgcolor="#000000" width="3"> </td>
</tr>
</table>
</td>
</tr>
</table>
</td>
</tr>
</table>
<I>$treename</I>
<P>
"""
tree = getService().getTreeForURI("Text.html", html)
print(tostring(tree.root))
html = """<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN">
<HTML>
<BODY>
<FORM><FIELDSET ><SELECT class=""><OPTGROUP >
"""
tree = getService().getTreeForContent(html)
tree = getService().getTreeForURI("newfile.txt", "")
tree = getService().getTreeForURI("newfile.txt", "<html>")
tree = getService().getTreeForURI("newfile.txt", "<html> <")
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
tree = getService().getTreeForURI("newfile.txt", "<table></table>\n\n\n\n")
node = tree.locateNode(2, 0)
assert node is None, "locateNode returned incorrect node"
node = tree.locateNode(0, 7)
assert node is not None, "locateNode returned incorrect node"
sys.exit(0)
xml = """
<c1><c2 a1="1" a2='1' a3='val'><e1 /><e2 f1="1" f2 = '33' /><c3 a='1'>blah</c3></c2 > </"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xxmlns="xyz" xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template match="Class">
<html> <xsl:apply-imports/>
<xsl:
<xsl:apply-templates select="Order"/>
</html>
</xsl:template>
"""
tree = getService().getTreeForContent(xml)
node = tree.locateNode(tree.current.start[0], tree.current.start[1])
assert node == tree.current, "locateNode returned incorrect node"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup><
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
# ensure we get the correct current node
xml = """<?xml version="1.0"?>
<!DOCTYPE window PUBLIC "-//MOZILLA//DTD XUL V1.0//EN" "http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<window xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">
<popupset id="editorTooltipSet">
<popup type="tooltip" id="editorTooltip" flex="1">
<description multiline="true" id="editorTooltip-tooltipText" class="tooltip-label" flex="1"/>
</popup> <
<popup type="autocomplete" id="popupTextboxAutoComplete"/>
</popupset>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "popupset", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current == tree.root, "current element is incorrect"
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:"""
tree = getService().getTreeForContent(xml)
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
assert tree.current.localName == "", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "stylesheet", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html """
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "html", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:template
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/><xsl:template"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "template", "current element is incorrect"
xml = """<?xml version="1.0"?>
<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
<xsl:output method="xml" indent="yes"/>
<xsl:
<xsl:template/>
"""
tree = getService().getTreeForContent(xml)
assert tree.current.localName == "", "current element is incorrect"
assert tree.current.tag == "{http://www.w3.org/1999/XSL/Transform}", "current element is incorrect"
html = """<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html><body><p><ul><li><li><li></ul></body>
"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
html = """<!DOCTYPE h:html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<h:html xmlns:h='urn:test'"""
tree = getService().getTreeForContent(html)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
# from cElementTree import Element
# tag = u"{urn:test}test"
# print tag
# e = Element(tag, {})
# print e.localName
# print e.tag
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!-- This sample XML file shows you ... -->
<Class>
<Order Name="TINAMIFORMES">
<Family Name="TINAMIDAE">
<Species attr="value">content.</Species>
<![CDATA[
This is a CDATA section
]]>
</Family>
</Order>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert len(tree.root[0][0][0]) == 0, "bad parent/child relationship"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html>
<body
<!-- a comment -->
<title>
</title>
</html>
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "body", "current element is incorrect"
assert tree.parent(
tree.current).localName == "html", "current element is incorrect"
xml = """<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">
<html
<body
"""
tree = getService().getTreeForContent(xml)
# print tostring(tree.root)
assert tree.current.localName == "html", "current element is incorrect"
| true | true |
f73ae0439a55adc2eb0dee40fd68949a88eaf15a | 532 | py | Python | router_logic/search.py | sdulaney/project-paul-eggtart | 52f6b099615df31ee40f161c14bfd13f0ea59370 | [
"AFL-3.0"
] | null | null | null | router_logic/search.py | sdulaney/project-paul-eggtart | 52f6b099615df31ee40f161c14bfd13f0ea59370 | [
"AFL-3.0"
] | 2 | 2021-02-08T20:45:03.000Z | 2021-04-30T21:08:59.000Z | router_logic/search.py | sdulaney/project-paul-eggtart | 52f6b099615df31ee40f161c14bfd13f0ea59370 | [
"AFL-3.0"
] | null | null | null | from flask_wtf import FlaskForm
from wtforms import TextAreaField, StringField, validators
from wtforms.validators import DataRequired
from fuzzywuzzy import fuzz, process
from TA_functions import *
def closest_match(search):
    """Return the single best fuzzy match for *search* among the known TA names."""
    return process.extract(search, get_ta_list(), limit=1)
def closest_5_match(search):
    """Return the names (scores dropped) of the five best fuzzy matches for *search*."""
    ranked = process.extract(search, get_ta_list(), limit=5)
    return [entry[0] for entry in ranked]
class searchBar(FlaskForm):
    """Single-field form holding the TA-name search query (1-50 characters)."""
    ta_name = StringField('ta_name', [validators.Length(min=1, max=50)])
| 29.555556 | 72 | 0.785714 | from flask_wtf import FlaskForm
from wtforms import TextAreaField, StringField, validators
from wtforms.validators import DataRequired
from fuzzywuzzy import fuzz, process
from TA_functions import *
def closest_match(search):
choices = get_ta_list()
return process.extract(search, choices, limit=1)
def closest_5_match(search):
choices = get_ta_list()
return [name[0] for name in process.extract(search, choices, limit=5)]
class searchBar(FlaskForm):
ta_name = StringField('ta_name', [validators.Length(min=1, max=50)])
| true | true |
f73ae0753f3b06383b38193a5fcf041fd0a05384 | 492 | py | Python | proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py | napsta32/proyectos_de_ley | 63b7737e194a0958f9e95ca92773887000867bc7 | [
"MIT"
] | 12 | 2016-07-27T06:23:52.000Z | 2021-09-08T16:09:52.000Z | proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py | napsta32/proyectos_de_ley | 63b7737e194a0958f9e95ca92773887000867bc7 | [
"MIT"
] | 58 | 2015-01-18T14:53:45.000Z | 2021-02-19T06:27:19.000Z | proyectos_de_ley/pdl/migrations/0002_proyecto_legislatura.py | napsta32/proyectos_de_ley | 63b7737e194a0958f9e95ca92773887000867bc7 | [
"MIT"
] | 10 | 2015-01-28T02:20:38.000Z | 2020-11-22T06:23:26.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-09-02 20:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``legislatura`` (legislative-period start year) column to Proyecto."""

    dependencies = [
        ('pdl', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='proyecto',
            name='legislatura',
            # NOTE(review): max_length has no effect on IntegerField — confirm
            # whether a CharField was intended; migrations are kept as written.
            field=models.IntegerField(default=2011, max_length=4),
            preserve_default=False,
        ),
    ]
| 22.363636 | 66 | 0.611789 |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pdl', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='proyecto',
name='legislatura',
field=models.IntegerField(default=2011, max_length=4),
preserve_default=False,
),
]
| true | true |
f73ae17099d28d789a44f0f1541f2ddc00471769 | 389 | py | Python | seleet/asgi.py | BuildForSDGCohort2/seleet-backend | 17830240936d294d67d5b7a9c750ff30946fc1b6 | [
"BSD-3-Clause"
] | null | null | null | seleet/asgi.py | BuildForSDGCohort2/seleet-backend | 17830240936d294d67d5b7a9c750ff30946fc1b6 | [
"BSD-3-Clause"
] | null | null | null | seleet/asgi.py | BuildForSDGCohort2/seleet-backend | 17830240936d294d67d5b7a9c750ff30946fc1b6 | [
"BSD-3-Clause"
] | null | null | null | """
ASGI config for seleet project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seleet.settings')
application = get_asgi_application()
| 22.882353 | 78 | 0.784062 |
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'seleet.settings')
application = get_asgi_application()
| true | true |
f73ae1906aa870e9491ee68db1750a27397161c8 | 40,223 | py | Python | lib/dashboard.py | thebeastadi/explainx | a96d5bc2b78d14bb9a60b3cca3e84b8fba846944 | [
"MIT"
] | 1 | 2020-12-25T14:33:40.000Z | 2020-12-25T14:33:40.000Z | lib/dashboard.py | thebeastadi/explainx | a96d5bc2b78d14bb9a60b3cca3e84b8fba846944 | [
"MIT"
] | null | null | null | lib/dashboard.py | thebeastadi/explainx | a96d5bc2b78d14bb9a60b3cca3e84b8fba846944 | [
"MIT"
] | null | null | null | from imports import *
from plotly_graphs import *
from protodash import *
from insights import *
from plotly_css import *
import pandasql as psql
import string
import random
import os
from apps import global_explanation, local_explanation, distribution, feature_interaction, cohort
from app import app
from what_if import *
from calculate_shap import *
from analytics import Analytics
from cohort_analysis import *
class dashboard():
    def __init__(self):
        """Initialise empty per-instance caches; real state is set up in find()."""
        super(dashboard, self).__init__()
        self.param = None                 # model/data config dict, filled by find()
        self.query_dict = dict()          # sql query -> filtered dataframe
        self.filtered_dataframe = dict()  # unused here; presumably legacy — TODO confirm
        self.feature_importance = dict()  # sql query -> cached graph dataframe
        self.pdp = dict()                 # sql query -> cached partial-dependence data
        self.summary_plot = dict()        # sql query -> cached summary-plot data
        self.feature_impact = dict()      # sql query -> cached feature-impact data
        self.multi_level_eda = dict()     # sql query -> cached multi-level EDA data
def create_dir(self, dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def random_string_generator(self):
random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
return random_str
def caching_data_exist_in_dict(self, sql_query, graph_type):
if graph_type == 'feature_importance':
if sql_query in self.feature_importance:
return True, self.feature_importance[sql_query]
return False, None
elif graph_type == 'pdp':
if sql_query in self.pdp:
return True, self.pdp[sql_query]
return False, None
elif graph_type == 'summary_plot':
if sql_query in self.summary_plot:
return True, self.summary_plot[sql_query]
return False, None
elif graph_type == 'feature_impact':
if sql_query in self.feature_impact:
return True, self.feature_impact[sql_query]
return False, None
elif graph_type == 'multi_level_eda':
if sql_query in self.multi_level_eda:
return True, self.multi_level_eda[sql_query]
return False, None
elif graph_type == 'filtered_df':
if sql_query in self.query_dict:
return True, self.query_dict[sql_query]
return False, None
    def caching_exists_in_file(self, sql_query, graph_type):
        """Check the on-disk cache for (*sql_query*, *graph_type*).

        Returns a triple ``(data_found, bookkeeping_file_readable,
        dataframe_or_None)``.  Any pandas/IO failure is treated as "no
        bookkeeping file yet" (first run).
        """
        self.create_dir("data_storage")
        self.create_dir("data_storage/{}".format(graph_type))
        try:
            dictionary_csv = pd.read_csv("./data_storage/filtered_df/dictionary_bkp.csv")
            # rows logged for this exact query, graph type and dashboard instance
            random_id = dictionary_csv[(dictionary_csv['sql'] == sql_query) & (dictionary_csv['type'] == graph_type)
                                       & (dictionary_csv['instance_id'] == self.instance_id)]
            random_id.drop_duplicates(['sql'], keep='last', inplace=True)
            if not random_id.empty:
                result = random_id['random_id'].iloc[0]
                dff = pd.read_csv("./data_storage/{}/{}.csv".format(graph_type, result))
                # print("{} {} {}".format(graph_type, "file exists", result))
                return True, True, dff
            return False, True, None
        except Exception as e:
            # missing bookkeeping csv (first run) or unreadable cache file
            return False, False, None
def creating_filtered_backup_file(self, sql_query, random_str, graph_type):
dict_bkp = pd.DataFrame(data={"sql": [sql_query], "random_id": [random_str], "type": [graph_type],
"instance_id": [self.instance_id]})
return dict_bkp
def store_data_in_csv(self, df, graph_type, random_str):
df.to_csv("./data_storage/{}/{}.csv".format(graph_type, random_str), index=False)
def store_data_in_dict(self, df, sql_query, graph_type):
if graph_type == 'feature_importance':
self.feature_importance[sql_query] = df
elif graph_type == 'pdp':
self.pdp[sql_query] = df
elif graph_type == 'summary_plot':
self.summary_plot[sql_query] = df
elif graph_type == 'feature_impact':
self.feature_impact[sql_query] = df
elif graph_type == 'multi_level_eda':
self.multi_level_eda[sql_query] = df
elif graph_type == 'filtered_df':
self.query_dict[sql_query] = df
    def caching_data_manager(self, df, sql_query, graph_type, calculated_funct=None, details_dict=None):
        """Return (possibly cached) data for *graph_type* computed over *sql_query*.

        On a cache hit the frame stored on disk is returned directly.  On a
        miss the query is run with pandasql, the filtered frame plus a
        bookkeeping row are written to disk, and for graph types other than
        'filtered_df' the supplied *calculated_funct* post-processes the
        frame before it too is cached.
        """
        status_file, file_exist, dff = self.caching_exists_in_file(sql_query, graph_type)
        if status_file:
            # print("{}/{}".format(graph_type, "exist in file"))
            return dff
        else:
            # print("{}/{}".format(graph_type, "don't exists"))
            random_str = self.random_string_generator()
            dict_bkp = self.creating_filtered_backup_file(sql_query, random_str, graph_type)
            # NOTE(review): sqldf resolves table names from locals(), so the
            # query is expected to reference the local `df` — confirm callers.
            dff = psql.sqldf(sql_query, locals())
            self.create_dir("data_storage/filtered_df")
            # append a bookkeeping row; write the header only on first creation
            dict_bkp.to_csv("./data_storage/{}/dictionary_bkp.csv".format("filtered_df"), mode='a',
                            header=file_exist is False,
                            index=False)
            dff.to_csv("./data_storage/filtered_df/{}.csv".format(random_str), mode='w',
                       header=file_exist is False,
                       index=False)
            results = dff
            if graph_type != 'filtered_df':
                results = calculated_funct(dff, self.param["is_classification"])
                self.store_data_in_csv(results, graph_type, random_str)
            return results
    def find(self, df, mode, param):
        """Initialise the dashboard state for *df* and launch the Dash app.

        Records column metadata and the param dict, creates a per-run
        instance id, registers usage analytics, sets up the Dash callback
        inputs for every feature slider, persists/loads a stable user id,
        then starts the UI via self.dash().  Always returns True.
        """
        self.available_columns = available_columns = list(df.columns)
        # columns produced by the explainer: raw "<col>_impact" plus rescaled copies
        original_variables = [col for col in df.columns if '_impact' in col]
        self.impact_variables = [col for col in original_variables if not '_rescaled' in col]
        self.y_variable = param["y_variable"]
        self.y_variable_predict = param["y_variable_predict"]
        self.param = param
        self.instance_id = self.random_string_generator()
        self.create_dir("data_storage")
        self.create_dir("data_storage/user")
        self.user_id = None
        self.df = df
        # usage analytics payload for this run
        self.analytics = Analytics()
        self.analytics['ip'] = self.analytics.finding_ip()
        self.analytics['mac'] = self.analytics.finding_address()
        self.analytics['instance_id'] = self.instance_id
        self.analytics['time'] = str(datetime.datetime.now())
        self.analytics['total_columns'] = len(self.available_columns)
        self.analytics['total_rows'] = len(self.df)
        self.analytics['os'] = self.analytics.finding_system()
        self.analytics['model_name'] = self.param["model_name"]
        self.analytics["function"] = 'explainx.ai'
        self.analytics["query"] = "all"
        self.analytics['finish_time'] = ''
        # one Dash Input per feature slider, plus the submit button
        self.callback_input = [Input(f + '_slider', 'value') for f in self.param["columns"]]
        self.callback_input.append(Input('submit-button-state', 'n_clicks'))
        # self.callback_input_prototype = [Input(f + '-slider', 'value') for f in self.param["columns"]]
        # self.callback_input_prototype.append(Input('btn-nclicks-1', 'n_clicks'))
        # same wiring as (component_id, property) pairs for the prototype view
        self.prototype_array = []
        for f in self.param["columns"]:
            self.prototype_array.append([f + '_slider', 'value'])
        self.prototype_array.append(['btn-nclicks-1', 'n_clicks'])
        # load the persisted user id, creating one on first run
        try:
            user_id = pd.read_csv("data_storage/user/user_id.csv")
            user_id.drop_duplicates(['id'], keep='first', inplace=True)
            user_id = user_id['id'].iloc[0]
            self.user_id = user_id
        except Exception as e:
            # print("inside user track" )
            user_id_val = self.random_string_generator()
            user_id_csv = pd.DataFrame(data={"id": [user_id_val]})
            user_id_csv.to_csv("data_storage/user/user_id.csv", index=False)
            self.user_id = user_id_val
        self.analytics['user_id'] = self.user_id
        self.analytics.insert_data()
        self.insights = insights(self.param)
        d = self.dash(df, mode)
        return True
def dash(self, df, mode):
y_variable = self.y_variable
g = plotly_graphs()
ca = cohortAnalysis()
y_variables = [col for col in df.columns if '_impact' in col]
original_variables = [col for col in df.columns if not '_impact' in col]
original_variables = [col for col in original_variables if not '_rescaled' in col]
array = []
PAGE_SIZE = 10
row_number = 1
# y_variable = "predict"
columns = ['index', '0', '1', '2', '3', '4']
dat = []
available_columns = list(df.columns)
# external_stylesheets = [dbc.themes.BOOTSTRAP,
# # {
# # 'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css',
# # 'rel': 'stylesheet'
# # }
# ]
# app = JupyterDash(__name__, external_stylesheets=external_stylesheets)
app.title = "explainX.ai - Main Dashboard"
PLOTLY_LOGO = "https://i.ibb.co/ZTWtVDV/explainx-logo.png"
menu = dbc.Row(
[
dbc.Col(dbc.NavItem(dbc.NavLink("Home", href="/apps/")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Global Explanation", href="/apps/global_explanation")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Local Explanation", href="/apps/local_explanation")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Feature Interaction", href="/apps/feature_interaction")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Distributions", href="/apps/distribution")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Cohort Analysis", href="/apps/cohort")),
style={'width': "150px", 'fontSize': '10px'})
],
no_gutters=True,
className="ml-auto flex-nowrap mt-3 mt-md-0",
align="center"
)
navbar = dbc.Navbar(
[
html.A(
# Use row and col to control vertical alignment of logo / brand
dbc.Row(
[
dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")),
dbc.Col(dbc.NavbarBrand("explainX.ai", className="ml-2",
style={'fontSize': '15px', 'color': 'black'})),
],
align="center",
no_gutters=True,
),
href="https://www.explainx.ai",
),
dbc.NavbarToggler(id="navbar-toggler"),
dbc.Collapse(menu, id="navbar-collapse", navbar=True),
],
color="light",
dark=True,
)
# add callback for toggling the collapse on small screens
@app.callback(
Output("navbar-collapse", "is_open"),
[Input("navbar-toggler", "n_clicks")],
[State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
if n:
return not is_open
return is_open
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/apps/global_explanation':
return global_explanation.global_explanation(original_variables)
elif pathname == '/apps/feature_interaction':
return feature_interaction.layout_interaction(original_variables, y_variables)
elif pathname == '/apps/distribution':
return distribution.layout_distribution(original_variables)
elif pathname == '/apps/local_explanation':
return local_explanation.layout_local(original_variables, columns, df.columns)
elif pathname == '/apps/cohort':
return cohort.cohort_layout(original_variables)
else:
return welcome_message
welcome_message = html.Div(
[
html.Div([
html.H3("Welcome to ExplainX.ai Explainable AI Dashboard"),
html.H4("Start explaining your model by exploring one of the following options.")
], style={'margin-left': "20px"}),
html.Div([
html.Div([
html.Img(
src='https://lh3.googleusercontent.com/uNkpoLQRIza6SSDk9fQed6bu6c1Q9zDkDjZqkRtF3cU97Smf2rS0soKo2ZQm32gJe8FyLYvNRUYJ-hqM7zs4esIAFchdPmgC4sHgbpxTKkGjm4dRK-NagI56K8vnXg8FH4s_Jct2RaQ',
className="global_explanation_image"),
dcc.Link("Global Explanation", href="/apps/global_explanation", className="link")
], className="welcome_box", id="global_explanation_deets"),
html.Div([
html.Img(
src='https://lh3.googleusercontent.com/LpM-N2QavBVxlMW6vKm5fmT5Qe3mYtI2q2OQc5kgZfW4kKOFlyidgrwT5C9zB3XHZncHvlGcFl82Si7bMDQN_PtLf4PThqdEUs1UwoUPeNHs0VYY6ICKlHi44ppZLZIMQvaWI4bMqm0',
className="global_explanation_image"),
dcc.Link("Local Level Explanation", href="/apps/local_explanation", className="link")
], className="welcome_box", id="local_explanation_deets"),
html.Div([
html.Img(
src='https://lh6.googleusercontent.com/GXnNtGlqQcsRXugwwlZ31SwrJda4Z1WVyIi5i13vZImrUY6YgIZvM81d7zrBOh8n2jK_Lqj3BB5IqS4dU5l680-rZuu40bMXa-EAd8ag3WDIU2SNYieg0DH3e_FwUq5f2-y-X_POE-k',
className="global_explanation_image"),
dcc.Link("Feature Interactions", href="/apps/feature_interaction", className="link")
], className="welcome_box", id="feature_interaction_deets"),
html.Div([
html.Img(
src='https://uploads-ssl.webflow.com/5edc18e72f7ba21e1ed2efae/5f519fd99580494301e67b7b_pipeline.png',
className="global_explanation_image"),
dcc.Link("Cohort Analysis", href="/apps/cohort", className="link")
], className="welcome_box", id="cohort_analysis_deets")
], className="main_welcome_div"),
]
)
app.layout = html.Div([
navbar,
html.Div([
dbc.Card(
[
dbc.CardHeader(
html.H2(
dbc.Button(
"Analyze using SQL",
id="collapse-button-2",
color="link",
style={'fontSize': '10px', 'margin-top': '0px'}), style={"margin-top": "0px"}),
style={"height": "50px"}),
dbc.Collapse(html.Div([
html.Div(dcc.Input(
id='input-on-submit',
type='text',
# placeholder="SELECT * FROM df"
value="SELECT * FROM df",
style={'height': '200px', 'width': '700px', 'fontSize': '10px'})),
html.Button('Execute Query', id='submit-val', n_clicks=1),
html.Div(id='sql-query-button',
children='Enter a value and press submit',
style={'display': 'none'}
)
], style={'marginTop': 0}), id="collapse-2"),
]
),
], style=style4),
html.Div([
dbc.Card(
[
dbc.CardHeader(
html.H2(
dbc.Button(
"View Your Data",
id="collapse-button",
color="link",
style={'fontSize': '10px'}), style={"margin-top": "0px"}),
style={"height": "50px"}),
dbc.Collapse(html.Div([
html.H4('',
style=style1),
html.Div([
dash_table.DataTable(
id='datatable-interactivity',
columns=[
{"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns
],
# data=df.to_dict('records'),
editable=True,
sort_mode="multi",
# selected_columns=[],
# selected_rows=[],
# page_action="native",
page_current=0,
page_size=PAGE_SIZE,
row_selectable='multi',
page_action='custom',
style_table=style2,
style_header=style3,
style_cell={
"font-family": "Helvetica, Arial, sans-serif",
"fontSize": "11px",
'width': '{}%'.format(len(df.columns)),
'textOverflow': 'ellipsis',
'overflow': 'hidden',
'textAlign': 'left',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
},
css=[
{
'selector': '.dash-spreadsheet td div',
'rule': '''
line-height: 15px;
max-height: 20px; min-height: 20px; height: 20px;
display: block;
overflow-y: hidden;
'''
}])
])
], style={'marginTop': 0}), id="collapse"),
]
),
],
style=style4),
# Pages Data
html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
# end of collapsable div
], className="main_div")
# Navigation
# Data Interactivity
@app.callback(
Output('datatable-interactivity', 'data'),
[Input('datatable-interactivity', "page_current"),
Input('datatable-interactivity', "page_size")])
def update_table(page_current, page_size):
return df.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records')
# Collapse-Toggle
@app.callback(
Output("collapse", "is_open"),
[Input("collapse-button", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Collapse-Toggle 2
@app.callback(
Output("collapse-2", "is_open"),
[Input("collapse-button-2", "n_clicks")],
[State("collapse-2", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Cohort Analysis - Callbacks
@app.callback(
Output("modal", "is_open"),
[Input("open", "n_clicks"), Input("close", "n_clicks")],
[State("modal", "is_open")],
prevent_initial_call=True)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
[Output("cohort-metrics-div", "children"),
Output("cohort-details", "children"),
Output("cohort-graph", "children")],
[Input("add-cohort", "n_clicks"),
Input("remove-cohort", "n_clicks"),
Input("x-axis", "value")],
[State("demo-dropdown", "value"),
State("demo-operators", "value"),
State("demo-values", "value")])
def cohort_metrics_details(add_cohort, remove_cohort, x_axis, var_name, operator, value):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'remove-cohort' in changed_id:
ca.remove_cohort()
fig = ca.cohort_graph(x_axis)
return ca.cohort_metrics_details(), ca.cohort_details(), dcc.Graph(figure=fig),
else:
ca.add_cohort_metrics(df, var_name, operator, value, self.param["is_classification"])
cohort = ca.add_cohort(df, x_axis, var_name, operator, value)
fig = ca.cohort_graph(x_axis)
return ca.cohort_metrics_details(), ca.cohort_details(), dcc.Graph(figure=fig)
# SQL - Data Input Callback
@app.callback(
dash.dependencies.Output('sql-query-button', 'children'),
[dash.dependencies.Input('submit-val', 'n_clicks')],
[dash.dependencies.State('input-on-submit', 'value')])
def update_output(n_clicks, value):
sql_query = f'{value}'
return sql_query
# What-If Form CallBack
@app.callback(
Output('place_form_here', 'children'),
[Input('row_number', 'value')])
def create_what_if_form(row_number):
self.analytics.update_data()
self.analytics['function'] = "what_if"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = row_number
self.analytics.insert_data()
self.analytics['finish_time'] = ''
x = what_if()
i = 0
if type(row_number) == type(1):
i = row_number
array = df[i:i + 1]
array1 = array
impact_variables = [col for col in array if '_impact' in col]
for col in impact_variables:
array1.drop([col], axis=1, inplace=True)
# features = [col for col in array if not '_impact' in col]
features = list(self.param["columns"])
features.append("y_prediction")
features.append("y_actual")
form = x.what_if_form(array1, features)
return form
"""
change this. Take input from what-if form.
"""
# Local Feature Impact Graph
@app.callback(
[Output('local_feature_impact', "figure"),
Output('local_message_1', "children"),
Output('local_message_2', "children"),
Output('local_message_3', "children"),
Output('local_message_4', 'children')],
self.callback_input, prevent_initial_call=True)
def update_impact_graph(*values):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
df = pd.DataFrame([values[:-1]])
df.columns = self.param["columns"]
array = self.calculate_prediction_shap(df)
# Y_Pred
# Probability_
if self.param["is_classification"]:
y_and_prob = []
y_and_prob.append(int(array["y_prediction"]))
y_and_prob.append(round(float(array["Probability_" + str(int(array["y_prediction"]))]), 2))
else:
y_and_prob = []
y_and_prob.append(round(float(array["y_prediction"]), 2))
# figure, dat = g.feature_impact_old(array)
figure, dat = g.local_feature_impact_graph(array)
message = self.insights.insight_2_local_feature_impact(dat, y_and_prob)
return figure, message[0], message[1], message[2], message[3]
# Prototypical Analysis
"""
Change this. Take input from what-if from
"""
@app.callback(
Output(component_id='prototype_data', component_property='data'),
[Input(f[0], f[1]) for f in self.prototype_array])
def update_table(*values):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'btn-nclicks-1' in changed_id:
# get x variables and prediction column from the data
df_row = pd.DataFrame([values[:-1]])
df_row.columns = self.param["columns"]
# find prediction and impact values first here please.
df_row = self.calculate_prediction(df_row)
df_selected = df[list(self.param["columns"]) + [self.param["y_variable_predict"]]]
row_number = len(df_selected)
if not isinstance(df_row, pd.DataFrame):
df_row = df_row.as_data_frame()
df_selected.loc[row_number] = df_row.values[0]
p = protodash()
p.preprocess_data(df_selected, self.param["y_variable_predict"])
dfs, sample_row = p.find_prototypes(row_number)
dat = dfs.T.reset_index()
print("sample row columns")
sample_row = sample_row.to_frame()
sample_row.rename(columns={sample_row.columns[0]: "orig"}, inplace=True)
sample_row.reset_index(inplace=True)
# print(sample_row.columns)
dat = pd.merge(dat, sample_row, on=['index'], how='left')
dat['orig'] = dat['orig'].astype(float)
for i in list(dat.columns):
dat[str(i) + '_color'] = np.nan
if i != 'index':
dat[i] = dat[i].astype(float)
dat[str(i) + '_color'] = dat[str(i) + '_color'].astype(float)
dat[str(i) + '_color'] = np.where(dat['orig'] == dat[i], 1, 0)
dat.drop(["index_color", "orig_color"], axis=1, inplace=True)
dat = dat.to_dict('records')
return dat
else:
return []
# Global Feature Importance
@app.callback(
[Output('global_feature_importance', "figure"),
Output('global_message_1', "children")],
[Input('sql-query-button', 'children'),
Input('xaxis-column-test-2', 'value')])
def update_graphs(sql_query, value):
self.analytics.update_data()
self.analytics['function'] = "feature_importance"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = sql_query
self.analytics['finish_time'] = ''
self.analytics.insert_data()
g = plotly_graphs()
graph_type = "feature_importance"
dff = self.caching_data_manager(df, sql_query, graph_type, g.feature_importance)
message = self.insights.insight_1_feature_imp(dff)
figure = g.global_feature_importance_graph(dff, self.param["is_classification"])
return figure, message[0]
# Global Feature Impact
@app.callback(
[Output('global_feature_impact', "figure"),
Output('message_1', "children"),
Output('message_2', "children"),
Output('message_3', "children")],
[Input('sql-query-button', 'children'),
Input('xaxis-column-test-2', 'value')])
def update_graphs(sql_query, value):
g = plotly_graphs()
graph_type = "feature_impact"
df3 = self.caching_data_manager(df, sql_query, graph_type, g.feature_impact)
figure = g.global_feature_impact_graph(df3, self.param["is_classification"])
message = self.insights.insight_2_global_feature_impact(df3)
return figure, message[0], message[1], message[2]
# Partial Dependence Plot Graph
@app.callback(
Output('indicator-graphic', 'figure'),
[Input('xaxis-column', 'value'),
# Input('yaxis-column', 'value'),
Input('third-axis', 'value'),
Input('sql-query-button', 'children')])
def update_graph(xaxis_column_name, third_axis_name, sql_query):
self.analytics.update_data()
self.analytics['function'] = "pdp"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = sql_query
self.analytics.insert_data()
g = plotly_graphs()
graph_type = 'pdp'
df3 = self.caching_data_manager(df, sql_query, graph_type, g.partial_dependence_plot)
print(df3)
fig = g.pdp_plot(df3, df3[xaxis_column_name], df3[xaxis_column_name + "_impact"], df3[third_axis_name])
return fig
# Summary Plot
@app.callback(
Output('summary_plot', 'figure'),
[Input('sql-query-button', 'children'),
Input('xaxis-column-test', 'value')])
def update_graph2(sql_query, value):
g = plotly_graphs()
graph_type = 'summary_plot'
df3 = self.caching_data_manager(df, sql_query, graph_type, g.summary_plot)
fig = g.summary_plot_graph(df3)
return fig
# Distributions
@app.callback(
Output('indicator-graphic2', 'figure'),
[Input('xaxis-column-name', 'value'),
Input('plot_type', 'value'),
Input('sql-query-button', 'children')])
def update_graph2(xaxis_column_name, plot_type, sql_query):
graph_type = 'filtered_df'
df3 = self.caching_data_manager(df, sql_query, graph_type)
cat_variables = []
num_variables = []
for i in original_variables:
if df[i].dtype == 'object':
cat_variables.append(i)
else:
num_variables.append(i)
if plot_type == "Histogram":
return px.histogram(df3, x=xaxis_column_name, marginal="box", template="plotly_white")
else:
for i in cat_variables:
return px.violin(df3, x=xaxis_column_name, box=True, points='all', template="plotly_white")
else:
return px.violin(df3, y=xaxis_column_name, box=True, points='all', template="plotly_white")
# Port Finder
port = 8080
debug_value = True
if mode == "inline":
try:
app.run_server(mode="inline", port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
port = self.find_free_port()
app.run_server(mode="inline", port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
else:
try:
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
# try different ip in case 0.0.0.0 does not work
try:
try:
port = self.find_free_port()
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
port = self.find_free_port()
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
try:
port = self.find_free_port()
app.run_server(host='127.0.0.1', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
print("Please restart Jupyter Notebook or Python IDE.")
return False
try:
self.increate_counter()
except:
pass
return port
def find_free_port(self):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
# COUNTER FUNCTION (NEEDS TO BE IMPLEMENTED)
    def increate_counter(self):
        """POST a usage-counter event to the explainx telemetry endpoint.

        Sends self.user_id and the configured model name over the network.
        Returns True when the service answers with message == "200",
        False otherwise. (The misspelled name is kept: callers use it.)
        """
        # call api call here
        url = 'https://us-central1-explainx-25b88.cloudfunctions.net/increaseCounter'
        params = {
        }
        data = {
            "user_id": self.user_id,
            "model": self.param["model_name"]
        }
        r = requests.post(url, params=params, json=data)
        # The endpoint signals success in the JSON body, not the HTTP status.
        if r.json()["message"] == "200":
            return True
        else:
            return False
    def calculate_prediction_shap(self, df):
        """Predict on *df*, compute SHAP values, and return an augmented frame.

        Dispatches the predict call on self.param["model_name"] (xgboost /
        catboost / h2o / generic predict()-style), then delegates SHAP
        computation to calculate_shap().find(). For classification models,
        per-class "Probability_<class>" columns are appended as well.

        Returns the augmented dataframe, or False when an unsupported
        xgboost version is installed.
        """
        if self.param["model_name"] == "xgboost":
            import xgboost
            # These xgboost releases are rejected outright.
            if xgboost.__version__ in ['1.1.0', '1.1.1', '1.1.0rc2', '1.1.0rc1']:
                print(
                    "Current Xgboost version is not supported. Please install Xgboost using 'pip install xgboost==1.0.2'")
                return False
            prediction_col = self.param["model"].predict(xgboost.DMatrix(df))
        elif self.param["model_name"] == "catboost":
            prediction_col = self.param["model"].predict(df.to_numpy())
        elif self.param['model_name'] == 'h2o':
            # NOTE: df is rebound to an H2OFrame here, so the SHAP call and
            # the probability fallback below also receive the H2OFrame.
            df = h2o.H2OFrame(df)
            prediction_col = self.param["model"].predict(df)
        else:
            prediction_col = self.param["model"].predict(df.to_numpy())
        # is classification?
        is_classification = self.param["is_classification"]
        # shap
        c = calculate_shap()
        df_final, explainer = c.find(self.param["model"], df, prediction_col, is_classification,
                                     model_name=self.param["model_name"])
        # prediction col
        # df_final["y_prediction"] = prediction_col
        if is_classification is True:
            try:
                df_final = self.formatting_y_pred_for_h2o_classification(df_final, prediction_col)
                # find and add probabilities in the dataset.
                prediction_col_prob = self.param["model"].predict_proba(df.to_numpy())
            except:
                # Fallback for models without predict_proba (e.g. h2o, whose
                # predict() output carries the class probabilities).
                prediction_col_prob = self.param["model"].predict(df)
                prediction_col_prob = prediction_col_prob.as_data_frame()
            pd_prediction_col_prob = pd.DataFrame(prediction_col_prob)
            # NOTE(review): the loop variable shadows the calculate_shap
            # instance `c` above, and formatting_h2o_prediction_prob() below
            # rewrites these same "Probability_*" columns.
            for c in pd_prediction_col_prob.columns:
                df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
            # for c in pd_prediction_col_prob.columns:
            #     df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
            #     if c != 'predict':
            #         if "p" in c:
            #             res = c.split("p")[-1]
            #             df_final["Probability_" + str(res)] = list(pd_prediction_col_prob[c])
            #         else:
            #             df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
            #     else:
            #         df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
            df_final = self.formatting_h2o_prediction_prob(df_final, pd_prediction_col_prob)
        return df_final
def formatting_y_pred_for_h2o_classification(self, final_df, pred_col):
try:
final_df["y_prediction"] = pred_col
except:
# df_final = df_final.as_data_frame()
print("prediction col checking")
prediction_col = pred_col.as_data_frame()
final_df["y_prediction"] = prediction_col['predict'].iloc[0]
return final_df
def formatting_h2o_prediction_prob(self, final_df, h2o_pred):
for c in h2o_pred.columns:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
if c != 'predict':
if "p" in c:
res = c.split("p")[-1]
final_df["Probability_" + str(res)] = list(h2o_pred[c])
else:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
else:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
return final_df
    def calculate_prediction(self, df):
        """Append the model's predictions to *df* and return it.

        Same per-model dispatch as calculate_prediction_shap(), but without
        SHAP values or probability columns. Returns False when an
        unsupported xgboost version is installed.
        """
        if self.param["model_name"] == "xgboost":
            import xgboost
            # These xgboost releases are rejected outright.
            if xgboost.__version__ in ['1.1.0', '1.1.1', '1.1.0rc2', '1.1.0rc1']:
                print(
                    "Current Xgboost version is not supported. Please install Xgboost using 'pip install xgboost==1.0.2'")
                return False
            prediction_col = self.param["model"].predict(xgboost.DMatrix(df))
        elif self.param["model_name"] == "catboost":
            prediction_col = self.param["model"].predict(df.to_numpy())
        elif self.param['model_name'] == 'h2o':
            # NOTE: df is rebound to an H2OFrame; the column assignment below
            # then uses h2o's semantics, not pandas'.
            df = h2o.H2OFrame(df)
            prediction_col = self.param["model"].predict(df)
        else:
            prediction_col = self.param["model"].predict(df.to_numpy())
        # is classification?
        is_classification = self.param["is_classification"]
        # NOTE(review): is_classification is assigned but unused here.
        # prediction col
        df[self.param["y_variable_predict"]] = prediction_col
        return df
def localtunnel(port):
    """Expose the dashboard via the `lt` (localtunnel) CLI and print the public URL.

    Spawns `lt` in its own process group and blocks on its first stdout line,
    which is expected to contain "... is: <url>". NOTE(review): if the CLI
    fails before printing that line, the split yields one element and the
    print raises IndexError -- there is no error handling here.
    """
    # subdomain= 'explainx-'+ get_random_string(10)
    task = subprocess.Popen(['lt', '-h', '"https://serverless.social"', '-p', str(port)],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
    outpt = task.stdout.readline()
    outpt_string = outpt.decode("utf-8").split("is:")
    print('Explainx.ai is running @ ' + outpt_string[1])
def get_random_string(length):
    """Return a random string of *length* ASCII letters (both cases).

    Not cryptographically secure -- uses the `random` module.
    """
    alphabet = string.ascii_lowercase + string.ascii_uppercase
    return ''.join(random.choice(alphabet) for _ in range(length))
| 44.007659 | 208 | 0.529995 | from imports import *
from plotly_graphs import *
from protodash import *
from insights import *
from plotly_css import *
import pandasql as psql
import string
import random
import os
from apps import global_explanation, local_explanation, distribution, feature_interaction, cohort
from app import app
from what_if import *
from calculate_shap import *
from analytics import Analytics
from cohort_analysis import *
class dashboard():
    def __init__(self):
        """Initialize empty per-instance state; real setup happens in find()."""
        super(dashboard, self).__init__()
        # Filled by find() with the model, column names, y-variable names, etc.
        self.param = None
        # In-memory caches, one dict per graph type, keyed by SQL query string.
        self.query_dict = dict()
        self.filtered_dataframe = dict()
        self.feature_importance = dict()
        self.pdp = dict()
        self.summary_plot = dict()
        self.feature_impact = dict()
        self.multi_level_eda = dict()
def create_dir(self, dir_name):
if not os.path.exists(dir_name):
os.mkdir(dir_name)
def random_string_generator(self):
random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(10))
return random_str
def caching_data_exist_in_dict(self, sql_query, graph_type):
if graph_type == 'feature_importance':
if sql_query in self.feature_importance:
return True, self.feature_importance[sql_query]
return False, None
elif graph_type == 'pdp':
if sql_query in self.pdp:
return True, self.pdp[sql_query]
return False, None
elif graph_type == 'summary_plot':
if sql_query in self.summary_plot:
return True, self.summary_plot[sql_query]
return False, None
elif graph_type == 'feature_impact':
if sql_query in self.feature_impact:
return True, self.feature_impact[sql_query]
return False, None
elif graph_type == 'multi_level_eda':
if sql_query in self.multi_level_eda:
return True, self.multi_level_eda[sql_query]
return False, None
elif graph_type == 'filtered_df':
if sql_query in self.query_dict:
return True, self.query_dict[sql_query]
return False, None
    def caching_exists_in_file(self, sql_query, graph_type):
        """Check the on-disk cache for a previously computed result.

        Returns a triple (hit, index_readable, dataframe):
          (True, True, dff)   -- an index entry for (sql_query, graph_type,
                                 self.instance_id) exists and its CSV loads;
          (False, True, None) -- the index file loads but has no entry;
          (False, False, None) -- the index file is missing or any other
                                 error occurred (the broad except hides which).
        """
        self.create_dir("data_storage")
        self.create_dir("data_storage/{}".format(graph_type))
        try:
            dictionary_csv = pd.read_csv("./data_storage/filtered_df/dictionary_bkp.csv")
            random_id = dictionary_csv[(dictionary_csv['sql'] == sql_query) & (dictionary_csv['type'] == graph_type)
                                       & (dictionary_csv['instance_id'] == self.instance_id)]
            # Keep the newest entry. NOTE(review): drop_duplicates(inplace=True)
            # on a filtered slice triggers pandas' SettingWithCopyWarning.
            random_id.drop_duplicates(['sql'], keep='last', inplace=True)
            if not random_id.empty:
                result = random_id['random_id'].iloc[0]
                dff = pd.read_csv("./data_storage/{}/{}.csv".format(graph_type, result))
                return True, True, dff
            return False, True, None
        except Exception as e:
            return False, False, None
def creating_filtered_backup_file(self, sql_query, random_str, graph_type):
dict_bkp = pd.DataFrame(data={"sql": [sql_query], "random_id": [random_str], "type": [graph_type],
"instance_id": [self.instance_id]})
return dict_bkp
def store_data_in_csv(self, df, graph_type, random_str):
df.to_csv("./data_storage/{}/{}.csv".format(graph_type, random_str), index=False)
def store_data_in_dict(self, df, sql_query, graph_type):
if graph_type == 'feature_importance':
self.feature_importance[sql_query] = df
elif graph_type == 'pdp':
self.pdp[sql_query] = df
elif graph_type == 'summary_plot':
self.summary_plot[sql_query] = df
elif graph_type == 'feature_impact':
self.feature_impact[sql_query] = df
elif graph_type == 'multi_level_eda':
self.multi_level_eda[sql_query] = df
elif graph_type == 'filtered_df':
self.query_dict[sql_query] = df
    def caching_data_manager(self, df, sql_query, graph_type, calculated_funct=None, details_dict=None):
        """Return the (possibly cached) data behind one graph.

        On a disk-cache hit the stored CSV is returned as-is. Otherwise the
        SQL query is executed against *df*, the filtered frame is written to
        disk and indexed, and -- for graph types other than 'filtered_df' --
        *calculated_funct* is applied to produce the graph data, which is
        cached too. *details_dict* is currently unused.
        """
        status_file, file_exist, dff = self.caching_exists_in_file(sql_query, graph_type)
        if status_file:
            return dff
        else:
            random_str = self.random_string_generator()
            dict_bkp = self.creating_filtered_backup_file(sql_query, random_str, graph_type)
            # pandasql resolves table names from locals(): the query's "df"
            # refers to this method's df parameter.
            dff = psql.sqldf(sql_query, locals())
            self.create_dir("data_storage/filtered_df")
            # Append to the index; write the header only when the index file
            # did not exist yet.
            dict_bkp.to_csv("./data_storage/{}/dictionary_bkp.csv".format("filtered_df"), mode='a',
                            header=file_exist is False,
                            index=False)
            # NOTE(review): when the index already exists this writes the data
            # CSV without a header, yet caching_exists_in_file() reads it back
            # with a default header row -- confirm this is intentional.
            dff.to_csv("./data_storage/filtered_df/{}.csv".format(random_str), mode='w',
                       header=file_exist is False,
                       index=False)
            results = dff
            if graph_type != 'filtered_df':
                results = calculated_funct(dff, self.param["is_classification"])
                self.store_data_in_csv(results, graph_type, random_str)
            return results
    def find(self, df, mode, param):
        """Initialize dashboard state from a SHAP-augmented frame and launch the UI.

        *df* is expected to carry the original columns plus "*_impact"
        (and "*_rescaled") SHAP columns; *param* supplies the model, column
        lists and y-variable names; *mode* is forwarded to dash() (e.g.
        "inline"). Always returns True.
        """
        self.available_columns = available_columns = list(df.columns)
        # NOTE(review): despite its name, original_variables holds the
        # *_impact columns here (the inverse of the filter used elsewhere).
        original_variables = [col for col in df.columns if '_impact' in col]
        self.impact_variables = [col for col in original_variables if not '_rescaled' in col]
        self.y_variable = param["y_variable"]
        self.y_variable_predict = param["y_variable_predict"]
        self.param = param
        self.instance_id = self.random_string_generator()
        self.create_dir("data_storage")
        self.create_dir("data_storage/user")
        self.user_id = None
        self.df = df
        # Telemetry: records IP, MAC address, OS, data size and model name.
        # NOTE(review): confirm users are informed of / consent to this.
        self.analytics = Analytics()
        self.analytics['ip'] = self.analytics.finding_ip()
        self.analytics['mac'] = self.analytics.finding_address()
        self.analytics['instance_id'] = self.instance_id
        self.analytics['time'] = str(datetime.datetime.now())
        self.analytics['total_columns'] = len(self.available_columns)
        self.analytics['total_rows'] = len(self.df)
        self.analytics['os'] = self.analytics.finding_system()
        self.analytics['model_name'] = self.param["model_name"]
        self.analytics["function"] = 'explainx.ai'
        self.analytics["query"] = "all"
        self.analytics['finish_time'] = ''
        # One Dash Input per feature slider, plus the what-if submit button.
        self.callback_input = [Input(f + '_slider', 'value') for f in self.param["columns"]]
        self.callback_input.append(Input('submit-button-state', 'n_clicks'))
        # self.callback_input_prototype = [Input(f + '-slider', 'value') for f in self.param["columns"]]
        # self.callback_input_prototype.append(Input('btn-nclicks-1', 'n_clicks'))
        # Same wiring as (id, property) pairs for the prototype callback.
        self.prototype_array = []
        for f in self.param["columns"]:
            self.prototype_array.append([f + '_slider', 'value'])
        self.prototype_array.append(['btn-nclicks-1', 'n_clicks'])
        # Reuse a persisted anonymous user id, or mint and persist a new one.
        try:
            user_id = pd.read_csv("data_storage/user/user_id.csv")
            user_id.drop_duplicates(['id'], keep='first', inplace=True)
            user_id = user_id['id'].iloc[0]
            self.user_id = user_id
        except Exception as e:
            # print("inside user track" )
            user_id_val = self.random_string_generator()
            user_id_csv = pd.DataFrame(data={"id": [user_id_val]})
            user_id_csv.to_csv("data_storage/user/user_id.csv", index=False)
            self.user_id = user_id_val
        self.analytics['user_id'] = self.user_id
        self.analytics.insert_data()
        self.insights = insights(self.param)
        # Blocks while the Dash server runs; its return value is discarded.
        d = self.dash(df, mode)
        return True
def dash(self, df, mode):
y_variable = self.y_variable
g = plotly_graphs()
ca = cohortAnalysis()
y_variables = [col for col in df.columns if '_impact' in col]
original_variables = [col for col in df.columns if not '_impact' in col]
original_variables = [col for col in original_variables if not '_rescaled' in col]
array = []
PAGE_SIZE = 10
row_number = 1
# y_variable = "predict"
columns = ['index', '0', '1', '2', '3', '4']
dat = []
available_columns = list(df.columns)
# external_stylesheets = [dbc.themes.BOOTSTRAP,
# # {
# # 'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.5.0/css/bootstrap.min.css',
# # 'rel': 'stylesheet'
# # }
# ]
# app = JupyterDash(__name__, external_stylesheets=external_stylesheets)
app.title = "explainX.ai - Main Dashboard"
PLOTLY_LOGO = "https://i.ibb.co/ZTWtVDV/explainx-logo.png"
menu = dbc.Row(
[
dbc.Col(dbc.NavItem(dbc.NavLink("Home", href="/apps/")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Global Explanation", href="/apps/global_explanation")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Local Explanation", href="/apps/local_explanation")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Feature Interaction", href="/apps/feature_interaction")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Distributions", href="/apps/distribution")),
style={'width': "150px", 'fontSize': '10px'}),
dbc.Col(dbc.NavItem(dbc.NavLink("Cohort Analysis", href="/apps/cohort")),
style={'width': "150px", 'fontSize': '10px'})
],
no_gutters=True,
className="ml-auto flex-nowrap mt-3 mt-md-0",
align="center"
)
navbar = dbc.Navbar(
[
html.A(
# Use row and col to control vertical alignment of logo / brand
dbc.Row(
[
dbc.Col(html.Img(src=PLOTLY_LOGO, height="30px")),
dbc.Col(dbc.NavbarBrand("explainX.ai", className="ml-2",
style={'fontSize': '15px', 'color': 'black'})),
],
align="center",
no_gutters=True,
),
href="https://www.explainx.ai",
),
dbc.NavbarToggler(id="navbar-toggler"),
dbc.Collapse(menu, id="navbar-collapse", navbar=True),
],
color="light",
dark=True,
)
# add callback for toggling the collapse on small screens
@app.callback(
Output("navbar-collapse", "is_open"),
[Input("navbar-toggler", "n_clicks")],
[State("navbar-collapse", "is_open")],
)
def toggle_navbar_collapse(n, is_open):
if n:
return not is_open
return is_open
@app.callback(Output('page-content', 'children'),
[Input('url', 'pathname')])
def display_page(pathname):
if pathname == '/apps/global_explanation':
return global_explanation.global_explanation(original_variables)
elif pathname == '/apps/feature_interaction':
return feature_interaction.layout_interaction(original_variables, y_variables)
elif pathname == '/apps/distribution':
return distribution.layout_distribution(original_variables)
elif pathname == '/apps/local_explanation':
return local_explanation.layout_local(original_variables, columns, df.columns)
elif pathname == '/apps/cohort':
return cohort.cohort_layout(original_variables)
else:
return welcome_message
welcome_message = html.Div(
[
html.Div([
html.H3("Welcome to ExplainX.ai Explainable AI Dashboard"),
html.H4("Start explaining your model by exploring one of the following options.")
], style={'margin-left': "20px"}),
html.Div([
html.Div([
html.Img(
src='https://lh3.googleusercontent.com/uNkpoLQRIza6SSDk9fQed6bu6c1Q9zDkDjZqkRtF3cU97Smf2rS0soKo2ZQm32gJe8FyLYvNRUYJ-hqM7zs4esIAFchdPmgC4sHgbpxTKkGjm4dRK-NagI56K8vnXg8FH4s_Jct2RaQ',
className="global_explanation_image"),
dcc.Link("Global Explanation", href="/apps/global_explanation", className="link")
], className="welcome_box", id="global_explanation_deets"),
html.Div([
html.Img(
src='https://lh3.googleusercontent.com/LpM-N2QavBVxlMW6vKm5fmT5Qe3mYtI2q2OQc5kgZfW4kKOFlyidgrwT5C9zB3XHZncHvlGcFl82Si7bMDQN_PtLf4PThqdEUs1UwoUPeNHs0VYY6ICKlHi44ppZLZIMQvaWI4bMqm0',
className="global_explanation_image"),
dcc.Link("Local Level Explanation", href="/apps/local_explanation", className="link")
], className="welcome_box", id="local_explanation_deets"),
html.Div([
html.Img(
src='https://lh6.googleusercontent.com/GXnNtGlqQcsRXugwwlZ31SwrJda4Z1WVyIi5i13vZImrUY6YgIZvM81d7zrBOh8n2jK_Lqj3BB5IqS4dU5l680-rZuu40bMXa-EAd8ag3WDIU2SNYieg0DH3e_FwUq5f2-y-X_POE-k',
className="global_explanation_image"),
dcc.Link("Feature Interactions", href="/apps/feature_interaction", className="link")
], className="welcome_box", id="feature_interaction_deets"),
html.Div([
html.Img(
src='https://uploads-ssl.webflow.com/5edc18e72f7ba21e1ed2efae/5f519fd99580494301e67b7b_pipeline.png',
className="global_explanation_image"),
dcc.Link("Cohort Analysis", href="/apps/cohort", className="link")
], className="welcome_box", id="cohort_analysis_deets")
], className="main_welcome_div"),
]
)
app.layout = html.Div([
navbar,
html.Div([
dbc.Card(
[
dbc.CardHeader(
html.H2(
dbc.Button(
"Analyze using SQL",
id="collapse-button-2",
color="link",
style={'fontSize': '10px', 'margin-top': '0px'}), style={"margin-top": "0px"}),
style={"height": "50px"}),
dbc.Collapse(html.Div([
html.Div(dcc.Input(
id='input-on-submit',
type='text',
# placeholder="SELECT * FROM df"
value="SELECT * FROM df",
style={'height': '200px', 'width': '700px', 'fontSize': '10px'})),
html.Button('Execute Query', id='submit-val', n_clicks=1),
html.Div(id='sql-query-button',
children='Enter a value and press submit',
style={'display': 'none'}
)
], style={'marginTop': 0}), id="collapse-2"),
]
),
], style=style4),
html.Div([
dbc.Card(
[
dbc.CardHeader(
html.H2(
dbc.Button(
"View Your Data",
id="collapse-button",
color="link",
style={'fontSize': '10px'}), style={"margin-top": "0px"}),
style={"height": "50px"}),
dbc.Collapse(html.Div([
html.H4('',
style=style1),
html.Div([
dash_table.DataTable(
id='datatable-interactivity',
columns=[
{"name": i, "id": i, "deletable": False, "selectable": True} for i in df.columns
],
# data=df.to_dict('records'),
editable=True,
sort_mode="multi",
# selected_columns=[],
# selected_rows=[],
# page_action="native",
page_current=0,
page_size=PAGE_SIZE,
row_selectable='multi',
page_action='custom',
style_table=style2,
style_header=style3,
style_cell={
"font-family": "Helvetica, Arial, sans-serif",
"fontSize": "11px",
'width': '{}%'.format(len(df.columns)),
'textOverflow': 'ellipsis',
'overflow': 'hidden',
'textAlign': 'left',
'minWidth': '180px', 'width': '180px', 'maxWidth': '180px',
},
css=[
{
'selector': '.dash-spreadsheet td div',
'rule': '''
line-height: 15px;
max-height: 20px; min-height: 20px; height: 20px;
display: block;
overflow-y: hidden;
'''
}])
])
], style={'marginTop': 0}), id="collapse"),
]
),
],
style=style4),
# Pages Data
html.Div([
dcc.Location(id='url', refresh=False),
html.Div(id='page-content')
])
# end of collapsable div
], className="main_div")
# Navigation
# Data Interactivity
@app.callback(
Output('datatable-interactivity', 'data'),
[Input('datatable-interactivity', "page_current"),
Input('datatable-interactivity', "page_size")])
def update_table(page_current, page_size):
return df.iloc[
page_current * page_size:(page_current + 1) * page_size
].to_dict('records')
# Collapse-Toggle
@app.callback(
Output("collapse", "is_open"),
[Input("collapse-button", "n_clicks")],
[State("collapse", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Collapse-Toggle 2
@app.callback(
Output("collapse-2", "is_open"),
[Input("collapse-button-2", "n_clicks")],
[State("collapse-2", "is_open")],
)
def toggle_collapse(n, is_open):
if n:
return not is_open
return is_open
# Cohort Analysis - Callbacks
@app.callback(
Output("modal", "is_open"),
[Input("open", "n_clicks"), Input("close", "n_clicks")],
[State("modal", "is_open")],
prevent_initial_call=True)
def toggle_modal(n1, n2, is_open):
if n1 or n2:
return not is_open
return is_open
@app.callback(
[Output("cohort-metrics-div", "children"),
Output("cohort-details", "children"),
Output("cohort-graph", "children")],
[Input("add-cohort", "n_clicks"),
Input("remove-cohort", "n_clicks"),
Input("x-axis", "value")],
[State("demo-dropdown", "value"),
State("demo-operators", "value"),
State("demo-values", "value")])
def cohort_metrics_details(add_cohort, remove_cohort, x_axis, var_name, operator, value):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'remove-cohort' in changed_id:
ca.remove_cohort()
fig = ca.cohort_graph(x_axis)
return ca.cohort_metrics_details(), ca.cohort_details(), dcc.Graph(figure=fig),
else:
ca.add_cohort_metrics(df, var_name, operator, value, self.param["is_classification"])
cohort = ca.add_cohort(df, x_axis, var_name, operator, value)
fig = ca.cohort_graph(x_axis)
return ca.cohort_metrics_details(), ca.cohort_details(), dcc.Graph(figure=fig)
# SQL - Data Input Callback
@app.callback(
dash.dependencies.Output('sql-query-button', 'children'),
[dash.dependencies.Input('submit-val', 'n_clicks')],
[dash.dependencies.State('input-on-submit', 'value')])
def update_output(n_clicks, value):
sql_query = f'{value}'
return sql_query
# What-If Form CallBack
@app.callback(
Output('place_form_here', 'children'),
[Input('row_number', 'value')])
def create_what_if_form(row_number):
self.analytics.update_data()
self.analytics['function'] = "what_if"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = row_number
self.analytics.insert_data()
self.analytics['finish_time'] = ''
x = what_if()
i = 0
if type(row_number) == type(1):
i = row_number
array = df[i:i + 1]
array1 = array
impact_variables = [col for col in array if '_impact' in col]
for col in impact_variables:
array1.drop([col], axis=1, inplace=True)
# features = [col for col in array if not '_impact' in col]
features = list(self.param["columns"])
features.append("y_prediction")
features.append("y_actual")
form = x.what_if_form(array1, features)
return form
# Local Feature Impact Graph
@app.callback(
[Output('local_feature_impact', "figure"),
Output('local_message_1', "children"),
Output('local_message_2', "children"),
Output('local_message_3', "children"),
Output('local_message_4', 'children')],
self.callback_input, prevent_initial_call=True)
def update_impact_graph(*values):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
df = pd.DataFrame([values[:-1]])
df.columns = self.param["columns"]
array = self.calculate_prediction_shap(df)
# Y_Pred
# Probability_
if self.param["is_classification"]:
y_and_prob = []
y_and_prob.append(int(array["y_prediction"]))
y_and_prob.append(round(float(array["Probability_" + str(int(array["y_prediction"]))]), 2))
else:
y_and_prob = []
y_and_prob.append(round(float(array["y_prediction"]), 2))
# figure, dat = g.feature_impact_old(array)
figure, dat = g.local_feature_impact_graph(array)
message = self.insights.insight_2_local_feature_impact(dat, y_and_prob)
return figure, message[0], message[1], message[2], message[3]
# Prototypical Analysis
@app.callback(
Output(component_id='prototype_data', component_property='data'),
[Input(f[0], f[1]) for f in self.prototype_array])
def update_table(*values):
changed_id = [p['prop_id'] for p in dash.callback_context.triggered][0]
if 'btn-nclicks-1' in changed_id:
# get x variables and prediction column from the data
df_row = pd.DataFrame([values[:-1]])
df_row.columns = self.param["columns"]
# find prediction and impact values first here please.
df_row = self.calculate_prediction(df_row)
df_selected = df[list(self.param["columns"]) + [self.param["y_variable_predict"]]]
row_number = len(df_selected)
if not isinstance(df_row, pd.DataFrame):
df_row = df_row.as_data_frame()
df_selected.loc[row_number] = df_row.values[0]
p = protodash()
p.preprocess_data(df_selected, self.param["y_variable_predict"])
dfs, sample_row = p.find_prototypes(row_number)
dat = dfs.T.reset_index()
print("sample row columns")
sample_row = sample_row.to_frame()
sample_row.rename(columns={sample_row.columns[0]: "orig"}, inplace=True)
sample_row.reset_index(inplace=True)
# print(sample_row.columns)
dat = pd.merge(dat, sample_row, on=['index'], how='left')
dat['orig'] = dat['orig'].astype(float)
for i in list(dat.columns):
dat[str(i) + '_color'] = np.nan
if i != 'index':
dat[i] = dat[i].astype(float)
dat[str(i) + '_color'] = dat[str(i) + '_color'].astype(float)
dat[str(i) + '_color'] = np.where(dat['orig'] == dat[i], 1, 0)
dat.drop(["index_color", "orig_color"], axis=1, inplace=True)
dat = dat.to_dict('records')
return dat
else:
return []
# Global Feature Importance
@app.callback(
[Output('global_feature_importance', "figure"),
Output('global_message_1', "children")],
[Input('sql-query-button', 'children'),
Input('xaxis-column-test-2', 'value')])
def update_graphs(sql_query, value):
self.analytics.update_data()
self.analytics['function'] = "feature_importance"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = sql_query
self.analytics['finish_time'] = ''
self.analytics.insert_data()
g = plotly_graphs()
graph_type = "feature_importance"
dff = self.caching_data_manager(df, sql_query, graph_type, g.feature_importance)
message = self.insights.insight_1_feature_imp(dff)
figure = g.global_feature_importance_graph(dff, self.param["is_classification"])
return figure, message[0]
# Global Feature Impact
@app.callback(
[Output('global_feature_impact', "figure"),
Output('message_1', "children"),
Output('message_2', "children"),
Output('message_3', "children")],
[Input('sql-query-button', 'children'),
Input('xaxis-column-test-2', 'value')])
def update_graphs(sql_query, value):
g = plotly_graphs()
graph_type = "feature_impact"
df3 = self.caching_data_manager(df, sql_query, graph_type, g.feature_impact)
figure = g.global_feature_impact_graph(df3, self.param["is_classification"])
message = self.insights.insight_2_global_feature_impact(df3)
return figure, message[0], message[1], message[2]
# Partial Dependence Plot Graph
@app.callback(
Output('indicator-graphic', 'figure'),
[Input('xaxis-column', 'value'),
# Input('yaxis-column', 'value'),
Input('third-axis', 'value'),
Input('sql-query-button', 'children')])
def update_graph(xaxis_column_name, third_axis_name, sql_query):
self.analytics.update_data()
self.analytics['function'] = "pdp"
self.analytics['time'] = str(datetime.datetime.now())
self.analytics['query'] = sql_query
self.analytics.insert_data()
g = plotly_graphs()
graph_type = 'pdp'
df3 = self.caching_data_manager(df, sql_query, graph_type, g.partial_dependence_plot)
print(df3)
fig = g.pdp_plot(df3, df3[xaxis_column_name], df3[xaxis_column_name + "_impact"], df3[third_axis_name])
return fig
# Summary Plot
@app.callback(
Output('summary_plot', 'figure'),
[Input('sql-query-button', 'children'),
Input('xaxis-column-test', 'value')])
def update_graph2(sql_query, value):
g = plotly_graphs()
graph_type = 'summary_plot'
df3 = self.caching_data_manager(df, sql_query, graph_type, g.summary_plot)
fig = g.summary_plot_graph(df3)
return fig
# Distributions
@app.callback(
Output('indicator-graphic2', 'figure'),
[Input('xaxis-column-name', 'value'),
Input('plot_type', 'value'),
Input('sql-query-button', 'children')])
def update_graph2(xaxis_column_name, plot_type, sql_query):
graph_type = 'filtered_df'
df3 = self.caching_data_manager(df, sql_query, graph_type)
cat_variables = []
num_variables = []
for i in original_variables:
if df[i].dtype == 'object':
cat_variables.append(i)
else:
num_variables.append(i)
if plot_type == "Histogram":
return px.histogram(df3, x=xaxis_column_name, marginal="box", template="plotly_white")
else:
for i in cat_variables:
return px.violin(df3, x=xaxis_column_name, box=True, points='all', template="plotly_white")
else:
return px.violin(df3, y=xaxis_column_name, box=True, points='all', template="plotly_white")
# Port Finder
port = 8080
debug_value = True
if mode == "inline":
try:
app.run_server(mode="inline", port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
port = self.find_free_port()
app.run_server(mode="inline", port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
else:
try:
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
# try different ip in case 0.0.0.0 does not work
try:
try:
port = self.find_free_port()
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
port = self.find_free_port()
app.run_server(host='0.0.0.0', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
try:
port = self.find_free_port()
app.run_server(host='127.0.0.1', port=port, debug=debug_value, dev_tools_ui=debug_value,
dev_tools_props_check=debug_value, dev_tools_silence_routes_logging=True,
dev_tools_hot_reload=True)
except:
print("Please restart Jupyter Notebook or Python IDE.")
return False
try:
self.increate_counter()
except:
pass
return port
def find_free_port(self):
with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
s.bind(('', 0))
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return s.getsockname()[1]
# COUNTER FUNCTION (NEEDS TO BE IMPLEMENTED)
def increate_counter(self):
# call api call here
url = 'https://us-central1-explainx-25b88.cloudfunctions.net/increaseCounter'
params = {
}
data = {
"user_id": self.user_id,
"model": self.param["model_name"]
}
r = requests.post(url, params=params, json=data)
if r.json()["message"] == "200":
return True
else:
return False
def calculate_prediction_shap(self, df):
if self.param["model_name"] == "xgboost":
import xgboost
if xgboost.__version__ in ['1.1.0', '1.1.1', '1.1.0rc2', '1.1.0rc1']:
print(
"Current Xgboost version is not supported. Please install Xgboost using 'pip install xgboost==1.0.2'")
return False
prediction_col = self.param["model"].predict(xgboost.DMatrix(df))
elif self.param["model_name"] == "catboost":
prediction_col = self.param["model"].predict(df.to_numpy())
elif self.param['model_name'] == 'h2o':
df = h2o.H2OFrame(df)
prediction_col = self.param["model"].predict(df)
else:
prediction_col = self.param["model"].predict(df.to_numpy())
# is classification?
is_classification = self.param["is_classification"]
# shap
c = calculate_shap()
df_final, explainer = c.find(self.param["model"], df, prediction_col, is_classification,
model_name=self.param["model_name"])
# prediction col
# df_final["y_prediction"] = prediction_col
if is_classification is True:
try:
df_final = self.formatting_y_pred_for_h2o_classification(df_final, prediction_col)
# find and add probabilities in the dataset.
prediction_col_prob = self.param["model"].predict_proba(df.to_numpy())
except:
prediction_col_prob = self.param["model"].predict(df)
prediction_col_prob = prediction_col_prob.as_data_frame()
pd_prediction_col_prob = pd.DataFrame(prediction_col_prob)
for c in pd_prediction_col_prob.columns:
df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
# for c in pd_prediction_col_prob.columns:
# df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
# if c != 'predict':
# if "p" in c:
# res = c.split("p")[-1]
# df_final["Probability_" + str(res)] = list(pd_prediction_col_prob[c])
# else:
# df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
# else:
# df_final["Probability_" + str(c)] = list(pd_prediction_col_prob[c])
df_final = self.formatting_h2o_prediction_prob(df_final, pd_prediction_col_prob)
return df_final
def formatting_y_pred_for_h2o_classification(self, final_df, pred_col):
try:
final_df["y_prediction"] = pred_col
except:
# df_final = df_final.as_data_frame()
print("prediction col checking")
prediction_col = pred_col.as_data_frame()
final_df["y_prediction"] = prediction_col['predict'].iloc[0]
return final_df
def formatting_h2o_prediction_prob(self, final_df, h2o_pred):
for c in h2o_pred.columns:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
if c != 'predict':
if "p" in c:
res = c.split("p")[-1]
final_df["Probability_" + str(res)] = list(h2o_pred[c])
else:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
else:
final_df["Probability_" + str(c)] = list(h2o_pred[c])
return final_df
def calculate_prediction(self, df):
if self.param["model_name"] == "xgboost":
import xgboost
if xgboost.__version__ in ['1.1.0', '1.1.1', '1.1.0rc2', '1.1.0rc1']:
print(
"Current Xgboost version is not supported. Please install Xgboost using 'pip install xgboost==1.0.2'")
return False
prediction_col = self.param["model"].predict(xgboost.DMatrix(df))
elif self.param["model_name"] == "catboost":
prediction_col = self.param["model"].predict(df.to_numpy())
elif self.param['model_name'] == 'h2o':
df = h2o.H2OFrame(df)
prediction_col = self.param["model"].predict(df)
else:
prediction_col = self.param["model"].predict(df.to_numpy())
# is classification?
is_classification = self.param["is_classification"]
# prediction col
df[self.param["y_variable_predict"]] = prediction_col
return df
def localtunnel(port):
# subdomain= 'explainx-'+ get_random_string(10)
task = subprocess.Popen(['lt', '-h', '"https://serverless.social"', '-p', str(port)],
stdout=subprocess.PIPE, stderr=subprocess.PIPE, preexec_fn=os.setsid)
outpt = task.stdout.readline()
outpt_string = outpt.decode("utf-8").split("is:")
print('Explainx.ai is running @ ' + outpt_string[1])
def get_random_string(length):
letters = string.ascii_lowercase + string.ascii_uppercase
result_str = ''.join(random.choice(letters) for i in range(length))
return result_str
| true | true |
f73ae1bfe7e33a8885926aa830257fe8b9e5ba14 | 52 | py | Python | apps/people/__init__.py | heykarimoff/covid19-pcr | 635c7880df3da6b5fc693bed5fa68ea7423fbf54 | [
"MIT"
] | null | null | null | apps/people/__init__.py | heykarimoff/covid19-pcr | 635c7880df3da6b5fc693bed5fa68ea7423fbf54 | [
"MIT"
] | null | null | null | apps/people/__init__.py | heykarimoff/covid19-pcr | 635c7880df3da6b5fc693bed5fa68ea7423fbf54 | [
"MIT"
] | 1 | 2022-02-18T14:23:16.000Z | 2022-02-18T14:23:16.000Z | default_app_config = 'apps.people.apps.PeopleConfig' | 52 | 52 | 0.846154 | default_app_config = 'apps.people.apps.PeopleConfig' | true | true |
f73ae1dea50d9c342b26210dec62ee20d2cef153 | 8,565 | py | Python | Bio/Blast/NCBIWWW.py | cymon/biopython-github-master | 7be9697599296401b0a7126d23b5eda391a94116 | [
"PostgreSQL"
] | 1 | 2016-05-09T13:17:59.000Z | 2016-05-09T13:17:59.000Z | Bio/Blast/NCBIWWW.py | cymon/biopython-github-master | 7be9697599296401b0a7126d23b5eda391a94116 | [
"PostgreSQL"
] | null | null | null | Bio/Blast/NCBIWWW.py | cymon/biopython-github-master | 7be9697599296401b0a7126d23b5eda391a94116 | [
"PostgreSQL"
] | null | null | null | # Copyright 1999 by Jeffrey Chang. All rights reserved.
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
# Patched by Brad Chapman.
# Chris Wroe added modifications for work in myGrid
"""
This module provides code to work with the WWW version of BLAST
provided by the NCBI.
http://blast.ncbi.nlm.nih.gov/
Functions:
qblast Do a BLAST search using the QBLAST API.
"""
try:
import cStringIO as StringIO
except ImportError:
import StringIO
def qblast(program, database, sequence,
auto_format=None,composition_based_statistics=None,
db_genetic_code=None,endpoints=None,entrez_query='(none)',
expect=10.0,filter=None,gapcosts=None,genetic_code=None,
hitlist_size=50,i_thresh=None,layout=None,lcase_mask=None,
matrix_name=None,nucl_penalty=None,nucl_reward=None,
other_advanced=None,perc_ident=None,phi_pattern=None,
query_file=None,query_believe_defline=None,query_from=None,
query_to=None,searchsp_eff=None,service=None,threshold=None,
ungapped_alignment=None,word_size=None,
alignments=500,alignment_view=None,descriptions=500,
entrez_links_new_window=None,expect_low=None,expect_high=None,
format_entrez_query=None,format_object=None,format_type='XML',
ncbi_gi=None,results_file=None,show_overview=None
):
"""Do a BLAST search using the QBLAST server at NCBI.
Supports all parameters of the qblast API for Put and Get.
Some useful parameters:
program blastn, blastp, blastx, tblastn, or tblastx (lower case)
database Which database to search against (e.g. "nr").
sequence The sequence to search.
ncbi_gi TRUE/FALSE whether to give 'gi' identifier.
descriptions Number of descriptions to show. Def 500.
alignments Number of alignments to show. Def 500.
expect An expect value cutoff. Def 10.0.
matrix_name Specify an alt. matrix (PAM30, PAM70, BLOSUM80, BLOSUM45).
filter "none" turns off filtering. Default no filtering
format_type "HTML", "Text", "ASN.1", or "XML". Def. "XML".
entrez_query Entrez query to limit Blast search
hitlist_size Number of hits to return. Default 50
This function does no checking of the validity of the parameters
and passes the values to the server as is. More help is available at:
http://www.ncbi.nlm.nih.gov/BLAST/blast_overview.html
"""
import urllib, urllib2
import time
assert program in ['blastn', 'blastp', 'blastx', 'tblastn', 'tblastx']
# Format the "Put" command, which sends search requests to qblast.
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node5.html on 9 July 2007
parameters = [
('AUTO_FORMAT',auto_format),
('COMPOSITION_BASED_STATISTICS',composition_based_statistics),
('DATABASE',database),
('DB_GENETIC_CODE',db_genetic_code),
('ENDPOINTS',endpoints),
('ENTREZ_QUERY',entrez_query),
('EXPECT',expect),
('FILTER',filter),
('GAPCOSTS',gapcosts),
('GENETIC_CODE',genetic_code),
('HITLIST_SIZE',hitlist_size),
('I_THRESH',i_thresh),
('LAYOUT',layout),
('LCASE_MASK',lcase_mask),
('MATRIX_NAME',matrix_name),
('NUCL_PENALTY',nucl_penalty),
('NUCL_REWARD',nucl_reward),
('OTHER_ADVANCED',other_advanced),
('PERC_IDENT',perc_ident),
('PHI_PATTERN',phi_pattern),
('PROGRAM',program),
('QUERY',sequence),
('QUERY_FILE',query_file),
('QUERY_BELIEVE_DEFLINE',query_believe_defline),
('QUERY_FROM',query_from),
('QUERY_TO',query_to),
('SEARCHSP_EFF',searchsp_eff),
('SERVICE',service),
('THRESHOLD',threshold),
('UNGAPPED_ALIGNMENT',ungapped_alignment),
('WORD_SIZE',word_size),
('CMD', 'Put'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
# Send off the initial query to qblast.
# Note the NCBI do not currently impose a rate limit here, other
# than the request not to make say 50 queries at once using multiple
# threads.
request = urllib2.Request("http://blast.ncbi.nlm.nih.gov/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
# Format the "Get" command, which gets the formatted results from qblast
# Parameters taken from http://www.ncbi.nlm.nih.gov/BLAST/Doc/node6.html on 9 July 2007
rid, rtoe = _parse_qblast_ref_page(handle)
parameters = [
('ALIGNMENTS',alignments),
('ALIGNMENT_VIEW',alignment_view),
('DESCRIPTIONS',descriptions),
('ENTREZ_LINKS_NEW_WINDOW',entrez_links_new_window),
('EXPECT_LOW',expect_low),
('EXPECT_HIGH',expect_high),
('FORMAT_ENTREZ_QUERY',format_entrez_query),
('FORMAT_OBJECT',format_object),
('FORMAT_TYPE',format_type),
('NCBI_GI',ncbi_gi),
('RID',rid),
('RESULTS_FILE',results_file),
('SERVICE',service),
('SHOW_OVERVIEW',show_overview),
('CMD', 'Get'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
# Poll NCBI until the results are ready. Use a 3 second wait
delay = 3.0
previous = time.time()
while True:
current = time.time()
wait = previous + delay - current
if wait > 0:
time.sleep(wait)
previous = current + wait
else:
previous = current
request = urllib2.Request("http://blast.ncbi.nlm.nih.gov/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
results = handle.read()
# Can see an "\n\n" page while results are in progress,
# if so just wait a bit longer...
if results=="\n\n":
continue
# XML results don't have the Status tag when finished
if results.find("Status=") < 0:
if results == "\n\n":
continue
break
i = results.index("Status=")
j = results.index("\n", i)
status = results[i+len("Status="):j].strip()
if status.upper() == "READY":
break
return StringIO.StringIO(results)
def _parse_qblast_ref_page(handle):
"""Extract a tuple of RID, RTOE from the 'please wait' page (PRIVATE).
The NCBI FAQ pages use TOE for 'Time of Execution', so RTOE is proably
'Request Time of Execution' and RID would be 'Request Identifier'.
"""
s = handle.read()
i = s.find("RID =")
if i == -1:
rid = None
else:
j = s.find("\n", i)
rid = s[i+len("RID ="):j].strip()
i = s.find("RTOE =")
if i == -1:
rtoe = None
else:
j = s.find("\n", i)
rtoe = s[i+len("RTOE ="):j].strip()
if not rid and not rtoe:
#Can we reliably extract the error message from the HTML page?
#e.g. "Message ID#24 Error: Failed to read the Blast query:
# Nucleotide FASTA provided for protein sequence"
#This occurs inside a <div class="error msInf"> entry so try this:
i = s.find('<div class="error msInf">')
if i != -1:
msg = s[i+len('<div class="error msInf">'):].strip()
msg = msg.split("</div>",1)[0].split("\n",1)[0].strip()
if msg:
raise ValueError("Error message from NCBI: %s" % msg)
#We didn't recognise the error layout :(
raise ValueError("No RID and no RTOE found in the 'please wait' page."
" (there was probably a problem with your request)")
elif not rid:
#Can this happen?
raise ValueError("No RID found in the 'please wait' page."
" (although RTOE = %s)" % repr(rtoe))
elif not rtoe:
#Can this happen?
raise ValueError("No RTOE found in the 'please wait' page."
" (although RID = %s)" % repr(rid))
try:
return rid, int(rtoe)
except ValueError:
raise ValueError("A non-integer RTOE found in " \
+"the 'please wait' page, %s" % repr(rtoe))
| 38.755656 | 95 | 0.608523 |
try:
import cStringIO as StringIO
except ImportError:
import StringIO
def qblast(program, database, sequence,
auto_format=None,composition_based_statistics=None,
db_genetic_code=None,endpoints=None,entrez_query='(none)',
expect=10.0,filter=None,gapcosts=None,genetic_code=None,
hitlist_size=50,i_thresh=None,layout=None,lcase_mask=None,
matrix_name=None,nucl_penalty=None,nucl_reward=None,
other_advanced=None,perc_ident=None,phi_pattern=None,
query_file=None,query_believe_defline=None,query_from=None,
query_to=None,searchsp_eff=None,service=None,threshold=None,
ungapped_alignment=None,word_size=None,
alignments=500,alignment_view=None,descriptions=500,
entrez_links_new_window=None,expect_low=None,expect_high=None,
format_entrez_query=None,format_object=None,format_type='XML',
ncbi_gi=None,results_file=None,show_overview=None
):
import urllib, urllib2
import time
assert program in ['blastn', 'blastp', 'blastx', 'tblastn', 'tblastx']
parameters = [
('AUTO_FORMAT',auto_format),
('COMPOSITION_BASED_STATISTICS',composition_based_statistics),
('DATABASE',database),
('DB_GENETIC_CODE',db_genetic_code),
('ENDPOINTS',endpoints),
('ENTREZ_QUERY',entrez_query),
('EXPECT',expect),
('FILTER',filter),
('GAPCOSTS',gapcosts),
('GENETIC_CODE',genetic_code),
('HITLIST_SIZE',hitlist_size),
('I_THRESH',i_thresh),
('LAYOUT',layout),
('LCASE_MASK',lcase_mask),
('MATRIX_NAME',matrix_name),
('NUCL_PENALTY',nucl_penalty),
('NUCL_REWARD',nucl_reward),
('OTHER_ADVANCED',other_advanced),
('PERC_IDENT',perc_ident),
('PHI_PATTERN',phi_pattern),
('PROGRAM',program),
('QUERY',sequence),
('QUERY_FILE',query_file),
('QUERY_BELIEVE_DEFLINE',query_believe_defline),
('QUERY_FROM',query_from),
('QUERY_TO',query_to),
('SEARCHSP_EFF',searchsp_eff),
('SERVICE',service),
('THRESHOLD',threshold),
('UNGAPPED_ALIGNMENT',ungapped_alignment),
('WORD_SIZE',word_size),
('CMD', 'Put'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
request = urllib2.Request("http://blast.ncbi.nlm.nih.gov/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
rid, rtoe = _parse_qblast_ref_page(handle)
parameters = [
('ALIGNMENTS',alignments),
('ALIGNMENT_VIEW',alignment_view),
('DESCRIPTIONS',descriptions),
('ENTREZ_LINKS_NEW_WINDOW',entrez_links_new_window),
('EXPECT_LOW',expect_low),
('EXPECT_HIGH',expect_high),
('FORMAT_ENTREZ_QUERY',format_entrez_query),
('FORMAT_OBJECT',format_object),
('FORMAT_TYPE',format_type),
('NCBI_GI',ncbi_gi),
('RID',rid),
('RESULTS_FILE',results_file),
('SERVICE',service),
('SHOW_OVERVIEW',show_overview),
('CMD', 'Get'),
]
query = [x for x in parameters if x[1] is not None]
message = urllib.urlencode(query)
delay = 3.0
previous = time.time()
while True:
current = time.time()
wait = previous + delay - current
if wait > 0:
time.sleep(wait)
previous = current + wait
else:
previous = current
request = urllib2.Request("http://blast.ncbi.nlm.nih.gov/Blast.cgi",
message,
{"User-Agent":"BiopythonClient"})
handle = urllib2.urlopen(request)
results = handle.read()
if results=="\n\n":
continue
if results.find("Status=") < 0:
if results == "\n\n":
continue
break
i = results.index("Status=")
j = results.index("\n", i)
status = results[i+len("Status="):j].strip()
if status.upper() == "READY":
break
return StringIO.StringIO(results)
def _parse_qblast_ref_page(handle):
s = handle.read()
i = s.find("RID =")
if i == -1:
rid = None
else:
j = s.find("\n", i)
rid = s[i+len("RID ="):j].strip()
i = s.find("RTOE =")
if i == -1:
rtoe = None
else:
j = s.find("\n", i)
rtoe = s[i+len("RTOE ="):j].strip()
if not rid and not rtoe:
#Can we reliably extract the error message from the HTML page?
#e.g. "Message ID#24 Error: Failed to read the Blast query:
# Nucleotide FASTA provided for protein sequence"
#This occurs inside a <div class="error msInf"> entry so try this:
i = s.find('<div class="error msInf">')
if i != -1:
msg = s[i+len('<div class="error msInf">'):].strip()
msg = msg.split("</div>",1)[0].split("\n",1)[0].strip()
if msg:
raise ValueError("Error message from NCBI: %s" % msg)
#We didn't recognise the error layout :(
raise ValueError("No RID and no RTOE found in the 'please wait' page."
" (there was probably a problem with your request)")
elif not rid:
raise ValueError("No RID found in the 'please wait' page."
" (although RTOE = %s)" % repr(rtoe))
elif not rtoe:
raise ValueError("No RTOE found in the 'please wait' page."
" (although RID = %s)" % repr(rid))
try:
return rid, int(rtoe)
except ValueError:
raise ValueError("A non-integer RTOE found in " \
+"the 'please wait' page, %s" % repr(rtoe))
| true | true |
f73ae27acfddb97a5a793e676d74a4d7a58eef84 | 22,342 | py | Python | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | [
"BSD-2-Clause"
] | null | null | null | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | [
"BSD-2-Clause"
] | null | null | null | cartridge/shop/migrations/0016_add_field_product__meta_title.py | AlexHill/cartridge | cb8599d43600442a223a484dc75726bfbbec68a0 | [
"BSD-2-Clause"
] | null | null | null | from __future__ import unicode_literals
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration 0016: add the ``_meta_title`` field to ``Product``.

    Auto-generated by South's ``schemamigration`` command.  ``_meta_title``
    mirrors the same field on Mezzanine's ``Page`` model (see
    ``pages.page`` below) so products can carry an SEO title distinct from
    their display title.
    """

    def forwards(self, orm):
        # Adding field 'Product._meta_title'
        # Nullable + blank so existing rows need no default value
        # (keep_default=False: the NULL default is not persisted in the schema).
        db.add_column('shop_product', '_meta_title',
                      self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Product._meta_title'
        db.delete_column('shop_product', '_meta_title')

    # Frozen snapshot of the ORM state at the time this migration was
    # generated.  South uses it to build the fake ORM passed to
    # forwards()/backwards(); it is machine-generated — do not edit by hand.
    models = {
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'generic.assignedkeyword': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {})
        },
        'generic.keyword': {
            'Meta': {'object_name': 'Keyword'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        'generic.rating': {
            'Meta': {'object_name': 'Rating'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_pk': ('django.db.models.fields.IntegerField', [], {}),
            'value': ('django.db.models.fields.IntegerField', [], {})
        },
        'pages.page': {
            'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '[1, 2, 3]', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            # Generic-relation field frozen out by South (cannot be represented):
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
        },
        'shop.cart': {
            'Meta': {'object_name': 'Cart'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
        },
        'shop.cartitem': {
            'Meta': {'object_name': 'CartItem'},
            'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
            'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
        },
        'shop.category': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
            'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
            'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
            'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
        },
        'shop.discountcode': {
            'Meta': {'object_name': 'DiscountCode'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
            'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
            'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
            'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'shop.order': {
            'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
            'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
            'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
            'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
            'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
            'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'shop.orderitem': {
            'Meta': {'object_name': 'OrderItem'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
            'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
            'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        },
        'shop.product': {
            'Meta': {'object_name': 'Product'},
            # The field added by this migration:
            '_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
            'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
            'content': ('mezzanine.core.fields.RichTextField', [], {}),
            'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            # Generic-relation fields frozen out by South (cannot be represented):
            #'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']", 'frozen_by_south': 'True'}),
            'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            #'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']", 'frozen_by_south': 'True'}),
            'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
            'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
        },
        'shop.productaction': {
            'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
            'timestamp': ('django.db.models.fields.IntegerField', [], {}),
            'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        'shop.productimage': {
            'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
            '_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
        },
        'shop.productoption': {
            'Meta': {'object_name': 'ProductOption'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'type': ('django.db.models.fields.IntegerField', [], {})
        },
        'shop.productvariation': {
            'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
            'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
            'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
            'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
            'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
        },
        'shop.sale': {
            'Meta': {'object_name': 'Sale'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
            'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'sites.site': {
            'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
            'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        }
    }

    # Apps whose frozen models above are complete for this migration.
    complete_apps = ['shop']
| 87.273438 | 197 | 0.561409 | from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
db.add_column('shop_product', '_meta_title',
self.gf('django.db.models.fields.CharField')(max_length=500, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
db.delete_column('shop_product', '_meta_title')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_menus': ('mezzanine.pages.fields.MenusField', [], {'default': '[1, 2, 3]', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'uses_remaining': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'transaction_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'_meta_title': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Category']", 'symmetrical': 'False', 'blank': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'gen_description': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('cartridge.shop.fields.PercentageField', [], {'null': 'True', 'max_digits': '5', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
| true | true |
f73ae3119d68870cdca0bd4fd22722754526fc33 | 3,689 | py | Python | contrib/macdeploy/custom_dsstore.py | celbalrai/bytz | 23557be163aa3908ba4ae2dc1ec11bce12d1ee2d | [
"MIT"
] | 2 | 2021-09-11T22:50:58.000Z | 2021-09-30T19:55:30.000Z | contrib/macdeploy/custom_dsstore.py | celbalrai/bytz | 23557be163aa3908ba4ae2dc1ec11bce12d1ee2d | [
"MIT"
] | 3 | 2021-07-19T10:25:36.000Z | 2021-07-21T10:47:31.000Z | contrib/macdeploy/custom_dsstore.py | celbalrai/bytz | 23557be163aa3908ba4ae2dc1ec11bce12d1ee2d | [
"MIT"
] | 8 | 2021-03-23T13:25:08.000Z | 2022-03-09T10:45:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2013-2015 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bytzuser:\x00Documents:\x00bytz:\x00bytz:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bytzuser/Documents/bytz/bytz/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Bytz-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| 61.483333 | 1,817 | 0.722689 |
import biplist
from ds_store import DSStore
from mac_alias import Alias
import sys
output_file = sys.argv[1]
package_name_ns = sys.argv[2]
ds = DSStore.open(output_file, 'w+')
ds['.']['bwsp'] = {
'ShowStatusBar': False,
'WindowBounds': '{{300, 280}, {500, 343}}',
'ContainerShowSidebar': False,
'SidebarWidth': 0,
'ShowTabView': False,
'PreviewPaneVisibility': False,
'ShowToolbar': False,
'ShowSidebar': False,
'ShowPathbar': True
}
icvp = {
'gridOffsetX': 0.0,
'textSize': 12.0,
'viewOptionsVersion': 1,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
'backgroundColorBlue': 1.0,
'iconSize': 96.0,
'backgroundColorGreen': 1.0,
'arrangeBy': 'none',
'showIconPreview': True,
'gridSpacing': 100.0,
'gridOffsetY': 0.0,
'showItemInfo': False,
'labelOnBottom': True,
'backgroundType': 2,
'backgroundColorRed': 1.0
}
alias = Alias.from_bytes(icvp['backgroundImageAlias'])
alias.volume.name = package_name_ns
alias.volume.posix_path = '/Volumes/' + package_name_ns
alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bytzuser:\x00Documents:\x00bytz:\x00bytz:\x00' + package_name_ns + '.temp.dmg'
alias.volume.disk_image_alias.target.posix_path = 'Users/bytzuser/Documents/bytz/bytz/' + package_name_ns + '.temp.dmg'
alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes())
ds['.']['icvp'] = icvp
ds['.']['vSrn'] = ('long', 1)
ds['Applications']['Iloc'] = (370, 156)
ds['Bytz-Qt.app']['Iloc'] = (128, 156)
ds.flush()
ds.close()
| true | true |
f73ae31313d147e209ed4b6100fc309e4b1e8b53 | 623 | py | Python | migrations/20190401140827_create_db.py | Hiome/core-db | 48834464100858207f9930ec51c592ffa3e6acb8 | [
"MIT"
] | null | null | null | migrations/20190401140827_create_db.py | Hiome/core-db | 48834464100858207f9930ec51c592ffa3e6acb8 | [
"MIT"
] | null | null | null | migrations/20190401140827_create_db.py | Hiome/core-db | 48834464100858207f9930ec51c592ffa3e6acb8 | [
"MIT"
] | null | null | null | from yoyo import step
__transactional__ = False
step(
"""
create table rooms (
id text primary key,
name text not null,
occupancy_count smallint
)
""",
"drop table rooms"
)
step(
"""
create table sensors (
id text primary key,
room_id text,
name text,
type text,
data text,
battery real,
last_seen timestamp,
version text
)
""",
"drop table sensors"
)
step(
"""
create table alerts (
source text,
device_id text,
message text,
level text,
occurred_at timestamp not null default current_timestamp
)
""",
"drop table alerts"
)
| 14.159091 | 60 | 0.621188 | from yoyo import step
__transactional__ = False
step(
"""
create table rooms (
id text primary key,
name text not null,
occupancy_count smallint
)
""",
"drop table rooms"
)
step(
"""
create table sensors (
id text primary key,
room_id text,
name text,
type text,
data text,
battery real,
last_seen timestamp,
version text
)
""",
"drop table sensors"
)
step(
"""
create table alerts (
source text,
device_id text,
message text,
level text,
occurred_at timestamp not null default current_timestamp
)
""",
"drop table alerts"
)
| true | true |
f73ae32d45268745bd5104686ff427d4eb9ae7ec | 110 | py | Python | lang/Python/logical-operations.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/logical-operations.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | lang/Python/logical-operations.py | ethansaxenian/RosettaDecode | 8ea1a42a5f792280b50193ad47545d14ee371fb7 | [
"MIT"
] | null | null | null | def logic(a, b):
print(('a and b:', a and b))
print(('a or b:', a or b))
print(('not a:', not a))
| 22 | 32 | 0.463636 | def logic(a, b):
print(('a and b:', a and b))
print(('a or b:', a or b))
print(('not a:', not a))
| true | true |
f73ae7872e61f7219f1b265d16d6de98242da868 | 3,131 | py | Python | 1_django-login-registration_noob/my_project/settings.py | rvcgeeks/Python-Django-101 | c026430b862821feffb45d39cc85cd93bf2e6559 | [
"MIT"
] | null | null | null | 1_django-login-registration_noob/my_project/settings.py | rvcgeeks/Python-Django-101 | c026430b862821feffb45d39cc85cd93bf2e6559 | [
"MIT"
] | null | null | null | 1_django-login-registration_noob/my_project/settings.py | rvcgeeks/Python-Django-101 | c026430b862821feffb45d39cc85cd93bf2e6559 | [
"MIT"
] | null | null | null | """
Django settings for my_project project.
Generated by 'django-admin startproject' using Django 1.10.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '*wi&v$_lj_y9m_4^i583hb+*zdmm&mx_=c$_v*j9lk*tyaiiwj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'my_apps.user_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| 25.663934 | 91 | 0.69786 |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = '*wi&v$_lj_y9m_4^i583hb+*zdmm&mx_=c$_v*j9lk*tyaiiwj'
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'my_apps.user_auth',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'my_project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'my_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
| true | true |
f73ae93b5d50ca9a8f7a0a526915ebb7cf8c2c25 | 2,008 | py | Python | PLC/Methods/system/methodSignature.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | PLC/Methods/system/methodSignature.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | PLC/Methods/system/methodSignature.py | dreibh/planetlab-lxc-plcapi | 065dfc54a2b668e99eab343d113f1a31fb154b13 | [
"BSD-3-Clause"
] | null | null | null | from PLC.Parameter import Parameter, Mixed
from PLC.Method import Method, xmlrpc_type
from functools import reduce
class methodSignature(Method):
"""
Returns an array of known signatures (an array of arrays) for the
method name passed. If no signatures are known, returns a
none-array (test for type != array to detect missing signature).
"""
roles = []
accepts = [Parameter(str, "Method name")]
returns = [Parameter([str], "Method signature")]
def __init__(self, api):
Method.__init__(self, api)
self.name = "system.methodSignature"
def possible_signatures(self, signature, arg):
"""
Return a list of the possible new signatures given a current
signature and the next argument.
"""
if isinstance(arg, Mixed):
arg_types = [xmlrpc_type(mixed_arg) for mixed_arg in arg]
else:
arg_types = [xmlrpc_type(arg)]
return [signature + [arg_type] for arg_type in arg_types]
def signatures(self, returns, args):
"""
Returns a list of possible signatures given a return value and
a set of arguments.
"""
signatures = [[xmlrpc_type(returns)]]
for arg in args:
# Create lists of possible new signatures for each current
# signature. Reduce the list of lists back down to a
# single list.
signatures = reduce(lambda a, b: a + b,
[self.possible_signatures(signature, arg) \
for signature in signatures])
return signatures
def call(self, method):
function = self.api.callable(method)
(min_args, max_args, defaults) = function.args()
signatures = []
assert len(max_args) >= len(min_args)
for num_args in range(len(min_args), len(max_args) + 1):
signatures += self.signatures(function.returns, function.accepts[:num_args])
return signatures
| 32.387097 | 88 | 0.613546 | from PLC.Parameter import Parameter, Mixed
from PLC.Method import Method, xmlrpc_type
from functools import reduce
class methodSignature(Method):
roles = []
accepts = [Parameter(str, "Method name")]
returns = [Parameter([str], "Method signature")]
def __init__(self, api):
Method.__init__(self, api)
self.name = "system.methodSignature"
def possible_signatures(self, signature, arg):
if isinstance(arg, Mixed):
arg_types = [xmlrpc_type(mixed_arg) for mixed_arg in arg]
else:
arg_types = [xmlrpc_type(arg)]
return [signature + [arg_type] for arg_type in arg_types]
def signatures(self, returns, args):
signatures = [[xmlrpc_type(returns)]]
for arg in args:
signatures = reduce(lambda a, b: a + b,
[self.possible_signatures(signature, arg) \
for signature in signatures])
return signatures
def call(self, method):
function = self.api.callable(method)
(min_args, max_args, defaults) = function.args()
signatures = []
assert len(max_args) >= len(min_args)
for num_args in range(len(min_args), len(max_args) + 1):
signatures += self.signatures(function.returns, function.accepts[:num_args])
return signatures
| true | true |
f73aead0cc86b06c28033b4cffeb0979873dae8a | 724 | py | Python | manage.py | pra17dod/Help-People | 2f41d8609749db0134e69ca6d0f57fc165d15e71 | [
"Apache-2.0"
] | null | null | null | manage.py | pra17dod/Help-People | 2f41d8609749db0134e69ca6d0f57fc165d15e71 | [
"Apache-2.0"
] | null | null | null | manage.py | pra17dod/Help-People | 2f41d8609749db0134e69ca6d0f57fc165d15e71 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helppeople.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
from django_secrets.startup import check
check()
main()
| 27.846154 | 74 | 0.68232 |
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'helppeople.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
from django_secrets.startup import check
check()
main()
| true | true |
f73aeb8a85883b82fe54cba500e7c8d3ee740b33 | 1,898 | py | Python | mlb-ml/api/gameday_api_handler.py | alhart2015/mlb-ml | 5afe4db9f21650c67adeae3c20b558aa0406cb5b | [
"MIT"
] | null | null | null | mlb-ml/api/gameday_api_handler.py | alhart2015/mlb-ml | 5afe4db9f21650c67adeae3c20b558aa0406cb5b | [
"MIT"
] | null | null | null | mlb-ml/api/gameday_api_handler.py | alhart2015/mlb-ml | 5afe4db9f21650c67adeae3c20b558aa0406cb5b | [
"MIT"
] | null | null | null | """
Read from the MLB Gameday API.
Base URL: https://statsapi.mlb.com/docs/#operation/stats
Hitter stat URL: https://statsapi.mlb.com/api/v1/stats?stats=season&group=hitting
"""
from typing import Dict, List
from schema.player import Player
from schema.team import Team
import requests
import utils
def get_top_hitter_stats() -> List[Player]:
"""
Pull from the MLB Gameday API for hitter stats.
todo: figure out how to get all players and not just the top 50
"""
url = utils.HITTER_STATS_URL
response = requests.get(url)
response_json: Dict = response.json()
splits_list = response_json['stats'][0]['splits']
players = []
for split in splits_list:
players.append(Player.from_splits_json(split))
return players
def get_team_info() -> List[Team]:
"""
Pull from the MLB Gameday API for teams. This will give you a comprehensive
list of all teams, hopefully we can use that to pull all stats for all
players on all teams.
"""
url = utils.TEAM_INFO_URL
response = requests.get(url)
response_json: Dict = response.json()
teams = response_json['teams']
parsed_teams = []
for team in teams:
parsed_teams.append(Team.from_json(team))
return parsed_teams
# print(response.text)
def get_hitter_stats_for_team_id(team_id: int, season: int, game_type: str) -> List[Player]:
"""
Get hitter stats for the provided team, season, and game type.
todo: this should def be combined with get_top_hitter_stats()
"""
url = utils.hitter_url_for_team(team_id, season, game_type)
response = requests.get(url)
response_json: Dict = response.json()
splits_list = response_json['stats'][0]['splits']
players = []
for split in splits_list:
players.append(Player.from_splits_json(split))
return players
def test():
print("here") | 24.333333 | 92 | 0.684405 | from typing import Dict, List
from schema.player import Player
from schema.team import Team
import requests
import utils
def get_top_hitter_stats() -> List[Player]:
url = utils.HITTER_STATS_URL
response = requests.get(url)
response_json: Dict = response.json()
splits_list = response_json['stats'][0]['splits']
players = []
for split in splits_list:
players.append(Player.from_splits_json(split))
return players
def get_team_info() -> List[Team]:
url = utils.TEAM_INFO_URL
response = requests.get(url)
response_json: Dict = response.json()
teams = response_json['teams']
parsed_teams = []
for team in teams:
parsed_teams.append(Team.from_json(team))
return parsed_teams
def get_hitter_stats_for_team_id(team_id: int, season: int, game_type: str) -> List[Player]:
url = utils.hitter_url_for_team(team_id, season, game_type)
response = requests.get(url)
response_json: Dict = response.json()
splits_list = response_json['stats'][0]['splits']
players = []
for split in splits_list:
players.append(Player.from_splits_json(split))
return players
def test():
print("here") | true | true |
f73aecc844d33589f61e3d39cf78b5dd02f4dfd0 | 401 | py | Python | dockerfactory/files.py | lukas2511/debian-docker-factory | 73f702f571934022dd3526709be42a8d7a1f1179 | [
"MIT"
] | 2 | 2016-10-21T09:17:09.000Z | 2021-11-15T17:06:25.000Z | dockerfactory/files.py | lukas2511/debian-docker-factory | 73f702f571934022dd3526709be42a8d7a1f1179 | [
"MIT"
] | null | null | null | dockerfactory/files.py | lukas2511/debian-docker-factory | 73f702f571934022dd3526709be42a8d7a1f1179 | [
"MIT"
] | 1 | 2019-11-02T23:34:28.000Z | 2019-11-02T23:34:28.000Z | import os
from glob import glob
def get_latest_file_change(files):
latest = 0
for file in files:
src = file['src']
if os.path.isdir(src):
date = get_latest_file_change(list({'src': x} for x in glob(os.path.join(src, '*'))))
else:
date = os.path.getmtime(src)
if date > latest:
latest = date
return int(latest)
| 23.588235 | 97 | 0.553616 | import os
from glob import glob
def get_latest_file_change(files):
latest = 0
for file in files:
src = file['src']
if os.path.isdir(src):
date = get_latest_file_change(list({'src': x} for x in glob(os.path.join(src, '*'))))
else:
date = os.path.getmtime(src)
if date > latest:
latest = date
return int(latest)
| true | true |
f73aece390b40d44a028d4585966bb5c2e4daddc | 7,156 | py | Python | venv/Lib/site-packages/pandas/tests/series/test_analytics.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/series/test_analytics.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | venv/Lib/site-packages/pandas/tests/series/test_analytics.py | OliviaNabbosa89/Disaster_Responses | 1e66d77c303cec685dfc2ca94f4fca4cc9400570 | [
"MIT"
] | null | null | null | import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesAnalytics:
    """Assorted analytics regression tests for ``pandas.Series``.

    Covers reductions (prod/sum/median), matrix multiplication dispatch,
    repeat, monotonicity flags, numpy-kwarg validation, and timedelta64
    overflow behavior.  GH numbers in comments reference pandas issues.
    """
    def test_prod_numpy16_bug(self):
        # prod() of a Series must reduce to a scalar, not stay a Series.
        s = Series([1.0, 1.0, 1.0], index=range(3))
        result = s.prod()
        assert not isinstance(result, Series)
    def test_matmul(self):
        # matmul test is for GH #10259
        a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
        b = DataFrame(
            np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
        ).T
        # Series @ DataFrame -> Series
        result = operator.matmul(a, b)
        expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
        tm.assert_series_equal(result, expected)
        # DataFrame @ Series -> Series
        result = operator.matmul(b.T, a)
        expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
        tm.assert_series_equal(result, expected)
        # Series @ Series -> scalar
        result = operator.matmul(a, a)
        expected = np.dot(a.values, a.values)
        tm.assert_almost_equal(result, expected)
        # GH 21530
        # vector (1D np.array) @ Series (__rmatmul__)
        result = operator.matmul(a.values, a)
        expected = np.dot(a.values, a.values)
        tm.assert_almost_equal(result, expected)
        # GH 21530
        # vector (1D list) @ Series (__rmatmul__)
        result = operator.matmul(a.values.tolist(), a)
        expected = np.dot(a.values, a.values)
        tm.assert_almost_equal(result, expected)
        # GH 21530
        # matrix (2D np.array) @ Series (__rmatmul__)
        result = operator.matmul(b.T.values, a)
        expected = np.dot(b.T.values, a.values)
        tm.assert_almost_equal(result, expected)
        # GH 21530
        # matrix (2D nested lists) @ Series (__rmatmul__)
        result = operator.matmul(b.T.values.tolist(), a)
        expected = np.dot(b.T.values, a.values)
        tm.assert_almost_equal(result, expected)
        # mixed dtype DataFrame @ Series
        a["p"] = int(a.p)
        result = operator.matmul(b.T, a)
        expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
        tm.assert_series_equal(result, expected)
        # different dtypes DataFrame @ Series
        a = a.astype(int)
        result = operator.matmul(b.T, a)
        expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
        tm.assert_series_equal(result, expected)
        # Shape mismatches must raise with an informative message.
        msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
        # exception raised is of type Exception
        with pytest.raises(Exception, match=msg):
            a.dot(a.values[:3])
        msg = "matrices are not aligned"
        with pytest.raises(ValueError, match=msg):
            a.dot(b.T)
    def test_ptp(self):
        # GH21614: np.ptp on a Series matches np.ptp on the raw ndarray.
        N = 1000
        arr = np.random.randn(N)
        ser = Series(arr)
        assert np.ptp(ser) == np.ptp(arr)
    def test_repeat(self):
        # Scalar repeat count: values and index both repeated elementwise.
        s = Series(np.random.randn(3), index=["a", "b", "c"])
        reps = s.repeat(5)
        exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
        tm.assert_series_equal(reps, exp)
        # Per-element repeat counts.
        to_rep = [2, 3, 4]
        reps = s.repeat(to_rep)
        exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep))
        tm.assert_series_equal(reps, exp)
    def test_numpy_repeat(self):
        # np.repeat dispatches to Series.repeat; the numpy-only 'axis'
        # keyword must be rejected.
        s = Series(np.arange(3), name="x")
        expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2))
        tm.assert_series_equal(np.repeat(s, 2), expected)
        msg = "the 'axis' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.repeat(s, 2, axis=0)
    def test_is_monotonic(self):
        # Monotonicity flags for random, increasing, decreasing, and
        # datetime-valued Series.
        s = Series(np.random.randint(0, 10, size=1000))
        assert not s.is_monotonic
        s = Series(np.arange(1000))
        assert s.is_monotonic is True
        assert s.is_monotonic_increasing is True
        s = Series(np.arange(1000, 0, -1))
        assert s.is_monotonic_decreasing is True
        s = Series(pd.date_range("20130101", periods=10))
        assert s.is_monotonic is True
        assert s.is_monotonic_increasing is True
        s = Series(list(reversed(s.tolist())))
        assert s.is_monotonic is False
        assert s.is_monotonic_decreasing is True
    @pytest.mark.parametrize("func", [np.any, np.all])
    @pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
    @td.skip_if_np_lt("1.15")
    def test_validate_any_all_out_keepdims_raises(self, kwargs, func):
        # numpy-only kwargs ('keepdims', 'out') to any/all must raise.
        s = pd.Series([1, 2])
        param = list(kwargs)[0]
        name = func.__name__
        msg = (
            f"the '{param}' parameter is not "
            "supported in the pandas "
            fr"implementation of {name}\(\)"
        )
        with pytest.raises(ValueError, match=msg):
            func(s, **kwargs)
    @td.skip_if_np_lt("1.15")
    def test_validate_sum_initial(self):
        # numpy's 'initial' kwarg is rejected by the pandas sum implementation.
        s = pd.Series([1, 2])
        msg = (
            r"the 'initial' parameter is not "
            r"supported in the pandas "
            r"implementation of sum\(\)"
        )
        with pytest.raises(ValueError, match=msg):
            np.sum(s, initial=10)
    def test_validate_median_initial(self):
        # numpy's 'overwrite_input' kwarg is rejected by Series.median.
        s = pd.Series([1, 2])
        msg = (
            r"the 'overwrite_input' parameter is not "
            r"supported in the pandas "
            r"implementation of median\(\)"
        )
        with pytest.raises(ValueError, match=msg):
            # It seems like np.median doesn't dispatch, so we use the
            # method instead of the ufunc.
            s.median(overwrite_input=True)
    @td.skip_if_np_lt("1.15")
    def test_validate_stat_keepdims(self):
        # numpy's 'keepdims' kwarg is rejected by the pandas sum implementation.
        s = pd.Series([1, 2])
        msg = (
            r"the 'keepdims' parameter is not "
            r"supported in the pandas "
            r"implementation of sum\(\)"
        )
        with pytest.raises(ValueError, match=msg):
            np.sum(s, keepdims=True)
    def test_td64_summation_overflow(self):
        # GH 9442: summing large timedelta64 Series must raise on overflow,
        # while short enough slices still sum cleanly.
        s = pd.Series(pd.date_range("20130101", periods=100000, freq="H"))
        s[0] += pd.Timedelta("1s 1ms")
        # mean
        result = (s - s.min()).mean()
        expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum())
        # the computation is converted to float so
        # might be some loss of precision
        assert np.allclose(result.value / 1000, expected.value / 1000)
        # sum
        msg = "overflow in timedelta operation"
        with pytest.raises(ValueError, match=msg):
            (s - s.min()).sum()
        s1 = s[0:10000]
        with pytest.raises(ValueError, match=msg):
            (s1 - s1.min()).sum()
        # 1000 elements is small enough not to overflow.
        s2 = s[0:1000]
        (s2 - s2.min()).sum()
| 35.078431 | 88 | 0.561906 | import operator
import numpy as np
import pytest
import pandas.util._test_decorators as td
import pandas as pd
from pandas import DataFrame, Series
import pandas._testing as tm
class TestSeriesAnalytics:
def test_prod_numpy16_bug(self):
s = Series([1.0, 1.0, 1.0], index=range(3))
result = s.prod()
assert not isinstance(result, Series)
def test_matmul(self):
a = Series(np.random.randn(4), index=["p", "q", "r", "s"])
b = DataFrame(
np.random.randn(3, 4), index=["1", "2", "3"], columns=["p", "q", "r", "s"]
).T
result = operator.matmul(a, b)
expected = Series(np.dot(a.values, b.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
result = operator.matmul(a, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
result = operator.matmul(a.values, a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
result = operator.matmul(a.values.tolist(), a)
expected = np.dot(a.values, a.values)
tm.assert_almost_equal(result, expected)
result = operator.matmul(b.T.values, a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
result = operator.matmul(b.T.values.tolist(), a)
expected = np.dot(b.T.values, a.values)
tm.assert_almost_equal(result, expected)
a["p"] = int(a.p)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
a = a.astype(int)
result = operator.matmul(b.T, a)
expected = Series(np.dot(b.T.values, a.T.values), index=["1", "2", "3"])
tm.assert_series_equal(result, expected)
msg = r"Dot product shape mismatch, \(4,\) vs \(3,\)"
with pytest.raises(Exception, match=msg):
a.dot(a.values[:3])
msg = "matrices are not aligned"
with pytest.raises(ValueError, match=msg):
a.dot(b.T)
def test_ptp(self):
N = 1000
arr = np.random.randn(N)
ser = Series(arr)
assert np.ptp(ser) == np.ptp(arr)
def test_repeat(self):
s = Series(np.random.randn(3), index=["a", "b", "c"])
reps = s.repeat(5)
exp = Series(s.values.repeat(5), index=s.index.values.repeat(5))
tm.assert_series_equal(reps, exp)
to_rep = [2, 3, 4]
reps = s.repeat(to_rep)
exp = Series(s.values.repeat(to_rep), index=s.index.values.repeat(to_rep))
tm.assert_series_equal(reps, exp)
def test_numpy_repeat(self):
s = Series(np.arange(3), name="x")
expected = Series(s.values.repeat(2), name="x", index=s.index.values.repeat(2))
tm.assert_series_equal(np.repeat(s, 2), expected)
msg = "the 'axis' parameter is not supported"
with pytest.raises(ValueError, match=msg):
np.repeat(s, 2, axis=0)
def test_is_monotonic(self):
s = Series(np.random.randint(0, 10, size=1000))
assert not s.is_monotonic
s = Series(np.arange(1000))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(np.arange(1000, 0, -1))
assert s.is_monotonic_decreasing is True
s = Series(pd.date_range("20130101", periods=10))
assert s.is_monotonic is True
assert s.is_monotonic_increasing is True
s = Series(list(reversed(s.tolist())))
assert s.is_monotonic is False
assert s.is_monotonic_decreasing is True
@pytest.mark.parametrize("func", [np.any, np.all])
@pytest.mark.parametrize("kwargs", [dict(keepdims=True), dict(out=object())])
@td.skip_if_np_lt("1.15")
def test_validate_any_all_out_keepdims_raises(self, kwargs, func):
s = pd.Series([1, 2])
param = list(kwargs)[0]
name = func.__name__
msg = (
f"the '{param}' parameter is not "
"supported in the pandas "
fr"implementation of {name}\(\)"
)
with pytest.raises(ValueError, match=msg):
func(s, **kwargs)
@td.skip_if_np_lt("1.15")
def test_validate_sum_initial(self):
s = pd.Series([1, 2])
msg = (
r"the 'initial' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)"
)
with pytest.raises(ValueError, match=msg):
np.sum(s, initial=10)
def test_validate_median_initial(self):
s = pd.Series([1, 2])
msg = (
r"the 'overwrite_input' parameter is not "
r"supported in the pandas "
r"implementation of median\(\)"
)
with pytest.raises(ValueError, match=msg):
# method instead of the ufunc.
s.median(overwrite_input=True)
@td.skip_if_np_lt("1.15")
def test_validate_stat_keepdims(self):
s = pd.Series([1, 2])
msg = (
r"the 'keepdims' parameter is not "
r"supported in the pandas "
r"implementation of sum\(\)"
)
with pytest.raises(ValueError, match=msg):
np.sum(s, keepdims=True)
def test_td64_summation_overflow(self):
# GH 9442
s = pd.Series(pd.date_range("20130101", periods=100000, freq="H"))
s[0] += pd.Timedelta("1s 1ms")
# mean
result = (s - s.min()).mean()
expected = pd.Timedelta((pd.TimedeltaIndex((s - s.min())).asi8 / len(s)).sum())
# the computation is converted to float so
# might be some loss of precision
assert np.allclose(result.value / 1000, expected.value / 1000)
# sum
msg = "overflow in timedelta operation"
with pytest.raises(ValueError, match=msg):
(s - s.min()).sum()
s1 = s[0:10000]
with pytest.raises(ValueError, match=msg):
(s1 - s1.min()).sum()
s2 = s[0:1000]
(s2 - s2.min()).sum()
| true | true |
f73aed434e81256421614842c6287ff8d447a05c | 2,287 | py | Python | sitemetrics/providers.py | idlesign/django-sitemetrics | 39f0e468337700d56826f45af407f732da1630cc | [
"BSD-3-Clause"
] | 23 | 2015-03-02T07:34:17.000Z | 2020-08-15T14:42:21.000Z | sitemetrics/providers.py | idlesign/django-sitemetrics | 39f0e468337700d56826f45af407f732da1630cc | [
"BSD-3-Clause"
] | 12 | 2015-03-02T14:06:42.000Z | 2018-09-17T13:53:05.000Z | sitemetrics/providers.py | idlesign/django-sitemetrics | 39f0e468337700d56826f45af407f732da1630cc | [
"BSD-3-Clause"
] | 17 | 2015-02-06T13:38:17.000Z | 2018-10-27T05:46:31.000Z | from typing import List, Type
from django.conf import settings
from django.utils.module_loading import import_module
from django.utils.translation import gettext_lazy as _
class MetricsProvider:
    """Common base for site metrics counter providers.

    Subclasses override ``alias`` (used to locate the counter template),
    ``title`` (human-readable provider name) and optionally ``params``.
    """

    alias: str = 'generic'
    title: str = 'Generic Provider'

    # Optional dictionary of metrics counter parameters.  These are passed
    # into the counter template file templates/sitemetrics/{alias}.html.
    params: dict = None

    @classmethod
    def get_template_name(cls) -> str:
        """Return the path of the JS counter code template."""
        return 'sitemetrics/%s.html' % cls.alias

    @classmethod
    def get_params(cls) -> dict:
        """Return the counter parameters dictionary (empty dict if unset)."""
        if cls.params:
            return cls.params
        return {}
class Yandex(MetricsProvider):
    """Yandex Metrika - http://metrika.yandex.ru/"""
    alias: str = 'yandex'
    title: str = _('Yandex Metrika')
    # Default counter options rendered into sitemetrics/yandex.html.
    # Presumably these map to Metrika counter init flags (webvisor,
    # clickmap, etc.) — confirm against the template.
    params: dict = {
        'webvisor': True,
        'clickmap': True,
        'track_links': True,
        'accurate_bounce': True,
        'no_index': False,
        'track_hash': True,
        'xml': False,
        'user_params': False,
    }
class Openstat(MetricsProvider):
    """Openstat - https://www.openstat.com/"""
    alias: str = 'openstat'
    title: str = _('Openstat')
    # Default counter options rendered into sitemetrics/openstat.html.
    params: dict = {
        'image': None,
        'color': None,
        'next': 'openstat',
    }
class Google(MetricsProvider):
    """Google Analytics - http://www.google.com/analytics/"""
    # No extra params: the template only needs the alias-based lookup.
    alias: str = 'google'
    title: str = _('Google Analytics')
def get_custom_providers() -> List[Type[MetricsProvider]]:
    """Import provider classes from dotted paths in ``SITEMETRICS_PROVIDERS``.

    Returns an empty list when the setting is absent or empty.
    """
    dotted_paths = getattr(settings, 'SITEMETRICS_PROVIDERS', False)
    if not dotted_paths:
        return []
    classes = []
    for dotted in dotted_paths:
        # Split 'pkg.module.ClassName' into module path and class name.
        module_path, _sep, cls_name = dotted.rpartition('.')
        module = import_module(module_path)
        classes.append(getattr(module, cls_name))
    return classes
# Providers shipped with the package, used when no custom list is configured.
BUILTIN_PROVIDERS = (
    Yandex,
    Google,
    Openstat,
)
# Effective provider list: classes from SITEMETRICS_PROVIDERS if set,
# otherwise the built-in trio above.
METRICS_PROVIDERS = get_custom_providers() or BUILTIN_PROVIDERS
| 24.591398 | 88 | 0.64014 | from typing import List, Type
from django.conf import settings
from django.utils.module_loading import import_module
from django.utils.translation import gettext_lazy as _
class MetricsProvider:
alias: str = 'generic'
title: str = 'Generic Provider'
params: dict = None
@classmethod
def get_template_name(cls) -> str:
return f'sitemetrics/{cls.alias}.html'
@classmethod
def get_params(cls) -> dict:
return cls.params or {}
class Yandex(MetricsProvider):
alias: str = 'yandex'
title: str = _('Yandex Metrika')
params: dict = {
'webvisor': True,
'clickmap': True,
'track_links': True,
'accurate_bounce': True,
'no_index': False,
'track_hash': True,
'xml': False,
'user_params': False,
}
class Openstat(MetricsProvider):
alias: str = 'openstat'
title: str = _('Openstat')
params: dict = {
'image': None,
'color': None,
'next': 'openstat',
}
class Google(MetricsProvider):
alias: str = 'google'
title: str = _('Google Analytics')
def get_custom_providers() -> List[Type[MetricsProvider]]:
providers = getattr(settings, 'SITEMETRICS_PROVIDERS', False)
if not providers:
return []
p_clss = []
for provider_path in providers:
path_splitted = provider_path.split('.')
mod = import_module('.'.join(path_splitted[:-1]))
p_cls = getattr(mod, path_splitted[-1])
p_clss.append(p_cls)
return p_clss
BUILTIN_PROVIDERS = (
Yandex,
Google,
Openstat,
)
METRICS_PROVIDERS = get_custom_providers() or BUILTIN_PROVIDERS
| true | true |
f73aed61e48c769853e9970b2ac51ea49caf0389 | 1,607 | py | Python | gpytorch/models/gplvm/bayesian_gplvm.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 2,673 | 2018-02-19T22:28:58.000Z | 2022-03-31T13:22:28.000Z | gpytorch/models/gplvm/bayesian_gplvm.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 1,415 | 2018-02-19T20:38:20.000Z | 2022-03-30T12:53:13.000Z | gpytorch/models/gplvm/bayesian_gplvm.py | llguo95/gpytorch | 1fa69935104565c377ce95d2c581c9eedfb55817 | [
"MIT"
] | 467 | 2018-03-07T02:06:05.000Z | 2022-03-27T07:05:44.000Z | #!/usr/bin/env python3
from ..approximate_gp import ApproximateGP
class BayesianGPLVM(ApproximateGP):
    """
    The Gaussian Process Latent Variable Model (GPLVM) class for unsupervised learning.
    The class supports
    1. Point estimates for latent X when prior_x = None
    2. MAP Inference for X when prior_x is not None and inference == 'map'
    3. Gaussian variational distribution q(X) when prior_x is not None and inference == 'variational'
    .. seealso::
        The `GPLVM tutorial
        <examples/04_Variational_and_Approximate_GPs/Gaussian_Process_Latent_Variable_Models_with_Stochastic_Variational_Inference.ipynb>`_
        for use instructions.
    :param X: An instance of a sub-class of the LatentVariable class. One of,
        :class:`~gpytorch.models.gplvm.PointLatentVariable`, :class:`~gpytorch.models.gplvm.MAPLatentVariable`, or
        :class:`~gpytorch.models.gplvm.VariationalLatentVariable`, to facilitate inference with 1, 2, or 3 respectively.
    :type X: ~gpytorch.models.LatentVariable
    :param ~gpytorch.variational._VariationalStrategy variational_strategy: The strategy that determines
        how the model marginalizes over the variational distribution (over inducing points)
        to produce the approximate posterior distribution (over data)
    """
    def __init__(self, X, variational_strategy):
        super().__init__(variational_strategy)
        # Assigning Latent Variable: kept as an attribute so it is registered
        # with the model and can be sampled/optimized.
        self.X = X
    def forward(self):
        # Abstract: concrete GPLVM models must define the GP prior here.
        raise NotImplementedError
    def sample_latent_variable(self):
        # Calling the latent variable object draws (or returns) the latent X.
        sample = self.X()
        return sample
| 39.195122 | 139 | 0.728687 |
from ..approximate_gp import ApproximateGP
class BayesianGPLVM(ApproximateGP):
def __init__(self, X, variational_strategy):
super().__init__(variational_strategy)
self.X = X
def forward(self):
raise NotImplementedError
def sample_latent_variable(self):
sample = self.X()
return sample
| true | true |
f73aed6ac99a3e3d6af94507f7a2e6c3fc98de37 | 16,402 | py | Python | src/vgg19/theano_model/vgg19_model.py | dkdanielkost/Theano-Style-Transfer | 70438d3de51d059ea2129119a8cfcc86d2b403a9 | [
"MIT"
] | null | null | null | src/vgg19/theano_model/vgg19_model.py | dkdanielkost/Theano-Style-Transfer | 70438d3de51d059ea2129119a8cfcc86d2b403a9 | [
"MIT"
] | null | null | null | src/vgg19/theano_model/vgg19_model.py | dkdanielkost/Theano-Style-Transfer | 70438d3de51d059ea2129119a8cfcc86d2b403a9 | [
"MIT"
] | null | null | null | import numpy
import os
import numpy as np
import logging
from theano.tensor.signal import pool
from theano.tensor.nnet.abstract_conv import bilinear_upsampling
import joblib
from theano.tensor.nnet import conv2d
from theano.tensor.nnet import relu,softmax
import theano
import theano.tensor as T
from theano.tensor.signal.pool import pool_2d
cwd = os.path.dirname(os.path.realpath(__file__))
'''
['convolution2d_4_weights',
'dense_1_weights',
'dense_2_weights',
'convolution2d_8_weights',
'convolution2d_5_weights',
'convolution2d_13_weights',
'convolution2d_7_weights',
'convolution2d_15_weights',
'convolution2d_59_weights',
'convolution2d_14_weights',
'convolution2d_16_weights',
'dense_1_weights',
'convolution2d_6_weights',
'convolution2d_3_weights',
'convolution2d_10_weights',
'convolution2d_1_weights',
'convolution2d_10_weights',
'convolution2d_60_weights',
'convolution2d_2_weights']
'''
# Load the pre-trained VGG weights from disk into a flat dictionary keyed
# '{index}_w' / '{index}_b' for the 16 convolutional layers.
# NOTE(review): this rebinds `cwd` to os.getcwd() (overriding the
# realpath-based `cwd` above), so loading depends on the process working
# directory — fragile if the script is launched from elsewhere.
cwd = os.getcwd()
direct = os.path.join(cwd,'theano_model','weights')
layer_weights = {}
# Kernel weights: files named '0_w' .. '15_w'.
for layer_weight_path in range(16):
    layer_weights[str(layer_weight_path) + '_w'] = joblib.load(os.path.join(direct,str(layer_weight_path) + '_w'))
# Bias weights: files named '0_b' .. '15_b'.
for bias_layer_path in range(16):
    layer_weights[str(bias_layer_path) + '_b'] = joblib.load(os.path.join(direct,str(bias_layer_path) + '_b'))
def drop(input, p=0.5):
    """Apply a symbolic binary dropout mask to a Theano tensor.

    :type input: theano symbolic tensor
    :param input: layer output or weight matrix to mask
    :type p: float between 0. and 1.
    :param p: probability of KEEPING a unit (so 1 - p is the drop rate)
    """
    seed_rng = numpy.random.RandomState(1234)
    stream = T.shared_randomstreams.RandomStreams(seed_rng.randint(999999))
    # Bernoulli(p) mask in the configured float dtype, same shape as input.
    keep_mask = stream.binomial(n=1, p=p, size=input.shape, dtype=theano.config.floatX)
    return input * keep_mask
class DropoutHiddenLayer(object):
    """Fully-connected layer computing activation(input @ W + b).

    Despite the name, dropout is currently disabled: ``is_train`` and ``p``
    are accepted for interface compatibility but not used by this code.
    """
    def __init__(self, is_train, input, W=None, b=None,
                 activation=relu, p=0.5):
        self.input = input
        self.W = W
        self.b = b
        # Affine transform followed by the (default ReLU) non-linearity.
        pre_activation = T.dot(input, self.W) + self.b
        self.output = activation(pre_activation)
        # Trainable parameters of this layer.
        self.params = [self.W, self.b]
class VGG19_conv2d_layer(object):
    """Convolutional layer of the VGG network, initialized from the
    pre-loaded ``layer_weights`` dictionary (keys ``{layer_name}_w`` /
    ``{layer_name}_b``)."""
    def __init__(self, input, layer_name,image_shape,
                 activation=relu, border_mode=(2,2)):
        self.activation = activation
        self.input = input
        # Kernel weights as a shared variable in the configured float dtype.
        self.W = theano.shared(value=np.array(layer_weights[layer_name + '_w'],
                                              dtype=theano.config.floatX),
                               borrow=True)
        # Per-channel biases.
        self.b = theano.shared(value=np.array(layer_weights[layer_name + '_b'],
                                              dtype=theano.config.floatX
                                              )
                               , borrow=True)
        # 2D convolution; image_shape/filter_shape are optimization hints.
        self.conv_out = conv2d(
            input=input,
            input_shape=image_shape,
            filters=self.W,
            filter_shape=layer_weights[layer_name + '_w'].shape,
            border_mode=border_mode
        )
        # Broadcast bias over batch and spatial dims, then apply activation.
        self.output = activation(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
        self.params = [self.W, self.b]
        # NOTE(review): self.input is already assigned above; this duplicate
        # assignment is redundant.
        self.input = input
class VGG19(object):
    """Symbolic VGG-19 feature extractor (conv/pool stack only, no dense
    head), wired from the pre-loaded ``layer_weights`` keyed '0'..'15'.

    :param input_image_shape: 4-tuple (batch, channels, height, width) hint
        for the first conv layer.
    :param pool_method: Theano pooling mode; 'average_exc_pad' by default
        (average pooling is the usual choice for style transfer).
    NOTE(review): the ``image_shape`` hints for conv3_* claim 128 input
    channels and conv4_* claim 512, whereas standard VGG-19 has 256 channels
    throughout block 3 — these hints look wrong; confirm Theano tolerates or
    ignores them.  Also IMAGE_H/2 etc. are true division under Python 3, so
    the hints contain floats — verify against the Theano conv2d contract.
    """
    def __init__(self,input_image_shape,pool_method = 'average_exc_pad'):
        IMAGE_H = input_image_shape[2]
        IMAGE_W = input_image_shape[3]
        # Symbolic 4D input (batch, channel, row, col).
        self.input = theano.tensor.tensor4('input')
        # --- Block 1: two 64-channel convs + pool -------------------------
        self.conv1_1 = VGG19_conv2d_layer(input=self.input,
                                        layer_name='0',
                                        image_shape=input_image_shape,
                                        border_mode=(1,1))
        self.conv1_2 = VGG19_conv2d_layer(input=self.conv1_1.output,
                                        layer_name='1',
                                        image_shape=(None, 64, IMAGE_H, IMAGE_W),
                                        border_mode=(1,1))
        self.pool1 = pool_2d(
            input = self.conv1_2.output,
            ds = (2,2),
            mode = pool_method,
            ignore_border=True,
            st = (2,2))
        # --- Block 2 ------------------------------------------------------
        self.conv2_1 = VGG19_conv2d_layer(input=self.pool1,
                                        layer_name='2',
                                        image_shape=(None, 64, IMAGE_H/2, IMAGE_W/2),
                                        border_mode=(1,1))
        self.conv2_2 = VGG19_conv2d_layer(input=self.conv2_1.output,
                                        layer_name='3',
                                        image_shape=(None, 128, IMAGE_H/2, IMAGE_W/2),
                                        border_mode=(1,1))
        self.pool2 = pool_2d(
            input=self.conv2_2.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 3 (shape hints say 128 ch — see class note) ------------
        self.conv3_1 = VGG19_conv2d_layer(input=self.pool2,
                                        layer_name='4',
                                        image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
                                        border_mode=(1, 1))
        self.conv3_2= VGG19_conv2d_layer(input=self.conv3_1.output,
                                        layer_name='5',
                                        image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
                                        border_mode=(1, 1))
        self.conv3_3 = VGG19_conv2d_layer(input=self.conv3_2.output,
                                        layer_name='6',
                                        image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
                                        border_mode=(1, 1))
        self.conv3_4 = VGG19_conv2d_layer(input=self.conv3_3.output,
                                        layer_name='7',
                                        image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
                                        border_mode=(1, 1))
        self.pool3 = pool_2d(
            input=self.conv3_4.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 4 ------------------------------------------------------
        self.conv4_1 = VGG19_conv2d_layer(input=self.pool3,
                                        layer_name='8',
                                        image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
                                        border_mode=(1, 1))
        self.conv4_2 = VGG19_conv2d_layer(input=self.conv4_1.output,
                                        layer_name='9',
                                        image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
                                        border_mode=(1, 1))
        self.conv4_3 = VGG19_conv2d_layer(input=self.conv4_2.output,
                                        layer_name='10',
                                        image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
                                        border_mode=(1, 1))
        self.conv4_4 = VGG19_conv2d_layer(input=self.conv4_3.output,
                                        layer_name='11',
                                        image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
                                        border_mode=(1, 1))
        self.pool4 = pool_2d(
            input=self.conv4_4.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 5 ------------------------------------------------------
        self.conv5_1 = VGG19_conv2d_layer(input=self.pool4,
                                        layer_name='12',
                                        image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
                                        border_mode=(1, 1))
        self.conv5_2 = VGG19_conv2d_layer(input=self.conv5_1.output,
                                        layer_name='13',
                                        image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
                                        border_mode=(1, 1))
        self.conv5_3 = VGG19_conv2d_layer(input=self.conv5_2.output,
                                        layer_name='14',
                                        image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
                                        border_mode=(1, 1))
        self.conv5_4 = VGG19_conv2d_layer(input=self.conv5_3.output,
                                        layer_name='15',
                                        image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
                                        border_mode=(1, 1))
        self.pool5 = pool_2d(
            input=self.conv5_4.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # The original VGG-19 dense classifier head (flatten + three dense
        # layers) was intentionally left out of this port.
class VGG16(object):
    """Alternate VGG builder using Keras-style layer names and fixed
    224x224 shape hints, with max pooling by default.

    NOTE(review): despite the name, this class instantiates a VGG-19-shaped
    stack (four convs in blocks 3-5).  More importantly, its
    ``layer_name`` values ('convolution2d_1' ...) do not exist in this
    file's ``layer_weights`` dict, which is keyed '0_w'..'15_w' — as
    written, constructing this class raises KeyError.  It appears to target
    an older weight export; confirm before use.
    """
    def __init__(self,input_image_shape,pool_method = 'max'):
        self.input = theano.tensor.tensor4('input')
        # --- Block 1 ------------------------------------------------------
        self.conv1_1 = VGG19_conv2d_layer(input=self.input,
                                        layer_name='convolution2d_1',
                                        image_shape=input_image_shape,
                                        border_mode=(1,1))
        self.conv1_2 = VGG19_conv2d_layer(input=self.conv1_1.output,
                                        layer_name='convolution2d_2',
                                        image_shape=(None, 64, 224, 224),
                                        border_mode=(1,1))
        self.pool1 = pool_2d(
            input = self.conv1_2.output,
            ds = (2,2),
            mode = pool_method,
            ignore_border=True,
            st = (2,2))
        # --- Block 2 ------------------------------------------------------
        self.conv2_1 = VGG19_conv2d_layer(input=self.pool1,
                                        layer_name='convolution2d_3',
                                        image_shape=(None, 64, 112, 112),
                                        border_mode=(1,1))
        self.conv2_2 = VGG19_conv2d_layer(input=self.conv2_1.output,
                                        layer_name='convolution2d_4',
                                        image_shape=(None, 128, 112, 112),
                                        border_mode=(1,1))
        self.pool2 = pool_2d(
            input=self.conv2_2.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 3 (hints say 128 ch — see class note on reliability) ---
        self.conv3_1 = VGG19_conv2d_layer(input=self.pool2,
                                        layer_name='convolution2d_5',
                                        image_shape=(None, 128, 56, 56),
                                        border_mode=(1, 1))
        self.conv3_2= VGG19_conv2d_layer(input=self.conv3_1.output,
                                        layer_name='convolution2d_6',
                                        image_shape=(None, 128, 56, 56),
                                        border_mode=(1, 1))
        self.conv3_3 = VGG19_conv2d_layer(input=self.conv3_2.output,
                                        layer_name='convolution2d_7',
                                        image_shape=(None, 128, 56, 56),
                                        border_mode=(1, 1))
        self.conv3_4 = VGG19_conv2d_layer(input=self.conv3_3.output,
                                        layer_name='convolution2d_8',
                                        image_shape=(None, 128, 56, 56),
                                        border_mode=(1, 1))
        self.pool3 = pool_2d(
            input=self.conv3_4.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 4 ------------------------------------------------------
        self.conv4_1 = VGG19_conv2d_layer(input=self.pool3,
                                        layer_name='convolution2d_9',
                                        image_shape=(None, 512, 28, 28),
                                        border_mode=(1, 1))
        self.conv4_2 = VGG19_conv2d_layer(input=self.conv4_1.output,
                                        layer_name='convolution2d_10',
                                        image_shape=(None, 512, 28, 28),
                                        border_mode=(1, 1))
        self.conv4_3 = VGG19_conv2d_layer(input=self.conv4_2.output,
                                        layer_name='convolution2d_11',
                                        image_shape=(None, 512, 28, 28),
                                        border_mode=(1, 1))
        self.conv4_4 = VGG19_conv2d_layer(input=self.conv4_3.output,
                                        layer_name='convolution2d_12',
                                        image_shape=(None, 512, 28, 28),
                                        border_mode=(1, 1))
        self.pool4 = pool_2d(
            input=self.conv4_4.output,
            ds=(2, 2),
            mode=pool_method,
            ignore_border=True,
            st=(2, 2))
        # --- Block 5 (no pool5 in this class) -----------------------------
        self.conv5_1 = VGG19_conv2d_layer(input=self.pool4,
                                        layer_name='convolution2d_13',
                                        image_shape=(None, 512, 14, 14),
                                        border_mode=(1, 1))
        self.conv5_2 = VGG19_conv2d_layer(input=self.conv5_1.output,
                                        layer_name='convolution2d_14',
                                        image_shape=(None, 512, 14, 14),
                                        border_mode=(1, 1))
        self.conv5_3 = VGG19_conv2d_layer(input=self.conv5_2.output,
                                        layer_name='convolution2d_15',
                                        image_shape=(None, 512, 14, 14),
                                        border_mode=(1, 1))
        self.conv5_4 = VGG19_conv2d_layer(input=self.conv5_3.output,
                                        layer_name='convolution2d_16',
                                        image_shape=(None, 512, 14, 14),
                                        border_mode=(1, 1))
| 45.688022 | 114 | 0.451286 | import numpy
import os
import numpy as np
import logging
from theano.tensor.signal import pool
from theano.tensor.nnet.abstract_conv import bilinear_upsampling
import joblib
from theano.tensor.nnet import conv2d
from theano.tensor.nnet import relu,softmax
import theano
import theano.tensor as T
from theano.tensor.signal.pool import pool_2d
cwd = os.path.dirname(os.path.realpath(__file__))
cwd = os.getcwd()
direct = os.path.join(cwd,'theano_model','weights')
layer_weights = {}
for layer_weight_path in range(16):
layer_weights[str(layer_weight_path) + '_w'] = joblib.load(os.path.join(direct,str(layer_weight_path) + '_w'))
for bias_layer_path in range(16):
layer_weights[str(bias_layer_path) + '_b'] = joblib.load(os.path.join(direct,str(bias_layer_path) + '_b'))
def drop(input, p=0.5):
rng = numpy.random.RandomState(1234)
srng = T.shared_randomstreams.RandomStreams(rng.randint(999999))
mask = srng.binomial(n=1, p=p, size=input.shape, dtype=theano.config.floatX)
return input * mask
class DropoutHiddenLayer(object):
def __init__(self, is_train, input, W=None, b=None,
activation=relu, p=0.5):
self.input = input
self.W = W
self.b = b
lin_output = T.dot(input, self.W) + self.b
self.output = activation(lin_output)
self.params = [self.W, self.b]
class VGG19_conv2d_layer(object):
def __init__(self, input, layer_name,image_shape,
activation=relu, border_mode=(2,2)):
self.activation = activation
self.input = input
self.W = theano.shared(value=np.array(layer_weights[layer_name + '_w'],
dtype=theano.config.floatX),
borrow=True)
self.b = theano.shared(value=np.array(layer_weights[layer_name + '_b'],
dtype=theano.config.floatX
)
, borrow=True)
self.conv_out = conv2d(
input=input,
input_shape=image_shape,
filters=self.W,
filter_shape=layer_weights[layer_name + '_w'].shape,
border_mode=border_mode
)
self.output = activation(self.conv_out + self.b.dimshuffle('x', 0, 'x', 'x'))
self.params = [self.W, self.b]
self.input = input
class VGG19(object):
def __init__(self,input_image_shape,pool_method = 'average_exc_pad'):
IMAGE_H = input_image_shape[2]
IMAGE_W = input_image_shape[3]
self.input = theano.tensor.tensor4('input')
self.conv1_1 = VGG19_conv2d_layer(input=self.input,
layer_name='0',
image_shape=input_image_shape,
border_mode=(1,1))
self.conv1_2 = VGG19_conv2d_layer(input=self.conv1_1.output,
layer_name='1',
image_shape=(None, 64, IMAGE_H, IMAGE_W),
border_mode=(1,1))
self.pool1 = pool_2d(
input = self.conv1_2.output,
ds = (2,2),
mode = pool_method,
ignore_border=True,
st = (2,2))
self.conv2_1 = VGG19_conv2d_layer(input=self.pool1,
layer_name='2',
image_shape=(None, 64, IMAGE_H/2, IMAGE_W/2),
border_mode=(1,1))
self.conv2_2 = VGG19_conv2d_layer(input=self.conv2_1.output,
layer_name='3',
image_shape=(None, 128, IMAGE_H/2, IMAGE_W/2),
border_mode=(1,1))
self.pool2 = pool_2d(
input=self.conv2_2.output,
ds=(2, 2),
mode=pool_method,
ignore_border=True,
st=(2, 2))
self.conv3_1 = VGG19_conv2d_layer(input=self.pool2,
layer_name='4',
image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
border_mode=(1, 1))
self.conv3_2= VGG19_conv2d_layer(input=self.conv3_1.output,
layer_name='5',
image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
border_mode=(1, 1))
self.conv3_3 = VGG19_conv2d_layer(input=self.conv3_2.output,
layer_name='6',
image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
border_mode=(1, 1))
self.conv3_4 = VGG19_conv2d_layer(input=self.conv3_3.output,
layer_name='7',
image_shape=(None, 128, IMAGE_H/4, IMAGE_W/4),
border_mode=(1, 1))
self.pool3 = pool_2d(
input=self.conv3_4.output,
ds=(2, 2),
mode=pool_method,
ignore_border=True,
st=(2, 2))
self.conv4_1 = VGG19_conv2d_layer(input=self.pool3,
layer_name='8',
image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
border_mode=(1, 1))
self.conv4_2 = VGG19_conv2d_layer(input=self.conv4_1.output,
layer_name='9',
image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
border_mode=(1, 1))
self.conv4_3 = VGG19_conv2d_layer(input=self.conv4_2.output,
layer_name='10',
image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
border_mode=(1, 1))
self.conv4_4 = VGG19_conv2d_layer(input=self.conv4_3.output,
layer_name='11',
image_shape=(None, 512, IMAGE_H/8, IMAGE_W/8),
border_mode=(1, 1))
self.pool4 = pool_2d(
input=self.conv4_4.output,
ds=(2, 2),
mode=pool_method,
ignore_border=True,
st=(2, 2))
self.conv5_1 = VGG19_conv2d_layer(input=self.pool4,
layer_name='12',
image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
border_mode=(1, 1))
self.conv5_2 = VGG19_conv2d_layer(input=self.conv5_1.output,
layer_name='13',
image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
border_mode=(1, 1))
self.conv5_3 = VGG19_conv2d_layer(input=self.conv5_2.output,
layer_name='14',
image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
border_mode=(1, 1))
self.conv5_4 = VGG19_conv2d_layer(input=self.conv5_3.output,
layer_name='15',
image_shape=(None, 512, IMAGE_H/16, IMAGE_W/16),
border_mode=(1, 1))
self.pool5 = pool_2d(
input=self.conv5_4.output,
ds=(2, 2),
mode=pool_method,
ignore_border=True,
st=(2, 2))
class VGG16(object):
    """Theano graph for the convolutional stack of a VGG-style network.

    Builds conv1_1 .. conv5_4 plus pool1 .. pool4 as instance attributes;
    layer weights are addressed by the 'convolution2d_<n>' names expected by
    VGG19_conv2d_layer.

    NOTE(review): the image_shape channel counts for stages 3-4 (128 / 512)
    do not match the canonical VGG progression (256) — presumably
    VGG19_conv2d_layer ignores or re-derives them; confirm. Also the layout
    (four convs in stages 3-5, 16 conv layers) matches VGG19 rather than
    VGG16 despite the class name — confirm intent. No pool5 is created,
    mirroring the original definition.
    """
    def __init__(self, input_image_shape, pool_method='max'):
        self.input = theano.tensor.tensor4('input')

        def make_conv(source, index, image_shape):
            # Shared constructor for the 3x3, pad-1 convolution layers.
            return VGG19_conv2d_layer(input=source,
                                      layer_name='convolution2d_%d' % index,
                                      image_shape=image_shape,
                                      border_mode=(1, 1))

        def make_pool(source):
            # 2x2, stride-2 pooling between stages.
            return pool_2d(input=source, ds=(2, 2), mode=pool_method,
                           ignore_border=True, st=(2, 2))

        # Stage 1
        self.conv1_1 = make_conv(self.input, 1, input_image_shape)
        self.conv1_2 = make_conv(self.conv1_1.output, 2, (None, 64, 224, 224))
        self.pool1 = make_pool(self.conv1_2.output)
        # Stage 2
        self.conv2_1 = make_conv(self.pool1, 3, (None, 64, 112, 112))
        self.conv2_2 = make_conv(self.conv2_1.output, 4, (None, 128, 112, 112))
        self.pool2 = make_pool(self.conv2_2.output)
        # Stage 3
        self.conv3_1 = make_conv(self.pool2, 5, (None, 128, 56, 56))
        self.conv3_2 = make_conv(self.conv3_1.output, 6, (None, 128, 56, 56))
        self.conv3_3 = make_conv(self.conv3_2.output, 7, (None, 128, 56, 56))
        self.conv3_4 = make_conv(self.conv3_3.output, 8, (None, 128, 56, 56))
        self.pool3 = make_pool(self.conv3_4.output)
        # Stage 4
        self.conv4_1 = make_conv(self.pool3, 9, (None, 512, 28, 28))
        self.conv4_2 = make_conv(self.conv4_1.output, 10, (None, 512, 28, 28))
        self.conv4_3 = make_conv(self.conv4_2.output, 11, (None, 512, 28, 28))
        self.conv4_4 = make_conv(self.conv4_3.output, 12, (None, 512, 28, 28))
        self.pool4 = make_pool(self.conv4_4.output)
        # Stage 5 (no trailing pool in the original definition)
        self.conv5_1 = make_conv(self.pool4, 13, (None, 512, 14, 14))
        self.conv5_2 = make_conv(self.conv5_1.output, 14, (None, 512, 14, 14))
        self.conv5_3 = make_conv(self.conv5_2.output, 15, (None, 512, 14, 14))
        self.conv5_4 = make_conv(self.conv5_3.output, 16, (None, 512, 14, 14))
| true | true |
f73aeddbe4e28a26a55821f625fc47f3d84508cb | 2,796 | py | Python | aliyun-python-sdk-core/aliyunsdkcore/endpoint/endpoint_resolver_rules.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 1,001 | 2015-07-24T01:32:41.000Z | 2022-03-25T01:28:18.000Z | aliyun-python-sdk-core/aliyunsdkcore/endpoint/endpoint_resolver_rules.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 363 | 2015-10-20T03:15:00.000Z | 2022-03-08T12:26:19.000Z | aliyun-python-sdk-core/aliyunsdkcore/endpoint/endpoint_resolver_rules.py | yndu13/aliyun-openapi-python-sdk | 12ace4fb39fe2fb0e3927a4b1b43ee4872da43f5 | [
"Apache-2.0"
] | 682 | 2015-09-22T07:19:02.000Z | 2022-03-22T09:51:46.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with self work for additional information
# regarding copyright ownership. The ASF licenses self file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use self file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
class EndpointResolverRules(LocalConfigRegionalEndpointResolver):
    """Resolver that builds product endpoints from rule data on the request.

    Unlike the local-config resolver it derives the endpoint from
    ``request.endpoint_map`` / ``request.endpoint_regional`` instead of a
    bundled table, so product/region validity cannot be pre-checked here.
    """

    def __init__(self, *args, **kwargs):
        LocalConfigRegionalEndpointResolver.__init__(self)
        # Validity flags are static; rule data comes in on each request.
        self.product_code_valid = False
        self.region_id_valid = False
        self.endpoint_map = None
        self.endpoint_regional = None
        self.request_network = 'public'
        self.product_suffix = ''

    def resolve(self, request):
        """Return the endpoint host for ``request``, or None.

        None is returned when the request carries no rule data, when a
        regional request fails region verification, or when the regional
        type is neither 'regional' nor 'central'.
        """
        if request.endpoint_map is None or request.endpoint_regional is None:
            return None
        request_network = "public" if not request.request_network else request.request_network
        endpoint_regional = request.endpoint_regional
        endpoint = ""
        if request_network == "public":
            # Prefer an explicit per-region endpoint from the request's map.
            endpoint = request.endpoint_map.get(request.region_id, "")
        if endpoint == "":
            if endpoint_regional == "regional":
                if not self.verify_region_id(request.region_id.lower()):
                    # Was a bare `return`; normalized to `return None` for
                    # consistency with the other failure paths.
                    return None
                endpoint_domain = ".{region_id}.aliyuncs.com".format(
                    region_id=request.region_id.lower())
            elif endpoint_regional == "central":
                endpoint_domain = ".aliyuncs.com"
            else:
                return None
            network = "" if request_network == "public" else "-" + request_network
            suffix = "-" + request.product_suffix if request.product_suffix else ""
            # Drop empty fragments, then join:
            #   <product>[-suffix][-network]<domain>
            endpoint = "".join(
                filter(None, [request.product_code_lower, suffix, network, endpoint_domain]))
        return endpoint

    def is_product_code_valid(self, request):
        # Rule-based resolution cannot validate product codes up front.
        return self.product_code_valid

    def is_region_id_valid(self, request):
        # Rule-based resolution cannot validate region ids up front.
        return self.region_id_valid

    @classmethod
    def get_valid_region_ids_by_product(cls, product_code):
        """Rule data is per-request, so regions cannot be enumerated; always None."""
        return None
| 39.942857 | 96 | 0.687411 |
from aliyunsdkcore.endpoint.local_config_regional_endpoint_resolver \
import LocalConfigRegionalEndpointResolver
class EndpointResolverRules(LocalConfigRegionalEndpointResolver):
def __init__(self, *args, **kwargs):
LocalConfigRegionalEndpointResolver.__init__(self)
self.product_code_valid = False
self.region_id_valid = False
self.endpoint_map = None
self.endpoint_regional = None
self.request_network = 'public'
self.product_suffix = ''
def resolve(self, request):
if request.endpoint_map is None or request.endpoint_regional is None:
return None
request_network = "public" if not request.request_network else request.request_network
endpoint_regional = request.endpoint_regional
endpoint = ""
if request_network == "public":
endpoint = request.endpoint_map.get(request.region_id, "")
if endpoint == "":
if endpoint_regional == "regional":
if not self.verify_region_id(request.region_id.lower()):
return
endpoint_domain = ".{region_id}.aliyuncs.com".format(
region_id=request.region_id.lower())
elif endpoint_regional == "central":
endpoint_domain = ".aliyuncs.com"
else:
return None
network = "" if request_network == "public" else "-" + request_network
suffix = "-" + request.product_suffix if request.product_suffix else ""
endpoint_param_list = [request.product_code_lower, suffix, network, endpoint_domain]
endpoint = "".join(list(filter(lambda x: x, endpoint_param_list)))
return endpoint
def is_product_code_valid(self, request):
return self.product_code_valid
def is_region_id_valid(self, request):
return self.region_id_valid
@classmethod
def get_valid_region_ids_by_product(cls, product_code):
return None
| true | true |
f73aeecf43edac2a6921a2dbc82d15123b3f623e | 1,838 | py | Python | sandbox/maya_attribute_operators.py | parzival-roethlein/mapya | 2395a8922e557acfe7dc4b98c13be7f071127277 | [
"MIT"
] | 1 | 2020-03-20T11:48:34.000Z | 2020-03-20T11:48:34.000Z | sandbox/maya_attribute_operators.py | parzival-roethlein/mapya | 2395a8922e557acfe7dc4b98c13be7f071127277 | [
"MIT"
] | null | null | null | sandbox/maya_attribute_operators.py | parzival-roethlein/mapya | 2395a8922e557acfe7dc4b98c13be7f071127277 | [
"MIT"
] | null | null | null | """
makes python operators of Attribute instances use the attribute value.
the reasoning was to make attribute access less verbose and making all Attribute
operators useful.
NOT USED BECAUSE:
- can already be done using the attr value and python zen says:
There should be one-- and preferably only one --obvious way to do it.
- random change. so unexpected by user (not pythonic?!)
CODE USAGE
# inherited by Attribute class
class Attribute(MayaAttributeOperators)
NOTE:
- floordiv (//) ignored, since it is used to disconnect attributes
- rshift (>>), lshift (<<) ignored since it is used to connect attributes
TODO:
- add Identity operators (is, is not) to compare maya attributes (my_node.attr1 is my_node.attr2 # false)
- inner_operator functool.wraps
"""
import functools
import operator
class MayaAttributeOperators(object):
    # Empty mixin: the arithmetic/comparison methods are attached by the
    # setattr loop at the bottom of this module. Per the module docstring it
    # is intended to be inherited by the Attribute class.
    pass
def wrap_operator(operator_func, inplace=False):
    """Build a method applying ``operator_func`` to an attribute's value.

    The wrapper uses ``self.get()`` as the left operand; when the right
    operand is itself a MayaAttributeOperators instance its ``get()`` value
    is used. With ``inplace=True`` the result is written back through
    ``self.set()`` and ``self`` is returned (Python's in-place protocol).

    Resolves the old TODO by using functools.wraps, which carries over
    __qualname__/__module__ in addition to the __name__/__doc__ that were
    copied manually before.
    """
    @functools.wraps(operator_func)
    def inner_operator(self, other):
        if isinstance(other, MayaAttributeOperators):
            other = other.get()
        if inplace:
            self.set(operator_func(self.get(), other))
            return self
        return operator_func(self.get(), other)
    return inner_operator
# Install the wrapped operators on MayaAttributeOperators. floordiv (//) and
# the shift operators are deliberately excluded (used for attribute
# disconnect/connect per the module docstring).
math_operators = ['__add__', '__sub__', '__mul__', '__pow__', '__div__', '__truediv__', '__mod__']
logic_operators = ['__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__']
for op in math_operators + logic_operators:
    # hasattr guard: '__div__' exists only on Python 2; without it this loop
    # raised AttributeError at import time on Python 3.
    if hasattr(operator, op):
        setattr(MayaAttributeOperators, op, wrap_operator(getattr(operator, op)))
# In-place variants ('__iadd__', ...) write the result back via set().
math_inplace_operators = [op.replace('__', '__i', 1) for op in math_operators]
for inplace_op in math_inplace_operators:
    if hasattr(operator, inplace_op):
        setattr(MayaAttributeOperators, inplace_op, wrap_operator(getattr(operator, inplace_op), inplace=True))
| 34.679245 | 107 | 0.73395 | import operator
class MayaAttributeOperators(object):
pass
def wrap_operator(operator_func, inplace=False):
def inner_operator(self, other):
if isinstance(other, MayaAttributeOperators):
other = other.get()
if inplace:
self.set(operator_func(self.get(), other))
return self
return operator_func(self.get(), other)
inner_operator.__name__ = operator_func.__name__
inner_operator.__doc__ = operator_func.__doc__
return inner_operator
math_operators = ['__add__', '__sub__', '__mul__', '__pow__', '__div__', '__truediv__', '__mod__']
logic_operators = ['__lt__', '__le__', '__eq__', '__ne__', '__gt__', '__ge__']
for op in math_operators + logic_operators:
setattr(MayaAttributeOperators, op, wrap_operator(getattr(operator, op)))
math_inplace_operators = [op.replace('__', '__i', 1) for op in math_operators]
for inplace_op in math_inplace_operators:
setattr(MayaAttributeOperators, inplace_op, wrap_operator(getattr(operator, inplace_op), inplace=True))
| true | true |
f73aef0b8f2088ee541437f79d63b7db5df12be3 | 1,391 | py | Python | bots/BasicBot.py | jayantabh/haxball-imitation-learning | fb02203dee6859443ac2bd4334144aacc9f16f89 | [
"MIT"
] | null | null | null | bots/BasicBot.py | jayantabh/haxball-imitation-learning | fb02203dee6859443ac2bd4334144aacc9f16f89 | [
"MIT"
] | null | null | null | bots/BasicBot.py | jayantabh/haxball-imitation-learning | fb02203dee6859443ac2bd4334144aacc9f16f89 | [
"MIT"
] | 2 | 2021-07-24T10:15:27.000Z | 2021-09-09T03:25:07.000Z | import replay
import torch
import os
from bots import interactive
from models.BasicModel import BasicModel
class BasicBot(interactive.Interactive):
def __init__(self, channel_id, name):
super().__init__(channel_id)
# Load pre-trained model and set-up the bot
self.model = BasicModel()
path = os.path.join( os.getcwd(), 'saved_models', name )
self.model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
self.model.eval()
def onUpdate(self):
if self.player and len(self.game.players) == 2:
# convert game state to tensor
# tensor must be same format as how network was trained
print(self.player.team)
# forming input only works for two players currently
state = [self.player.disc.x, self.player.disc.y, self.player.disc.vx, self.player.disc.vy]
for player in self.game.players:
if player.id != self.player.id:
state.extend([player.disc.x, player.disc.y, player.disc.vx, player.disc.vy])
state.extend([self.game.ball.x, self.game.ball.y, self.game.ball.vx, self.game.ball.vy])
state_tensor = torch.tensor(state)
# get output for model
actions = self.model(state_tensor)
actions = (actions > 0.5).tolist()
# send input actions
inputs = [replay.Input(1 << idx) for idx,x in enumerate(actions) if x != 0]
self.setInput(*inputs) | 33.119048 | 96 | 0.675773 | import replay
import torch
import os
from bots import interactive
from models.BasicModel import BasicModel
class BasicBot(interactive.Interactive):
def __init__(self, channel_id, name):
super().__init__(channel_id)
self.model = BasicModel()
path = os.path.join( os.getcwd(), 'saved_models', name )
self.model.load_state_dict(torch.load(path, map_location=torch.device('cpu')))
self.model.eval()
def onUpdate(self):
if self.player and len(self.game.players) == 2:
print(self.player.team)
state = [self.player.disc.x, self.player.disc.y, self.player.disc.vx, self.player.disc.vy]
for player in self.game.players:
if player.id != self.player.id:
state.extend([player.disc.x, player.disc.y, player.disc.vx, player.disc.vy])
state.extend([self.game.ball.x, self.game.ball.y, self.game.ball.vx, self.game.ball.vy])
state_tensor = torch.tensor(state)
actions = self.model(state_tensor)
actions = (actions > 0.5).tolist()
inputs = [replay.Input(1 << idx) for idx,x in enumerate(actions) if x != 0]
self.setInput(*inputs) | true | true |
f73aef5b111b9d515bad6e9c823f71660de63790 | 2,337 | py | Python | tests/test_squeezedimd.py | hjmjohnson/MONAI | 7cd65614da81eeff261a14abdf18bd07a20abfcc | [
"Apache-2.0"
] | 1 | 2021-06-18T00:53:06.000Z | 2021-06-18T00:53:06.000Z | tests/test_squeezedimd.py | Transconnectome/MONAI | dc7cd0ec25d4b27f321a31f13e707769922c66b3 | [
"Apache-2.0"
] | null | null | null | tests/test_squeezedimd.py | Transconnectome/MONAI | dc7cd0ec25d4b27f321a31f13e707769922c66b3 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import SqueezeDimd
TEST_CASE_1 = [
{"keys": ["img", "seg"], "dim": None},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 3),
]
TEST_CASE_2 = [
{"keys": ["img", "seg"], "dim": 2},
{"img": np.random.rand(1, 2, 1, 8, 16), "seg": np.random.randint(0, 2, size=[1, 2, 1, 8, 16])},
(1, 2, 8, 16),
]
TEST_CASE_3 = [
{"keys": ["img", "seg"], "dim": -1},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
(1, 1, 16, 8),
]
TEST_CASE_4 = [
{"keys": ["img", "seg"]},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 3),
]
TEST_CASE_5 = [
{"keys": ["img", "seg"], "dim": -2},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
TEST_CASE_6 = [
{"keys": ["img", "seg"], "dim": 0.5},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
class TestSqueezeDim(unittest.TestCase):
    """Unit tests for the dictionary-based SqueezeDimd transform."""

    @parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
    def test_shape(self, input_param, test_data, expected_shape):
        """Valid `dim` values squeeze both keyed arrays to the expected shape."""
        result = SqueezeDimd(**input_param)(test_data)
        self.assertTupleEqual(result["img"].shape, expected_shape)
        self.assertTupleEqual(result["seg"].shape, expected_shape)

    @parameterized.expand([TEST_CASE_5, TEST_CASE_6])
    def test_invalid_inputs(self, input_param, test_data):
        """Invalid `dim` values must raise AssertionError."""
        with self.assertRaises(AssertionError):
            # Result intentionally discarded (was assigned to an unused var).
            SqueezeDimd(**input_param)(test_data)
if __name__ == "__main__":
    # Allow running this test module directly, outside a test runner.
    unittest.main()
| 34.367647 | 99 | 0.632007 |
import unittest
import numpy as np
from parameterized import parameterized
from monai.transforms import SqueezeDimd
TEST_CASE_1 = [
{"keys": ["img", "seg"], "dim": None},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 3),
]
TEST_CASE_2 = [
{"keys": ["img", "seg"], "dim": 2},
{"img": np.random.rand(1, 2, 1, 8, 16), "seg": np.random.randint(0, 2, size=[1, 2, 1, 8, 16])},
(1, 2, 8, 16),
]
TEST_CASE_3 = [
{"keys": ["img", "seg"], "dim": -1},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
(1, 1, 16, 8),
]
TEST_CASE_4 = [
{"keys": ["img", "seg"]},
{"img": np.random.rand(1, 2, 1, 3), "seg": np.random.randint(0, 2, size=[1, 2, 1, 3])},
(2, 3),
]
TEST_CASE_5 = [
{"keys": ["img", "seg"], "dim": -2},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
TEST_CASE_6 = [
{"keys": ["img", "seg"], "dim": 0.5},
{"img": np.random.rand(1, 1, 16, 8, 1), "seg": np.random.randint(0, 2, size=[1, 1, 16, 8, 1])},
]
class TestSqueezeDim(unittest.TestCase):
@parameterized.expand([TEST_CASE_1, TEST_CASE_2, TEST_CASE_3, TEST_CASE_4])
def test_shape(self, input_param, test_data, expected_shape):
result = SqueezeDimd(**input_param)(test_data)
self.assertTupleEqual(result["img"].shape, expected_shape)
self.assertTupleEqual(result["seg"].shape, expected_shape)
@parameterized.expand([TEST_CASE_5, TEST_CASE_6])
def test_invalid_inputs(self, input_param, test_data):
with self.assertRaises(AssertionError):
result = SqueezeDimd(**input_param)(test_data)
if __name__ == "__main__":
unittest.main()
| true | true |
f73aef67d32337296bdc1edb4168fbb05501865d | 10,641 | py | Python | gradio/networking.py | WittmannF/gradio | db42e5b3dd22f5a60975a6eda6c81abfbb592cde | [
"Apache-2.0"
] | null | null | null | gradio/networking.py | WittmannF/gradio | db42e5b3dd22f5a60975a6eda6c81abfbb592cde | [
"Apache-2.0"
] | null | null | null | gradio/networking.py | WittmannF/gradio | db42e5b3dd22f5a60975a6eda6c81abfbb592cde | [
"Apache-2.0"
] | null | null | null | """
Defines helper methods useful for setting up ports, launching servers, and handling `ngrok`
"""
import os
import socket
import threading
from flask import Flask, request, jsonify, abort, send_file, render_template
from flask_cachebuster import CacheBuster
from flask_cors import CORS
import threading
import pkg_resources
from distutils import dir_util
import time
import json
import urllib.request
from shutil import copyfile
import requests
import sys
import csv
import logging
import gradio as gr
from gradio.embeddings import calculate_similarity, fit_pca_to_embeddings, transform_with_pca
from gradio.tunneling import create_tunnel
INITIAL_PORT_VALUE = int(os.getenv(
    'GRADIO_SERVER_PORT', "7860"))  # The http server will try to open on port 7860. If not available, 7861, 7862, etc.
TRY_NUM_PORTS = int(os.getenv(
    'GRADIO_NUM_PORTS', "100"))  # Number of ports to try before giving up and throwing an exception.
# Host/interface the local server binds to (overridable via env var).
LOCALHOST_NAME = os.getenv(
    'GRADIO_SERVER_NAME', "127.0.0.1")
# Remote endpoint that hands out share-tunnel credentials (see setup_tunnel).
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
# Endpoint receiving feature-usage pings (see log_feature_analytics).
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"
# Locations of the HTML templates and static assets bundled with the package.
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "static/")
# Used as the template's vendor_prefix when the interface is shared (see main()).
GRADIO_STATIC_ROOT = "https://gradio.app"
# Flask app serving the UI template and the bundled static assets.
app = Flask(__name__,
            template_folder=STATIC_TEMPLATE_LIB,
            static_folder=STATIC_PATH_LIB,
            static_url_path="/static/")
CORS(app)  # allow cross-origin calls to the API routes
# Append a content hash to js/css URLs so browsers pick up new releases.
cache_buster = CacheBuster(config={'extensions': ['.js', '.css'], 'hash_size': 5})
cache_buster.init_app(app)
# Per-launch state (title/description/thumbnail/config), filled in by
# set_meta_tags() and set_config() before the server starts.
app.app_globals = {}
# Hide Flask default message
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
def set_meta_tags(title, description, thumbnail):
    """Record the page metadata rendered into index.html by main()."""
    app.app_globals["title"] = title
    app.app_globals["description"] = description
    app.app_globals["thumbnail"] = thumbnail
def set_config(config):
    # Store the serialized interface config; served verbatim by the /config/ route.
    app.app_globals["config"] = config
def get_local_ip_address():
    """Return this machine's public IP (via api.ipify.org) as a string.

    Returns the placeholder "No internet connection" on any network failure,
    so callers always get a string.
    """
    try:
        # Timeout so an offline/firewalled launch cannot hang at import time
        # (this is called at module load to populate IP_ADDRESS).
        ip_address = requests.get('https://api.ipify.org', timeout=5).text
    except requests.RequestException:
        # RequestException also covers the Timeout introduced above, not
        # just ConnectionError as before.
        ip_address = "No internet connection"
    return ip_address
# Resolved once at import time; reused by the analytics payloads below.
IP_ADDRESS = get_local_ip_address()
def get_first_available_port(initial, final):
    """Return the first port in [initial, final) that can be bound.

    :param initial: first port number to try
    :param final: exclusive upper bound of the range; must exceed `initial`
    :raises OSError: when every port in the range is already in use

    The probe socket is closed before returning, so the port is only
    *likely* still free when the caller binds it.
    """
    for candidate in range(initial, final):
        probe = socket.socket()
        try:
            probe.bind((LOCALHOST_NAME, candidate))
        except OSError:
            continue
        finally:
            probe.close()
        return candidate
    raise OSError(
        "All ports from {} to {} are in use. Please close a port.".format(
            initial, final
        )
    )
@app.route("/", methods=["GET"])
def main():
return render_template("index.html",
title=app.app_globals["title"],
description=app.app_globals["description"],
thumbnail=app.app_globals["thumbnail"],
vendor_prefix=(GRADIO_STATIC_ROOT if app.interface.share else "")
)
@app.route("/config/", methods=["GET"])
def config():
return jsonify(app.app_globals["config"])
@app.route("/enable_sharing/<path:path>", methods=["GET"])
def enable_sharing(path):
if path == "None":
path = None
app.app_globals["config"]["share_url"] = path
return jsonify(success=True)
@app.route("/api/predict/", methods=["POST"])
def predict():
raw_input = request.json["data"]
prediction, durations = app.interface.process(raw_input)
output = {"data": prediction, "durations": durations}
return jsonify(output)
def log_feature_analytics(feature):
    """Report one feature-usage event; no-op when analytics is disabled."""
    if not app.interface.analytics_enabled:
        return
    payload = {'ip_address': IP_ADDRESS, 'feature': feature}
    try:
        requests.post(GRADIO_FEATURE_ANALYTICS_URL, data=payload)
    except requests.ConnectionError:
        # Best effort only: never fail the caller because analytics is offline.
        pass
@app.route("/api/score_similarity/", methods=["POST"])
def score_similarity():
raw_input = request.json["data"]
preprocessed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(app.interface.input_interfaces)]
input_embedding = app.interface.embed(preprocessed_input)
scores = list()
for example in app.interface.examples:
preprocessed_example = [iface.preprocess(iface.preprocess_example(example))
for iface, example in zip(app.interface.input_interfaces, example)]
example_embedding = app.interface.embed(preprocessed_example)
scores.append(calculate_similarity(input_embedding, example_embedding))
log_feature_analytics('score_similarity')
return jsonify({"data": scores})
@app.route("/api/view_embeddings/", methods=["POST"])
def view_embeddings():
sample_embedding = []
if "data" in request.json:
raw_input = request.json["data"]
preprocessed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(app.interface.input_interfaces)]
sample_embedding.append(app.interface.embed(preprocessed_input))
example_embeddings = []
for example in app.interface.examples:
preprocessed_example = [iface.preprocess(iface.preprocess_example(example))
for iface, example in zip(app.interface.input_interfaces, example)]
example_embedding = app.interface.embed(preprocessed_example)
example_embeddings.append(example_embedding)
pca_model, embeddings_2d = fit_pca_to_embeddings(sample_embedding + example_embeddings)
sample_embedding_2d = embeddings_2d[:len(sample_embedding)]
example_embeddings_2d = embeddings_2d[len(sample_embedding):]
app.pca_model = pca_model
log_feature_analytics('view_embeddings')
return jsonify({"sample_embedding_2d": sample_embedding_2d, "example_embeddings_2d": example_embeddings_2d})
@app.route("/api/update_embeddings/", methods=["POST"])
def update_embeddings():
sample_embedding, sample_embedding_2d = [], []
if "data" in request.json:
raw_input = request.json["data"]
preprocessed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(app.interface.input_interfaces)]
sample_embedding.append(app.interface.embed(preprocessed_input))
sample_embedding_2d = transform_with_pca(app.pca_model, sample_embedding)
return jsonify({"sample_embedding_2d": sample_embedding_2d})
@app.route("/api/predict_examples/", methods=["POST"])
def predict_examples():
example_ids = request.json["data"]
predictions_set = {}
for example_id in example_ids:
example_set = app.interface.examples[example_id]
processed_example_set = [iface.preprocess_example(example)
for iface, example in zip(app.interface.input_interfaces, example_set)]
try:
predictions, _ = app.interface.process(processed_example_set)
except:
continue
predictions_set[example_id] = predictions
output = {"data": predictions_set}
return jsonify(output)
@app.route("/api/flag/", methods=["POST"])
def flag():
log_feature_analytics('flag')
flag_path = os.path.join(app.cwd, app.interface.flagging_dir)
os.makedirs(flag_path,
exist_ok=True)
output = {'inputs': [app.interface.input_interfaces[
i].rebuild(
flag_path, request.json['data']['input_data'][i]) for i
in range(len(app.interface.input_interfaces))],
'outputs': [app.interface.output_interfaces[
i].rebuild(
flag_path, request.json['data']['output_data'][i])
for i
in range(len(app.interface.output_interfaces))]}
log_fp = "{}/log.csv".format(flag_path)
is_new = not os.path.exists(log_fp)
with open(log_fp, "a") as csvfile:
headers = ["input_{}".format(i) for i in range(len(
output["inputs"]))] + ["output_{}".format(i) for i in
range(len(output["outputs"]))]
writer = csv.DictWriter(csvfile, delimiter=',',
lineterminator='\n',
fieldnames=headers)
if is_new:
writer.writeheader()
writer.writerow(
dict(zip(headers, output["inputs"] +
output["outputs"]))
)
return jsonify(success=True)
@app.route("/api/interpret/", methods=["POST"])
def interpret():
log_feature_analytics('interpret')
raw_input = request.json["data"]
interpretation_scores, alternative_outputs = app.interface.interpret(raw_input)
return jsonify({
"interpretation_scores": interpretation_scores,
"alternative_outputs": alternative_outputs
})
@app.route("/file/<path:path>", methods=["GET"])
def file(path):
return send_file(os.path.join(app.cwd, path))
def start_server(interface, server_name, server_port=None):
    """Launch the Flask app for `interface` on a background daemon thread.

    Picks the first free port in [server_port, server_port + TRY_NUM_PORTS)
    (defaulting to INITIAL_PORT_VALUE) and returns (port, app, thread).
    """
    if server_port is None:
        server_port = INITIAL_PORT_VALUE
    port = get_first_available_port(
        server_port, server_port + TRY_NUM_PORTS
    )
    # Stash launch state on the app object so route handlers can reach it.
    app.interface = interface
    app.cwd = os.getcwd()
    # Silence werkzeug's per-request logging.
    log = logging.getLogger('werkzeug')
    log.setLevel(logging.ERROR)
    if interface.save_to is not None:
        interface.save_to["port"] = port
    # Daemon thread: the server dies with the interpreter instead of
    # keeping the process alive.
    thread = threading.Thread(target=app.run,
                              kwargs={"port": port, "host": server_name},
                              daemon=True)
    thread.start()
    return port, app, thread
def close_server(process):
    # NOTE(review): this expects a multiprocessing.Process-like object, but
    # start_server() returns a threading.Thread, which has no terminate() —
    # confirm what call sites actually pass in.
    process.terminate()
    process.join()
def url_request(url):
    """GET `url` with a JSON content-type header (10s timeout).

    Any failure — bad URL, network error, timeout — is re-raised as
    RuntimeError carrying the original message.
    """
    headers = {"content-type": "application/json"}
    try:
        req = urllib.request.Request(url=url, headers=headers)
        return urllib.request.urlopen(req, timeout=10)
    except Exception as e:
        raise RuntimeError(str(e))
def setup_tunnel(local_server_port):
    """Request tunnel credentials from the gradio API and open the tunnel.

    Returns the public share address from create_tunnel(); failures while
    reading/parsing the payload are re-raised as RuntimeError.
    NOTE(review): a non-200 (or falsy) response falls through to an
    implicit None return — confirm callers handle a None share URL.
    """
    response = url_request(GRADIO_API_SERVER)
    if response and response.code == 200:
        try:
            # The API returns a JSON list; the first element holds the
            # tunnel credentials.
            payload = json.loads(response.read().decode("utf-8"))[0]
            return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
        except Exception as e:
            raise RuntimeError(str(e))
def url_ok(url):
    """Return True if a HEAD request to `url` answers with HTTP 200.

    Any network failure (DNS error, refused connection, timeout, bad URL)
    counts as "not ok" and yields False instead of raising.
    """
    try:
        r = requests.head(url)
        return r.status_code == 200
    except (ConnectionError, requests.exceptions.RequestException):
        # The original caught only the *builtin* ConnectionError, which
        # requests' exceptions do not subclass, so real network failures
        # escaped this function instead of returning False.
        return False
| 34.436893 | 119 | 0.672493 |
import os
import socket
import threading
from flask import Flask, request, jsonify, abort, send_file, render_template
from flask_cachebuster import CacheBuster
from flask_cors import CORS
import threading
import pkg_resources
from distutils import dir_util
import time
import json
import urllib.request
from shutil import copyfile
import requests
import sys
import csv
import logging
import gradio as gr
from gradio.embeddings import calculate_similarity, fit_pca_to_embeddings, transform_with_pca
from gradio.tunneling import create_tunnel
INITIAL_PORT_VALUE = int(os.getenv(
'GRADIO_SERVER_PORT', "7860"))
TRY_NUM_PORTS = int(os.getenv(
'GRADIO_NUM_PORTS', "100"))
LOCALHOST_NAME = os.getenv(
'GRADIO_SERVER_NAME', "127.0.0.1")
GRADIO_API_SERVER = "https://api.gradio.app/v1/tunnel-request"
GRADIO_FEATURE_ANALYTICS_URL = "https://api.gradio.app/gradio-feature-analytics/"
STATIC_TEMPLATE_LIB = pkg_resources.resource_filename("gradio", "templates/")
STATIC_PATH_LIB = pkg_resources.resource_filename("gradio", "static/")
GRADIO_STATIC_ROOT = "https://gradio.app"
app = Flask(__name__,
template_folder=STATIC_TEMPLATE_LIB,
static_folder=STATIC_PATH_LIB,
static_url_path="/static/")
CORS(app)
cache_buster = CacheBuster(config={'extensions': ['.js', '.css'], 'hash_size': 5})
cache_buster.init_app(app)
app.app_globals = {}
cli = sys.modules['flask.cli']
cli.show_server_banner = lambda *x: None
def set_meta_tags(title, description, thumbnail):
app.app_globals.update({
"title": title,
"description": description,
"thumbnail": thumbnail
})
def set_config(config):
app.app_globals["config"] = config
def get_local_ip_address():
try:
ip_address = requests.get('https://api.ipify.org').text
except requests.ConnectionError:
ip_address = "No internet connection"
return ip_address
IP_ADDRESS = get_local_ip_address()
def get_first_available_port(initial, final):
for port in range(initial, final):
try:
s = socket.socket()
s.bind((LOCALHOST_NAME, port))
s.close()
return port
except OSError:
pass
raise OSError(
"All ports from {} to {} are in use. Please close a port.".format(
initial, final
)
)
@app.route("/", methods=["GET"])
def main():
return render_template("index.html",
title=app.app_globals["title"],
description=app.app_globals["description"],
thumbnail=app.app_globals["thumbnail"],
vendor_prefix=(GRADIO_STATIC_ROOT if app.interface.share else "")
)
@app.route("/config/", methods=["GET"])
def config():
return jsonify(app.app_globals["config"])
@app.route("/enable_sharing/<path:path>", methods=["GET"])
def enable_sharing(path):
if path == "None":
path = None
app.app_globals["config"]["share_url"] = path
return jsonify(success=True)
@app.route("/api/predict/", methods=["POST"])
def predict():
raw_input = request.json["data"]
prediction, durations = app.interface.process(raw_input)
output = {"data": prediction, "durations": durations}
return jsonify(output)
def log_feature_analytics(feature):
if app.interface.analytics_enabled:
try:
requests.post(GRADIO_FEATURE_ANALYTICS_URL,
data={
'ip_address': IP_ADDRESS,
'feature': feature})
except requests.ConnectionError:
pass
@app.route("/api/score_similarity/", methods=["POST"])
def score_similarity():
raw_input = request.json["data"]
preprocessed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(app.interface.input_interfaces)]
input_embedding = app.interface.embed(preprocessed_input)
scores = list()
for example in app.interface.examples:
preprocessed_example = [iface.preprocess(iface.preprocess_example(example))
for iface, example in zip(app.interface.input_interfaces, example)]
example_embedding = app.interface.embed(preprocessed_example)
scores.append(calculate_similarity(input_embedding, example_embedding))
log_feature_analytics('score_similarity')
return jsonify({"data": scores})
@app.route("/api/view_embeddings/", methods=["POST"])
def view_embeddings():
sample_embedding = []
if "data" in request.json:
raw_input = request.json["data"]
preprocessed_input = [input_interface.preprocess(raw_input[i])
for i, input_interface in enumerate(app.interface.input_interfaces)]
sample_embedding.append(app.interface.embed(preprocessed_input))
example_embeddings = []
for example in app.interface.examples:
preprocessed_example = [iface.preprocess(iface.preprocess_example(example))
for iface, example in zip(app.interface.input_interfaces, example)]
example_embedding = app.interface.embed(preprocessed_example)
example_embeddings.append(example_embedding)
pca_model, embeddings_2d = fit_pca_to_embeddings(sample_embedding + example_embeddings)
sample_embedding_2d = embeddings_2d[:len(sample_embedding)]
example_embeddings_2d = embeddings_2d[len(sample_embedding):]
app.pca_model = pca_model
log_feature_analytics('view_embeddings')
return jsonify({"sample_embedding_2d": sample_embedding_2d, "example_embeddings_2d": example_embeddings_2d})
@app.route("/api/update_embeddings/", methods=["POST"])
def update_embeddings():
    """Project a new sample input into 2-D using the PCA model cached on ``app``.

    Assumes /api/view_embeddings/ ran first and stored ``app.pca_model``;
    returns an empty list when the request carries no "data" key.
    """
    sample_embedding, sample_embedding_2d = [], []
    payload = request.json
    if "data" in payload:
        user_input = payload["data"]
        processed_input = [
            iface.preprocess(user_input[idx])
            for idx, iface in enumerate(app.interface.input_interfaces)
        ]
        sample_embedding.append(app.interface.embed(processed_input))
        sample_embedding_2d = transform_with_pca(app.pca_model, sample_embedding)
    return jsonify({"sample_embedding_2d": sample_embedding_2d})
@app.route("/api/predict_examples/", methods=["POST"])
def predict_examples():
    """Run the interface's model over the requested example rows.

    Expects ``request.json["data"]`` to be a list of indices into
    ``app.interface.examples``. Returns ``{"data": {id: predictions}}``;
    examples whose processing raises are skipped (best-effort semantics).
    """
    example_ids = request.json["data"]
    predictions_set = {}
    for example_id in example_ids:
        example_set = app.interface.examples[example_id]
        processed_example_set = [
            iface.preprocess_example(example)
            for iface, example in zip(app.interface.input_interfaces, example_set)]
        try:
            predictions, _ = app.interface.process(processed_example_set)
        except Exception:
            # Bug fix: the original bare ``except:`` also swallowed
            # SystemExit/KeyboardInterrupt. Keep the deliberate best-effort
            # skip, but only for ordinary errors.
            continue
        predictions_set[example_id] = predictions
    output = {"data": predictions_set}
    return jsonify(output)
@app.route("/api/flag/", methods=["POST"])
def flag():
    """Persist a user-flagged input/output pair to a CSV log on disk.

    Each flagged component is rebuilt (written back to a file) under the
    flagging directory, then one row of rebuilt values is appended to
    ``<flagging_dir>/log.csv``. A header row (``input_0, ..., output_0, ...``)
    is written only when the log file is created for the first time.
    """
    log_feature_analytics('flag')
    # Flag artifacts live under <cwd>/<flagging_dir>; create it on first use.
    flag_path = os.path.join(app.cwd, app.interface.flagging_dir)
    os.makedirs(flag_path,
                exist_ok=True)
    # Rebuild each input/output component's payload into flag_path; the
    # returned values (presumably file paths or serialized data — depends on
    # each interface's rebuild()) become the CSV cell contents.
    output = {'inputs': [app.interface.input_interfaces[
        i].rebuild(
        flag_path, request.json['data']['input_data'][i]) for i
        in range(len(app.interface.input_interfaces))],
        'outputs': [app.interface.output_interfaces[
            i].rebuild(
            flag_path, request.json['data']['output_data'][i])
            for i
            in range(len(app.interface.output_interfaces))]}
    log_fp = "{}/log.csv".format(flag_path)
    # Header is only written when the log file does not yet exist.
    is_new = not os.path.exists(log_fp)
    with open(log_fp, "a") as csvfile:
        # Column order must match the zip() below: all inputs, then all outputs.
        headers = ["input_{}".format(i) for i in range(len(
            output["inputs"]))] + ["output_{}".format(i) for i in
            range(len(output["outputs"]))]
        writer = csv.DictWriter(csvfile, delimiter=',',
                                lineterminator='\n',
                                fieldnames=headers)
        if is_new:
            writer.writeheader()
        writer.writerow(
            dict(zip(headers, output["inputs"] +
                     output["outputs"]))
        )
    return jsonify(success=True)
@app.route("/api/interpret/", methods=["POST"])
def interpret():
    """Return interpretation scores and alternative outputs for the raw input."""
    log_feature_analytics('interpret')
    scores, alternatives = app.interface.interpret(request.json["data"])
    return jsonify({
        "interpretation_scores": scores,
        "alternative_outputs": alternatives,
    })
@app.route("/file/<path:path>", methods=["GET"])
def file(path):
    """Serve a file from the app's working directory.

    Security fix: the original ``send_file(os.path.join(app.cwd, path))``
    allowed directory traversal — a request like ``/file/../../etc/passwd``
    would serve arbitrary files. Resolve the requested path and refuse
    anything that escapes ``app.cwd`` (symlinks pointing outside the
    directory are also rejected, since realpath resolves them).
    """
    base = os.path.realpath(app.cwd)
    target = os.path.realpath(os.path.join(base, path))
    if os.path.commonpath([base, target]) != base:
        return "Not Found", 404
    return send_file(target)
def start_server(interface, server_name, server_port=None):
    """Launch the Flask app on a background daemon thread.

    Picks the first free port starting at ``server_port`` (default
    INITIAL_PORT_VALUE), attaches the interface and working directory to
    the app, and returns ``(port, app, thread)``.
    """
    if server_port is None:
        server_port = INITIAL_PORT_VALUE
    port = get_first_available_port(server_port, server_port + TRY_NUM_PORTS)
    app.interface = interface
    app.cwd = os.getcwd()
    # Silence werkzeug's per-request logging; only errors get through.
    logging.getLogger('werkzeug').setLevel(logging.ERROR)
    if interface.save_to is not None:
        interface.save_to["port"] = port
    server_thread = threading.Thread(
        target=app.run,
        kwargs={"port": port, "host": server_name},
        daemon=True)
    server_thread.start()
    return port, app, server_thread
def close_server(process):
    """Terminate the given server process and block until it has exited.

    ``process`` is expected to expose ``terminate()`` and ``join()``
    (e.g. a ``multiprocessing.Process``) — confirm against callers.
    """
    process.terminate()
    process.join()
def url_request(url):
    """Open *url* with a JSON content-type header and a 10-second timeout.

    Returns the open response object. Any failure (malformed URL, network
    error, timeout) is re-raised as a RuntimeError carrying the original
    error's message.
    """
    try:
        request_obj = urllib.request.Request(
            url=url, headers={"content-type": "application/json"}
        )
        return urllib.request.urlopen(request_obj, timeout=10)
    except Exception as error:
        raise RuntimeError(str(error))
def setup_tunnel(local_server_port):
    """Create a public tunnel to the local server via the Gradio API server.

    Returns the tunnel created from the API server's first payload entry,
    or None when the API server does not answer with HTTP 200. Payload
    parsing / tunnel-creation errors are wrapped in RuntimeError.
    """
    response = url_request(GRADIO_API_SERVER)
    if not (response and response.code == 200):
        return None
    try:
        payload = json.loads(response.read().decode("utf-8"))[0]
        return create_tunnel(payload, LOCALHOST_NAME, local_server_port)
    except Exception as error:
        raise RuntimeError(str(error))
def url_ok(url):
    """Return True iff a HEAD request to *url* answers with HTTP 200.

    Bug fix: the original ``except ConnectionError`` named the *builtin*
    ConnectionError, which ``requests.exceptions.ConnectionError`` does NOT
    subclass (it derives from RequestException/IOError), so connection
    failures propagated out of this helper instead of returning False.
    Catch requests' own exception hierarchy, which covers connection
    errors, timeouts, and invalid URLs.
    """
    try:
        r = requests.head(url)
        return r.status_code == 200
    except requests.exceptions.RequestException:
        return False
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.