hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7955dfe1a60b85a7b4187b22614ddd49e49992d8 | 2,569 | py | Python | plugins/cpu/taskswitch.py | BH1SCW/lmp | 2c054b22868af07a11439b785dfeb04e01d31c02 | [
"Apache-2.0"
] | 159 | 2020-04-15T16:41:06.000Z | 2022-03-30T08:12:00.000Z | plugins/cpu/taskswitch.py | BH1SCW/lmp | 2c054b22868af07a11439b785dfeb04e01d31c02 | [
"Apache-2.0"
] | 82 | 2020-04-16T10:42:42.000Z | 2022-02-18T13:08:39.000Z | plugins/cpu/taskswitch.py | Teanix/lmp | 94b4be742674f831df22120afe458c98c8349f35 | [
"Apache-2.0"
] | 76 | 2020-04-14T07:39:52.000Z | 2022-02-21T05:43:37.000Z | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
from __future__ import print_function
from bcc import BPF
from time import sleep, strftime
# for influxdb
import sys
sys.path.append('./plugins/common/')
from init_db import influx_client
from db_modules import write2db
from const import DatabaseType
from datetime import datetime
bpf_text = """
#include <uapi/linux/ptrace.h>
#include <linux/sched.h>
struct key_t {
u32 cpu;
u32 pid;
u32 tgid;
};
BPF_HASH(start, struct key_t);
BPF_HASH(dist, struct key_t);
int switch_start(struct pt_regs *ctx)
{
u64 ts = bpf_ktime_get_ns();
u64 pid_tgid = bpf_get_current_pid_tgid();
struct key_t key;
key.cpu = bpf_get_smp_processor_id();
key.pid = pid_tgid;
key.tgid = pid_tgid >> 32;
start.update(&key, &ts);
return 0;
}
int switch_end(struct pt_regs *ctx, struct task_struct *prev)
{
u64 ts = bpf_ktime_get_ns();
struct key_t key;
u64 *value;
u64 delta;
key.cpu = bpf_get_smp_processor_id();
key.pid = prev->pid;
key.tgid = prev->tgid;
value = start.lookup(&key);
if (value == 0) {
return 0;
}
delta = ts - *value;
start.delete(&key);
dist.increment(key, delta);
return 0;
}
"""
# data structure from template
class lmp_data(object):
    """One measurement row handed to write2db() for InfluxDB export."""
    def __init__(self,a,b,c,d,e):
        self.time = a      # timestamp (ISO-8601 string, see datetime.now().isoformat() below)
        self.glob = b      # constant tag value ('glob')
        self.cpu = c       # CPU id taken from the BPF hash key
        self.pid = d       # PID taken from the BPF hash key
        self.duration = e  # accumulated switch latency in ns (bpf_ktime_get_ns deltas)
data_struct = {"measurement":'taskswitch',
"time":[],
"tags":['glob','cpu','pid',],
"fields":['duration']}
b = BPF(text=bpf_text)
b.attach_kretprobe(event="pick_next_task_fair", fn_name="switch_start")
b.attach_kretprobe(event="pick_next_task_idle", fn_name="switch_start")
b.attach_kretprobe(event="pick_next_task_rt", fn_name="switch_start")
b.attach_kretprobe(event="pick_next_task_dl", fn_name="switch_start")
b.attach_kretprobe(event="pick_next_task_stop", fn_name="switch_start")
b.attach_kprobe(event="finish_task_switch", fn_name="switch_end")
dist = b.get_table("dist")
#print("%-6s%-6s%-6s%-6s" % ("CPU", "PID", "TGID", "TIME(ns)"))
# Poll loop: once a second, write the per-(cpu, pid, tgid) accumulated switch
# latencies to InfluxDB; stop on Ctrl-C.
# Fixes: removed a stray no-op `dist.items()` call whose result was discarded,
# replaced `while (1)` with the idiomatic `while True`, and `exit()` (the
# site-module helper) with `sys.exit()`.
while True:
    try:
        sleep(1)
        for k, v in dist.items():
            # print("%-6d%-6d%-6d%-6d" % (k.cpu, k.pid, k.tgid, v.value))
            test_data = lmp_data(datetime.now().isoformat(), 'glob', k.cpu, k.pid, v.value)
            write2db(data_struct, test_data, influx_client, DatabaseType.INFLUXDB.value)
        # NOTE(review): 'dist' is never cleared, so v.value keeps growing across
        # iterations -- confirm cumulative totals are what the dashboard expects.
    except KeyboardInterrupt:
        sys.exit()
| 23.354545 | 90 | 0.632931 |
7955e08b3acd26c15bd695e8550096871ef773e4 | 646 | py | Python | demo.py | jraza19/docopt_demo | c5dff8356759c201204f660b812b250082cc099b | [
"MIT"
] | null | null | null | demo.py | jraza19/docopt_demo | c5dff8356759c201204f660b812b250082cc099b | [
"MIT"
] | null | null | null | demo.py | jraza19/docopt_demo | c5dff8356759c201204f660b812b250082cc099b | [
"MIT"
] | null | null | null | # author: Tiffany Timbers
# modified by: Javairia Raza
# date: 2020-11-19
"""This script prints out docopt args.
Usage: docopt.py <arg1> --arg2=<arg2> [--arg3=<arg3>] [--arg4=<arg4>]
Options:
<arg> Takes any value (this is a required positional argument)
--arg2=<arg2> Takes any value (this is a required argument)
[--arg3=<arg3>] Takes any value (this is an optional argument)
[--arg4=<arg4>] Takes any value (this is an optional argument)
"""
from docopt import docopt
opt = docopt(__doc__)
def main(opt):
    """Print the parsed docopt arguments, their type, and the --arg4 value.

    Returns the (None, None, None) tuple produced by the three print calls,
    matching the original implementation.
    """
    printed_opt = print(opt)
    printed_type = print(type(opt))
    printed_arg4 = print(opt["--arg4"])
    return printed_opt, printed_type, printed_arg4
if __name__ == "__main__":
main(opt)
| 26.916667 | 74 | 0.657895 |
7955e2ff49e99606467342c7563e109fcc8ba4ed | 801 | py | Python | tests/test_docs.py | guillaumegenthial/doc2md | 7ad5b8944e85d88acd76c7d7d2d3550d076d6c56 | [
"Apache-2.0"
] | null | null | null | tests/test_docs.py | guillaumegenthial/doc2md | 7ad5b8944e85d88acd76c7d7d2d3550d076d6c56 | [
"Apache-2.0"
] | 1 | 2019-10-20T01:59:05.000Z | 2019-10-20T01:59:05.000Z | tests/test_docs.py | guillaumegenthial/doc2md | 7ad5b8944e85d88acd76c7d7d2d3550d076d6c56 | [
"Apache-2.0"
] | null | null | null | """Tests that output is the same as expected"""
__author__ = "Guillaume Genthial"
from pathlib import Path
# Flat (non-nested) fixture layout: expected vs generated docs directories.
EXPECTED = 'tests/mydummypackage/docs'
GOT = 'tests/mydummypackage/test-docs'
FILES = [
    'bar.md',
    'foo.bar.md',
]
# Nested fixture layout: same comparison but with sub-directories.
EXPECTED_NESTED = 'tests/mydummypackage/docs-nested'
GOT_NESTED = 'tests/mydummypackage/test-docs-nested'
FILES_NESTED = [
    'bar.md',
    'foo/bar.md',
]
def test_docs():
    """Each generated doc file must be byte-identical to its expected twin."""
    for name in FILES:
        expected_text = Path(EXPECTED, name).open().read()
        got_text = Path(GOT, name).open().read()
        assert expected_text == got_text
def test_docs_nested():
    """Same comparison as test_docs, over the nested directory layout."""
    for name in FILES_NESTED:
        expected_text = Path(EXPECTED_NESTED, name).open().read()
        got_text = Path(GOT_NESTED, name).open().read()
        assert expected_text == got_text
| 22.885714 | 62 | 0.637953 |
7955e327c9f7060d3eb18383e4c3ea47f7df4c02 | 6,081 | py | Python | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | teabag_app.py | alma-frankenstein/Other-projects | ec9972125310c3cdeb353e317a94d1baca287b3a | [
"MIT"
] | null | null | null | # teabag app
from flask import Flask, render_template
import random as r
import os
app = Flask(__name__)
# Word pools used to fill the phrase templates in phraseDict below.
# Repeated entries (e.g. 'love') weight random.choice toward those words --
# presumably intentional; confirm before de-duplicating.
partsOfSpeech = {'nouns1': ['an aura', 'an accomplishment', 'the love', 'the life', 'the soul'],
                 'nouns2': ['respect', 'compassion', 'kindness', 'love', 'life', 'knowledge', 'strength',
                            'generosity', 'love', 'goodness', 'strength',
                            'belief', 'light', 'love', 'happiness', 'love', 'love', 'everything', 'trust', 'heart'],
                 'adverbs': ['righteously', 'sincerely'],
                 'verbs': ['live', 'sing', 'love', 'love', 'live', 'love', 'love', 'give', 'speak', 'speak', 'create',
                           'intend', 'intend', 'respect'],
                 'adjectives': ['happy', 'sacred', 'good', 'compassionate', 'giving', 'forgiving', 'loving', 'joyful',
                                'sincere']
                 }
# NOTE(review): every f-string below is evaluated ONCE at import time, so each
# phrase's random word choices are frozen for the lifetime of the process; only
# the choice BETWEEN phrases (in teaBagger) is random per request.  Entries
# 21-40 repeat the same templates as 0-20 (re-rolled at import), giving two
# variants of each.  Confirm this is intended before refactoring to callables.
phraseDict = {
    0: f"You are {r.choice(partsOfSpeech['adjectives'])}",
    1: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
    2: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
    3: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
    4: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
    5: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
    6: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
    7: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
    8: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
    9: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know to to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
    10: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
    11: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
    12: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    13: f"Be {r.choice(partsOfSpeech['adjectives'])}",
    14: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
    15: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
    16: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
    17: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    18: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
    19: "You're already dead",
    20: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
    21: f"You are {r.choice(partsOfSpeech['adjectives'])}",
    22: f"{r.choice(partsOfSpeech['verbs']).title()} {r.choice(partsOfSpeech['adverbs'])}; you will build up {r.choice(partsOfSpeech['nouns1'])} of {r.choice(partsOfSpeech['nouns2'])}",
    23: f"{r.choice(partsOfSpeech['verbs']).title()} to make yourself {r.choice(partsOfSpeech['adjectives'])}",
    24: f"{r.choice(partsOfSpeech['nouns2']).title()} is {r.choice(partsOfSpeech['nouns1'])}",
    25: f"It is not to talk of {r.choice(partsOfSpeech['nouns2'])} but to {r.choice(partsOfSpeech['verbs'])} {r.choice(partsOfSpeech['nouns2'])} that is {r.choice(partsOfSpeech['nouns2'])}",
    26: f"{r.choice(partsOfSpeech['nouns2']).title()} is for now, {r.choice(partsOfSpeech['nouns2'])} is for the future",
    27: f"{r.choice(partsOfSpeech['verbs']).title()} what you {r.choice(partsOfSpeech['verbs'])}, {r.choice(partsOfSpeech['verbs'])} what you {r.choice(partsOfSpeech['verbs'])}",
    28: f"Your {r.choice(partsOfSpeech['nouns2'])} is your own {r.choice(partsOfSpeech['nouns2'])}",
    29: f"{r.choice(partsOfSpeech['nouns2']).title()} has no limit, {r.choice(partsOfSpeech['nouns2'])} has no enemy",
    30: f"{r.choice(partsOfSpeech['verbs']).title()} yourself so that you may know to to {r.choice(partsOfSpeech['verbs'])} with {r.choice(partsOfSpeech['nouns2'])}",
    31: f"You don't need {r.choice(partsOfSpeech['nouns2'])} if you are {r.choice(partsOfSpeech['nouns2'])}",
    32: f"{r.choice(partsOfSpeech['verbs']).title()} the sequence of {r.choice(partsOfSpeech['nouns2'])}, the consequences will always be {r.choice(partsOfSpeech['adjectives'])}",
    33: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    34: f"Be {r.choice(partsOfSpeech['adjectives'])}",
    35: f"{r.choice(partsOfSpeech['nouns2']).title()} is the constant state of {r.choice(partsOfSpeech['nouns2'])} for others",
    36: f"{r.choice(partsOfSpeech['verbs']).title()} by your inner {r.choice(partsOfSpeech['nouns2'])}",
    37: f"Develop the power of {r.choice(partsOfSpeech['nouns2'])}",
    38: f"People who {r.choice(partsOfSpeech['verbs'])} are {r.choice(partsOfSpeech['adjectives'])}",
    39: f"The principal ingredient of {r.choice(partsOfSpeech['nouns2'])} is {r.choice(partsOfSpeech['nouns2'])}",
    40: f"{r.choice(partsOfSpeech['nouns1']).title()} of {r.choice(partsOfSpeech['nouns2'])}",
}
@app.route('/')  # endpoint of domain name
def teaBagger():
    """Pick one of the pre-built teabag phrases at random and render it."""
    # Keys are the consecutive ints 0..len-1, so choosing over range() is
    # equivalent to choosing from the key list.
    phrase_key = r.choice(range(len(phraseDict)))
    return render_template('teasite.jinja2', sentence=phraseDict[phrase_key])
if __name__ == '__main__':
    # Bind on all interfaces; honour a Heroku-style PORT env var, default 5000.
    app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
| 81.08 | 190 | 0.651866 |
7955e38d68dff0a302b7b39a4363434c879cb672 | 440 | py | Python | aiostorage/providers/exceptions.py | family-guy/aiostorage | 266826caf811e96ba110c4bff26ad1889a8e9f62 | [
"MIT"
] | null | null | null | aiostorage/providers/exceptions.py | family-guy/aiostorage | 266826caf811e96ba110c4bff26ad1889a8e9f62 | [
"MIT"
] | 3 | 2018-12-25T13:53:11.000Z | 2019-02-10T13:52:31.000Z | aiostorage/providers/exceptions.py | grking8/aiostorage | 266826caf811e96ba110c4bff26ad1889a8e9f62 | [
"MIT"
] | null | null | null | """
This module contains exceptions for object storage provider API errors.
"""
class ProviderError(Exception):
    """Base class for all object-storage provider API errors."""
class ProviderGetUploadUrlError(ProviderError):
    """Raised when a file-upload URL could not be obtained from the provider."""
class ProviderAuthorizationError(ProviderError):
    """Raised when authorization against the provider fails."""
class ProviderFileUploadError(ProviderError):
    """Raised when uploading a file to the provider fails."""
| 15.714286 | 71 | 0.665909 |
7955e5d155c37b63d920469c60858d14a098f726 | 835 | py | Python | molecule/test/scenarios/driver/podman/molecule/multi-node/tests/test_bar.py | brianhelba/molecule | fbe61c0337c038464b4592ce1904e4880a9b18c1 | [
"MIT"
] | 3 | 2020-05-19T21:20:35.000Z | 2022-03-20T12:11:06.000Z | molecule/test/scenarios/driver/podman/molecule/multi-node/tests/test_bar.py | brianhelba/molecule | fbe61c0337c038464b4592ce1904e4880a9b18c1 | [
"MIT"
] | null | null | null | molecule/test/scenarios/driver/podman/molecule/multi-node/tests/test_bar.py | brianhelba/molecule | fbe61c0337c038464b4592ce1904e4880a9b18c1 | [
"MIT"
] | 3 | 2020-05-22T03:30:50.000Z | 2022-03-19T08:35:31.000Z | """Testinfra tests."""
import os
import re
import testinfra.utils.ansible_runner
# Run these tests only against the hosts in the 'bar' group of the Molecule
# inventory (path supplied by Molecule via MOLECULE_INVENTORY_FILE).
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ["MOLECULE_INVENTORY_FILE"]
).get_hosts("bar")
def test_hostname(host):
    """The short hostname should follow the instance-1* naming scheme."""
    short_name = host.check_output("hostname -s")
    assert re.search(r"instance-1.*", short_name)
def test_etc_molecule_directory(host):
    """/etc/molecule must exist as a root-owned directory with mode 0755."""
    molecule_dir = host.file("/etc/molecule")
    assert molecule_dir.is_directory
    assert molecule_dir.user == "root"
    assert molecule_dir.group == "root"
    assert molecule_dir.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
    """The per-host marker under /etc/molecule must be a root-owned 0644 file."""
    short_name = host.check_output("hostname -s")
    marker = host.file("/etc/molecule/{}".format(short_name))
    assert marker.is_file
    assert marker.user == "root"
    assert marker.group == "root"
    assert marker.mode == 0o644
| 23.194444 | 78 | 0.68503 |
7955e67be520754f11c02908dd9c90a4e86282ec | 5,115 | py | Python | app.py | SBelkaid/applied_tm | a22e089027f4f1ccd550aca4ae110f5f6833cb76 | [
"BSD-4-Clause-UC"
] | null | null | null | app.py | SBelkaid/applied_tm | a22e089027f4f1ccd550aca4ae110f5f6833cb76 | [
"BSD-4-Clause-UC"
] | 3 | 2021-03-31T19:59:53.000Z | 2021-12-13T20:42:22.000Z | app.py | SBelkaid/applied_tm | a22e089027f4f1ccd550aca4ae110f5f6833cb76 | [
"BSD-4-Clause-UC"
] | null | null | null | from models import db_session, User
from flask import Flask, request, url_for, redirect, render_template
from models import Claim
from models import Document
from models import Attribution
from models import Entity
from models import Perspective
from flask_login import current_user
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
import sqlalchemy
import flask_login
login_manager = flask_login.LoginManager()
app = Flask(__name__)
login_manager.init_app(app)
# SECURITY(review): hard-coded secret key checked into source -- load from
# environment/config instead.
app.secret_key = 'AGhauch3woo5xee'
client = app.test_client()
# SECURITY(review): single hard-coded account with a plaintext password;
# replace with hashed credentials in a real deployment.
users = {'p.t.j.m.vossen@vu.nl': {'password': 'xxxxxx'}}
session = db_session()
analyser = SentimentIntensityAnalyzer()
# doc_id -> sent_id for claims that also have a matching attribution.
# NOTE(review): Claim and Attribution are paired via filter conditions without
# an explicit join() -- presumably an implicit cross-join filter; confirm the
# generated SQL does what is intended.
CLAIMS_ATTRIBUTIONS = {doc_id:sent_id for sent_id, doc_id in
                       session.query(Claim.sent_id, Claim.doc_id).filter(Attribution.sent_id == Claim.sent_id,
                                                                         Attribution.doc_id == Claim.doc_id).all()}
# Lower-cased entity surface form -> entity type, de-duplicated across docs.
ENTITIES = {e[0].lower(): e[1] for e in set(session.query(Entity.value, Entity.type).all())}
@login_manager.user_loader
def user_loader(email):
    """Reload a session user from its stored e-mail; None if unknown."""
    if email in users:
        user = User()
        user.id = email
        return user
    return None
@login_manager.request_loader
def request_loader(request):
    """Fallback auth: build a user from the request's form credentials."""
    email = request.form.get('email')
    if email not in users:
        return None
    user = User()
    user.id = email
    # DO NOT ever store passwords in plaintext and always compare password
    # hashes using constant-time comparison!
    stored_password = users[email]['password']
    user.is_authenticated = request.form['password'] == stored_password
    return user
@app.route('/login', methods=['GET', 'POST'])
def login():
    """Authenticate a user.

    POST: check the submitted credentials against the in-memory ``users``
    store and start a session on success.  An unknown e-mail address used to
    raise ``KeyError`` (HTTP 500); it now falls through to the redirect below.
    GET: show the login form unless the user is already authenticated.
    """
    if request.method == 'POST':
        email = request.form['email']
        account = users.get(email)  # guard: unknown e-mail was an unhandled KeyError
        if account is not None and request.form['password'] == account['password']:
            user = User()
            user.id = email
            flask_login.login_user(user)
            return redirect(url_for('viewer'))
    if request.method == 'GET' and current_user.is_authenticated is False:
        return render_template('login.html')
    # Fall-through (failed POST, or GET while authenticated): send to the
    # viewer, which is @login_required and bounces unauthenticated users back.
    return redirect(url_for('viewer'))
class PerspectiveViewer:
    """Presentation wrapper around one serialized Perspective row."""

    def __init__(self, serialized_perspective):
        data = serialized_perspective
        self.mapping = data['term_to_word']          # token id -> surface word
        self.statement = data['statement']
        self.cue = data['cue']
        self.opinion_info = data['opinion_info']
        self.roles_span = data['roles_span']         # role label -> token ids
        self.source_entity = data['source_entity']
        self.doc_id = data['doc_id']
        self.sentiment = data['sentiment']

    def get_key(self, tid):
        """Return the role label covering token *tid*, "CUE" for the cue word,
        or None when the token carries neither."""
        matching_roles = [role for role, span in self.roles_span.items() if tid in span]
        if matching_roles:
            return matching_roles.pop()
        if self.mapping[tid] == self.cue:
            return "CUE"
        return None

    def get_opinion_info(self):
        """Render each opinion as a small HTML paragraph."""
        paragraphs = []
        for opinion in self.opinion_info:
            paragraphs.append(
                f"<p>expression: {opinion['expression']}, target: {opinion['target']}, "
                f"polarity: {opinion['polarity']}</p>"
            )
        return paragraphs

    def construct_statement(self):
        """Return (word, token_id) pairs in numeric token order (ids like 't12')."""
        ordered_ids = sorted(self.mapping, key=lambda token_id: int(token_id[1:]))
        return [(self.mapping[token_id], token_id) for token_id in ordered_ids]
@app.route('/viewer/<int:doc_id>', methods=['GET'])
@app.route('/viewer', methods=['GET'])
@flask_login.login_required
def viewer(doc_id=None):
    """Render the document viewer.

    Without ``doc_id`` only the navigation list is shown; with one, the
    document's claims, attributions, perspectives and raw text are loaded
    from the database and rendered.  Missing documents yield a 404 page.
    """
    # Generator of [id, name] pairs for the document navigation sidebar.
    all_docs = ([doc.id, doc.name] for doc in Document.query.all())
    try:
        if doc_id:
            doc = Perspective.query.filter_by(doc_id=doc_id).all()
            article = Document.query.filter_by(id=doc_id).one().name
            claims = [c.serialize for c in Claim.query.filter_by(doc_id=doc_id).all()]
            # Both 'attributions' and 'perspectives' are built from the same
            # Perspective rows -- presumably intentional; confirm.
            attributions = [a.serialize for a in doc]
            perspectives = [PerspectiveViewer(pers.serialize) for pers in doc]
            # NOTE(review): 'entities' is computed but never passed to the
            # template below -- dead work or a missing template variable?
            entities = [a.serialize for a in Entity.query.filter_by(doc_id=doc_id).all()]
            raw_text = Document.query.filter_by(id=doc_id).one().serialize
            return render_template('viewer.html', doc_name=article, raw_text=raw_text, claims=claims,
                                   attributions=attributions, doc_nav=all_docs,
                                   perspectives=perspectives)
    except sqlalchemy.orm.exc.NoResultFound as e:
        return render_template('404.html'), 404
    # No doc_id: render the empty viewer with just the navigation list.
    return render_template('viewer.html', doc_nav=all_docs)
@app.route('/logout')
def logout():
    """End the current session and return the user to the login page."""
    flask_login.logout_user()
    login_page = url_for('login')
    return redirect(login_page)
@app.route('/')
def index():
    """The site root simply forwards to the login view."""
    login_page = url_for('login')
    return redirect(login_page)
class AuthError(Exception):
    """Authorization failure carrying an error payload and an HTTP status.

    NOTE(review): ``super().__init__`` is deliberately not called here; the
    only potential raise site (in unauthorized_handler) is commented out, so
    this class currently appears unused -- confirm before removing.
    """
    def __init__(self, error, status_code):
        self.error = error              # error payload (dict with code/description)
        self.status_code = status_code  # HTTP status to report
@login_manager.unauthorized_handler
def unauthorized_handler():
    """Render the access-denied page when @login_required blocks a request.

    The page was previously returned with an implicit 200 status even though
    it represents a denial; return 403 so clients and monitoring see it.
    """
    return render_template('403.html'), 403
@app.teardown_appcontext
def shutdown_session(exception=None):
    """Release the scoped DB session (from models.db_session) after each request."""
    db_session.remove()
if __name__ == '__main__':
    # Development server only; threaded to serve concurrent viewer requests.
    app.run(threaded=True, debug=True, port=8999)
| 34.795918 | 157 | 0.668231 |
7955e6f7f5f0f71a29b8b421612179b90055e71d | 4,847 | py | Python | tests/pipelines/test_optuna.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | 1 | 2021-09-29T20:30:50.000Z | 2021-09-29T20:30:50.000Z | tests/pipelines/test_optuna.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | null | null | null | tests/pipelines/test_optuna.py | gr33n-made/catalyst | bd413abc908ef7cbdeab42b0e805277a791e3ddb | [
"Apache-2.0"
] | null | null | null | # flake8: noqa
import os
from tempfile import TemporaryDirectory
from pytest import mark
import torch
from torch import nn
from torch.utils.data import DataLoader
from catalyst import dl
from catalyst.contrib.datasets import MNIST
from catalyst.data import ToTensor
from catalyst.settings import IS_CUDA_AVAILABLE, NUM_CUDA_DEVICES, SETTINGS
# Optuna is an optional extra; import it only when catalyst[optuna] is installed.
if SETTINGS.optuna_required:
    import optuna
def train_experiment(device, engine=None):
    """Run a 3-trial Optuna search over an MNIST MLP with Catalyst.

    Tunes the learning rate and hidden width, maximizing validation
    accuracy@1 with median pruning.  ``engine`` overrides the default
    per-``device`` engine when given.
    """
    with TemporaryDirectory() as logdir:
        # NOTE(review): logdir is created but never passed to runner.train --
        # confirm whether Catalyst's default log location is intended.

        def objective(trial):
            # Search space: log-uniform learning rate and hidden-layer width.
            lr = trial.suggest_loguniform("lr", 1e-3, 1e-1)
            num_hidden = int(trial.suggest_loguniform("num_hidden", 32, 128))

            # NOTE(review): both loaders use train=False (the small MNIST test
            # split) -- presumably to keep this CI test fast; confirm.
            loaders = {
                "train": DataLoader(
                    MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()),
                    batch_size=32,
                ),
                "valid": DataLoader(
                    MNIST(os.getcwd(), train=False, download=True, transform=ToTensor()),
                    batch_size=32,
                ),
            }
            model = nn.Sequential(
                nn.Flatten(), nn.Linear(784, num_hidden), nn.ReLU(), nn.Linear(num_hidden, 10)
            )
            optimizer = torch.optim.Adam(model.parameters(), lr=lr)
            criterion = nn.CrossEntropyLoss()
            runner = dl.SupervisedRunner(
                input_key="features", output_key="logits", target_key="targets"
            )
            runner.train(
                engine=engine or dl.DeviceEngine(device),
                model=model,
                criterion=criterion,
                optimizer=optimizer,
                loaders=loaders,
                callbacks={
                    # Reports accuracy01 to Optuna and prunes bad trials.
                    "optuna": dl.OptunaPruningCallback(
                        loader_key="valid", metric_key="accuracy01", minimize=False, trial=trial
                    ),
                    "accuracy": dl.AccuracyCallback(
                        input_key="logits", target_key="targets", num_classes=10
                    ),
                },
                num_epochs=2,
            )
            score = trial.best_score
            return score

        study = optuna.create_study(
            direction="maximize",
            pruner=optuna.pruners.MedianPruner(
                n_startup_trials=1, n_warmup_steps=0, interval_steps=1
            ),
        )
        study.optimize(objective, n_trials=3, timeout=300)
        print(study.best_value, study.best_params)
# Torch
@mark.skipif(not SETTINGS.optuna_required, reason="catalyst[optuna] in not required")
def test_on_cpu():
    """Plain CPU run."""
    # NOTE(review): the skip reason reads "in not required" -- typo for "is not required".
    train_experiment("cpu")


@mark.skipif(
    not (IS_CUDA_AVAILABLE and SETTINGS.optuna_required), reason="CUDA device is not available"
)
def test_on_torch_cuda0():
    """Single-GPU run on cuda:0."""
    train_experiment("cuda:0")


@mark.skipif(
    not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2),
    reason="No CUDA>=2 found",
)
def test_on_torch_cuda1():
    """Single-GPU run on the second device."""
    train_experiment("cuda:1")


@mark.skipif(
    not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2),
    reason="No CUDA>=2 found",
)
def test_on_torch_dp():
    """DataParallel run across all visible GPUs."""
    train_experiment(None, dl.DataParallelEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >=2),
# reason="No CUDA>=2 found",
# )
# def test_on_ddp():
# train_experiment(None, dl.DistributedDataParallelEngine())
# AMP
@mark.skipif(
    not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and SETTINGS.amp_required),
    reason="No CUDA or AMP found",
)
def test_on_amp():
    """Mixed-precision (AMP) single-GPU run."""
    train_experiment(None, dl.AMPEngine())


@mark.skipif(
    not (
        SETTINGS.optuna_required
        and IS_CUDA_AVAILABLE
        and NUM_CUDA_DEVICES >= 2
        and SETTINGS.amp_required
    ),
    reason="No CUDA>=2 or AMP found",
)
def test_on_amp_dp():
    """Mixed-precision DataParallel run across all visible GPUs."""
    train_experiment(None, dl.DataParallelAMPEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.amp_required),
# reason="No CUDA>=2 or AMP found",
# )
# def test_on_amp_ddp():
# train_experiment(None, dl.DistributedDataParallelAMPEngine())
# APEX
@mark.skipif(
    not (SETTINGS.optuna_required and IS_CUDA_AVAILABLE and SETTINGS.apex_required),
    reason="No CUDA or Apex found",
)
def test_on_apex():
    """NVIDIA Apex single-GPU run."""
    train_experiment(None, dl.APEXEngine())


@mark.skipif(
    not (
        SETTINGS.optuna_required
        and IS_CUDA_AVAILABLE
        and NUM_CUDA_DEVICES >= 2
        and SETTINGS.apex_required
    ),
    reason="No CUDA>=2 or Apex found",
)
def test_on_apex_dp():
    """NVIDIA Apex DataParallel run across all visible GPUs."""
    train_experiment(None, dl.DataParallelAPEXEngine())
# @mark.skipif(
# not (IS_CUDA_AVAILABLE and NUM_CUDA_DEVICES >= 2 and SETTINGS.apex_required),
# reason="No CUDA>=2 or Apex found",
# )
# def test_on_apex_ddp():
# train_experiment(None, dl.DistributedDataParallelApexEngine())
| 28.85119 | 96 | 0.63008 |
7955e8d4373596741b3ba1ef3a5a2eb8af2453a8 | 1,905 | py | Python | pl_bolts/losses/object_detection.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 822 | 2020-04-21T03:30:43.000Z | 2021-03-07T06:41:31.000Z | pl_bolts/losses/object_detection.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 538 | 2020-04-18T01:07:58.000Z | 2021-03-09T13:48:50.000Z | pl_bolts/losses/object_detection.py | lavoiems/lightning-bolts | 208e92ba3dcdbc029afd37e09ec9461fbcf3f293 | [
"Apache-2.0"
] | 162 | 2020-04-17T15:44:54.000Z | 2021-03-09T14:04:02.000Z | """Loss functions for Object Detection task."""
from torch import Tensor
from pl_bolts.metrics.object_detection import giou, iou
def iou_loss(preds: Tensor, target: Tensor) -> Tensor:
    """Intersection-over-union loss, defined as ``1 - IoU``.

    Args:
        preds: batch of prediction bounding boxes with representation ``[x_min, y_min, x_max, y_max]``
        target: batch of target bounding boxes with representation ``[x_min, y_min, x_max, y_max]``

    Example:
        >>> import torch
        >>> from pl_bolts.losses.object_detection import iou_loss
        >>> preds = torch.tensor([[100, 100, 200, 200]])
        >>> target = torch.tensor([[150, 150, 250, 250]])
        >>> iou_loss(preds, target)
        tensor([[0.8571]])

    Returns:
        IoU loss
    """
    return 1 - iou(preds, target)
def giou_loss(preds: Tensor, target: Tensor) -> Tensor:
    """Generalized intersection-over-union loss, defined as ``1 - GIoU``.

    It has been proposed in `Generalized Intersection over Union: A Metric and A
    Loss for Bounding Box Regression <https://arxiv.org/abs/1902.09630>`_.

    Args:
        preds: an Nx4 batch of prediction bounding boxes with representation ``[x_min, y_min, x_max, y_max]``
        target: an Mx4 batch of target bounding boxes with representation ``[x_min, y_min, x_max, y_max]``

    Example:
        >>> import torch
        >>> from pl_bolts.losses.object_detection import giou_loss
        >>> preds = torch.tensor([[100, 100, 200, 200]])
        >>> target = torch.tensor([[150, 150, 250, 250]])
        >>> giou_loss(preds, target)
        tensor([[1.0794]])

    Returns:
        GIoU loss in an NxM tensor containing the pairwise GIoU loss for every element in preds and target,
        where N is the number of prediction bounding boxes and M is the number of target bounding boxes
    """
    return 1 - giou(preds, target)
| 34.017857 | 109 | 0.650394 |
7955e927b4eaad327fa96b50841bbabf0c762a82 | 1,276 | py | Python | rc_django/decorators.py | uw-it-aca/uw-restclients-django-utils | 1b2e883f422c256a5ef6e3737f4dde65cc3b1e96 | [
"Apache-2.0"
] | null | null | null | rc_django/decorators.py | uw-it-aca/uw-restclients-django-utils | 1b2e883f422c256a5ef6e3737f4dde65cc3b1e96 | [
"Apache-2.0"
] | 119 | 2017-03-22T04:48:56.000Z | 2021-11-12T18:55:40.000Z | rc_django/decorators.py | uw-it-aca/uw-restclients-django-utils | 1b2e883f422c256a5ef6e3737f4dde65cc3b1e96 | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.utils.module_loading import import_string
from django.shortcuts import render
def restclient_admin_required(view_func):
    """
    View decorator that checks whether the user is permitted to view proxy
    restclients. Calls login_required in case the user is not authenticated.
    """
    def wrapper(request, *args, **kwargs):
        template = 'access_denied.html'
        # The consuming application must name its authorization callable in
        # settings; without it every request is rejected with an explanation.
        if not hasattr(settings, 'RESTCLIENTS_ADMIN_AUTH_MODULE'):
            context = {'error_msg': (
                "Your application must define an authorization function as "
                "RESTCLIENTS_ADMIN_AUTH_MODULE in settings.py.")}
            return render(request, template, context=context, status=401)

        auth_func = import_string(settings.RESTCLIENTS_ADMIN_AUTH_MODULE)
        # Positional args, when present, are (service, url).
        service = args[0] if len(args) > 0 else None
        url = args[1] if len(args) > 1 else None
        if auth_func(request, service, url):
            return view_func(request, *args, **kwargs)
        return render(request, template, status=401)
    return login_required(function=wrapper)
7955e9a4dd2be386f70bdc5cf60c73999e7ba43d | 13,883 | py | Python | tests/iaas_classic/models/IPReservations/test_iaas_ip_reservations_model.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
] | null | null | null | tests/iaas_classic/models/IPReservations/test_iaas_ip_reservations_model.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
] | null | null | null | tests/iaas_classic/models/IPReservations/test_iaas_ip_reservations_model.py | ericmharris/gc3-query | 0bf5226130aafbb1974aeb96d93ee1996833e87d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
gc3-query.test_iaas_ip_reservations_model [9/13/2018 10:49 AM]
~~~~~~~~~~~~~~~~
<DESCR SHORT>
<DESCR>
"""
################################################################################
## Standard Library Imports
import sys, os
################################################################################
## Third-Party Imports
from dataclasses import dataclass
################################################################################
## Project Imports
from gc3_query.lib import *
_debug, _info, _warning, _error, _critical = get_logging(name=__name__)
# -*- coding: utf-8 -*-
"""
gc3-query.test_iaas_ip_reservations_model [9/7/2018 3:22 PM]
~~~~~~~~~~~~~~~~
<DESCR SHORT>
<DESCR>
"""
################################################################################
## Standard Library Imports
import sys, os
################################################################################
## Third-Party Imports
from dataclasses import dataclass
################################################################################
## Project Imports
from gc3_query.lib import *
from gc3_query.lib.iaas_classic.models.ip_reservations_model import IPReservationModel
_debug, _info, _warning, _error, _critical = get_logging(name=__name__)
from pathlib import Path
import pytest
from mongoengine import connect
from mongoengine import QuerySet
from gc3_query.lib import gc3_cfg
from gc3_query.lib.gc3_config import GC3Config
from gc3_query.lib.iaas_classic.ip_reservations import IPReservations
from gc3_query.lib.base_collections import NestedOrderedDictAttrListBase
# fixme? from gc3_query.lib.open_api import API_SPECS_DIR
import json
from pathlib import Path
from pymongo import MongoClient
import pytest
from bravado_core.spec import Spec
from bravado.response import BravadoResponse, BravadoResponseMetadata
import mongoengine
from mongoengine.connection import get_connection, register_connection
from gc3_query.lib import *
from gc3_query.lib import gc3_cfg
# from gc3_query.lib.export_delegates.mongodb import storage_adapter_init
# # fixme? from gc3_query.lib.open_api import API_SPECS_DIR
from pathlib import Path
from gc3_query.lib import *
import pytest
# from pprint import pprint, pformat
_debug, _info, _warning, _error, _critical = get_logging(name=__name__)
# Directory of this test module; fixtures are resolved relative to it.
TEST_BASE_DIR: Path = Path(__file__).parent
# config_dir = TEST_BASE_DIR.joinpath("config")
# Shared package config is used instead of a per-test config directory.
config_dir = gc3_cfg.BASE_DIR.joinpath("etc/config")
output_dir = TEST_BASE_DIR.joinpath('output')
def test_setup():
    """Sanity-check the test layout and ensure config/output dirs exist.

    ``mkdir(parents=True, exist_ok=True)`` replaces the check-then-create
    pattern, which was racy and failed when a parent directory was missing.
    """
    assert TEST_BASE_DIR.exists()
    # assert API_SPECS_DIR.exists()
    config_dir.mkdir(parents=True, exist_ok=True)
    output_dir.mkdir(parents=True, exist_ok=True)
def test_list_ip_reservations_model_from_url():
    """List IP reservations live from the IaaS classic REST API and check JSON.

    NOTE(review): integration test - needs network access, valid IDM
    credentials and the atoml config under etc/config to run.
    """
    service = 'IPReservations'
    idm_domain = 'gc30003'
    gc3_config = GC3Config(atoml_config_dir=config_dir)
    service_cfg = gc3_config.iaas_classic.services.compute[service]
    idm_cfg = gc3_config.idm.domains[idm_domain]
    # http_client: IaaSRequestsHTTPClient = IaaSRequestsHTTPClient(idm_cfg=idm_cfg)
    assert service==service_cfg.name
    assert idm_domain==idm_cfg.name
    assert gc3_config.user.cloud_username == 'eric.harris@oracle.com'
    ip_reservations = IPReservations(service_cfg=service_cfg, idm_cfg=idm_cfg, from_url=True)
    # container=ip_reservations.idm_container_name.replace('/', '')
    container=ip_reservations.idm_root_container_name
    old_container=ip_reservations.idm_container_name
    # http_future = ip_reservations.bravado_service_operations.listInstance(container=ip_reservations.idm_user_container_name)
    # http_future = ip_reservations.bravado_service_operations.listInstance(container=ip_reservations.idm_container_name)
    # http_future = ip_reservations.bravado_service_operations.listIPReservation(container=container)
    http_future = ip_reservations.service_operations.list_ip_reservation(container=container)
    # http_future = ip_reservations.service_operations.discover_root_instance()
    request_url = http_future.future.request.url
    service_response = http_future.response()
    result = service_response.result
    result_json = service_response.incoming_response.json()
    assert service_response.metadata.status_code==200
    assert len(result_json['result']) > 0
    # every reservation entry is expected to carry a 'src_list' field
    assert 'src_list' in result_json['result'][0]
def test_list_ip_reservations_model_save_from_url():
    """Fetch one IP reservation from the live API and save it via the model.

    NOTE(review): integration test - needs network, IDM credentials and a
    reachable MongoDB for IPReservationModel.save().
    """
    service = 'IPReservations'
    idm_domain = 'gc30003'
    gc3_config = GC3Config(atoml_config_dir=config_dir)
    service_cfg = gc3_config.iaas_classic.services.compute[service]
    idm_cfg = gc3_config.idm.domains[idm_domain]
    # http_client: IaaSRequestsHTTPClient = IaaSRequestsHTTPClient(idm_cfg=idm_cfg)
    assert service==service_cfg.name
    assert idm_domain==idm_cfg.name
    assert gc3_config.user.cloud_username == 'eric.harris@oracle.com'
    ip_reservations = IPReservations(service_cfg=service_cfg, idm_cfg=idm_cfg, from_url=True)
    # container=ip_reservations.idm_container_name.replace('/', '')
    container=ip_reservations.idm_root_container_name
    old_container=ip_reservations.idm_container_name
    # http_future = ip_reservations.bravado_service_operations.listInstance(container=ip_reservations.idm_user_container_name)
    # http_future = ip_reservations.bravado_service_operations.listInstance(container=ip_reservations.idm_container_name)
    # http_future = instances.bravado_service_operations.discoverRootInstance(_request_options={"headers": {"Accept": "application/oracle-compute-v3+directory+json"}})
    # operation_name = 'listIPReservation'
    # callable_operation = getattr(ip_reservations.bravado_service_operations, operation_name)
    # operation_headers = {"Accept": ','.join(callable_operation.operation.produces),
    #                      "Content-Type": ','.join(callable_operation.operation.consumes)
    #                      }
    # ip_reservations.http_client.session.headers.update(operation_headers)
    # # http_future = callable_operation(container=container, _request_options={"headers": {"Accept": ','.join(callable_operation.operation.produces)}})
    # http_future = callable_operation(container=container)
    # http_future = ip_reservations.bravado_service_operations.listIPReservation(container=container, _request_options={"headers": {"Accept": ','.join(callable_operation.operation.produces)}})
    http_future = ip_reservations.service_operations.list_ip_reservation(container=container)
    # http_future = ip_reservations.service_operations.discover_root_instance()
    request_url = http_future.future.request.url
    service_response = http_future.response()
    result = service_response.result
    assert service_response.metadata.status_code==200
    results_json = service_response.incoming_response.json()['result']
    assert len(results_json) > 0
    result_json = results_json[0]
    assert 'src_list' in result_json
    # the model's primary key field is ip_reservation_id, not the API's 'id'
    result_json['ip_reservation_id'] = result_json.pop('id')
    ip_reservation_model = IPReservationModel(**result_json)
    saved = ip_reservation_model.save()
    assert saved
# def storage_adapter_init():
# alias = gc3_cfg.iaas_classic.mongodb.db_alias
# name = gc3_cfg.iaas_classic.mongodb.db_name
# server = gc3_cfg.iaas_classic.mongodb.net.listen_address
# port = gc3_cfg.iaas_classic.mongodb.net.listen_port
# db_config = dict(host=server, port=port, alias=alias, name=name)
# db_config['register'] = mongoengine.register_connection(**db_config)
# _info(f"connection registered: alias={alias}, name={name}, db_config={db_config})")
# return db_config
# @pytest.fixture()
# def setup_gc30003_model() -> Tuple[Dict[str, Any]]:
# service = 'ServiceInstances'
# idm_domain = 'gc30003'
# iaas_type = 'database'
# service_cfg = gc3_cfg.iaas_classic.services.get(iaas_type)[service]
# idm_cfg = gc3_cfg.idm.domains[idm_domain]
# mongodb_config = storage_adapter_init()
# assert service==service_cfg.name
# assert idm_domain==idm_cfg.name
# assert gc3_cfg.user.cloud_username == 'eric.harris@oracle.com'
# yield service_cfg, idm_cfg, mongodb_config
def storage_adapter_init(mongodb_config: DictStrAny) -> MongoClient:
    """Register a mongoengine connection from *mongodb_config* and return it.

    Expected keys: ``alias``, ``name``, ``db`` and ``net.host`` / ``net.port``.
    See mongoengine.register_connection for the full parameter semantics
    (alias is the name used to refer to the connection, db the database name,
    host/port locate the mongod instance).

    :param mongodb_config: nested dict with the connection settings above
    :return: the pymongo MongoClient registered under *alias*
    """
    alias = mongodb_config["alias"]
    name = mongodb_config["name"]
    db = mongodb_config["db"]
    host = mongodb_config["net"]["host"]
    port = mongodb_config["net"]["port"]
    _ = register_connection(alias=alias, db=db, host=host, port=port)
    # _connection = connect(db=db, alias=alias)
    connection: MongoClient = get_connection(alias=alias)
    _info(f"connection registered: alias={alias}, name={name}, db={db}, host={host}, port={port}")
    return connection
@pytest.fixture()
def setup_gc30003_model():
    """Yield (service_cfg, idm_cfg, IPReservations service, MongoClient).

    NOTE(review): duplicated by setup_gc30003_model_query below - the two
    fixtures are identical and could be shared.
    """
    service = 'IPReservations'
    idm_domain = 'gc30003'
    gc3_config = GC3Config(atoml_config_dir=config_dir)
    service_cfg = gc3_config.iaas_classic.services.compute[service]
    idm_cfg = gc3_config.idm.domains[idm_domain]
    mongodb_connection: MongoClient = storage_adapter_init(mongodb_config=gc3_cfg.iaas_classic.mongodb.as_dict())
    iaas_service = IPReservations(service_cfg=service_cfg, idm_cfg=idm_cfg)
    assert service==service_cfg.name
    assert idm_domain==idm_cfg.name
    assert gc3_config.user.cloud_username == 'eric.harris@oracle.com'
    yield service_cfg, idm_cfg, iaas_service, mongodb_connection
def test_dump(setup_gc30003_model):
    """dump() should return a service response with a truthy result.

    Removed the dead trailing `result = service_response.result` assignment
    and renamed the unused fixture values to underscore-prefixed names.
    """
    _service_cfg, _idm_cfg, iaas_service, _mongodb_connection = setup_gc30003_model
    service_response = iaas_service.dump()
    assert service_response.result
def test_save_one(setup_gc30003_model):
    """Persist the first IP reservation returned by dump() to MongoDB.

    Removed unused locals (`result`, `result_dict`) - the raw JSON re-read of
    the response was never used by the assertions.
    """
    _service_cfg, _idm_cfg, iaas_service, _mongodb_connection = setup_gc30003_model
    service_response = iaas_service.dump()
    assert service_response.result
    results = service_response.result.result
    first_result_dict = results[0]._as_dict()
    ip_reservation_model = IPReservationModel(**first_result_dict)
    saved = ip_reservation_model.save()
    assert saved
def test_save_all(setup_gc30003_model):
    """Save every dumped IP reservation document individually (no asserts on
    save results - this exercises the model round-trip only)."""
    service_cfg, idm_cfg, iaas_service, mongodb_connection = setup_gc30003_model
    # http_client: IaaSRequestsHTTPClient = IaaSRequestsHTTPClient(idm_cfg=idm_cfg)
    service_response = iaas_service.dump()
    assert service_response.result
    results = service_response.result.result
    for result in results:
        result_dict = result._as_dict()
        ip_reservation_model = IPReservationModel(**result_dict)
        saved = ip_reservation_model.save()
def test_insert_all(setup_gc30003_model):
    """Bulk-insert all dumped IP reservations via QuerySet.insert()."""
    service_cfg, idm_cfg, iaas_service, mongodb_connection = setup_gc30003_model
    # http_client: IaaSRequestsHTTPClient = IaaSRequestsHTTPClient(idm_cfg=idm_cfg)
    service_response = iaas_service.dump()
    assert service_response.result
    results = service_response.result.result
    ip_reservations = [IPReservationModel(**result._as_dict()) for result in results]
    _ = IPReservationModel.objects().insert(ip_reservations)
    assert ip_reservations
@pytest.fixture()
def setup_gc30003_model_query():
    """Yield (service_cfg, idm_cfg, IPReservations service, MongoClient).

    NOTE(review): identical to setup_gc30003_model above - candidates for
    consolidation into a single fixture.
    """
    service = 'IPReservations'
    idm_domain = 'gc30003'
    gc3_config = GC3Config(atoml_config_dir=config_dir)
    service_cfg = gc3_config.iaas_classic.services.compute[service]
    idm_cfg = gc3_config.idm.domains[idm_domain]
    mongodb_connection: MongoClient = storage_adapter_init(mongodb_config=gc3_cfg.iaas_classic.mongodb.as_dict())
    iaas_service = IPReservations(service_cfg=service_cfg, idm_cfg=idm_cfg)
    assert service==service_cfg.name
    assert idm_domain==idm_cfg.name
    assert gc3_config.user.cloud_username == 'eric.harris@oracle.com'
    yield service_cfg, idm_cfg, iaas_service, mongodb_connection
def test_query_objects(setup_gc30003_model_query):
    """Query the stored IPReservation documents and check owner/name fields.

    Renamed the local `object` (which shadowed the builtin) to `first_doc`.
    """
    _service_cfg, _idm_cfg, _iaas_service, _mongodb_connection = setup_gc30003_model_query
    objects: QuerySet = IPReservationModel.objects()
    assert objects
    first_doc = objects.first()
    assert first_doc
    owner = first_doc.name.object_owner
    full_name = first_doc.name.full_name
    assert '@oracle.com' in owner
    assert full_name.startswith('/Compute')
| 42.197568 | 210 | 0.742779 |
7955e9c510067fa51b80282cca698f2eb849d19a | 1,412 | py | Python | tests/test_matcher_not.py | sanjioh/django-header-filter | d348449619c71bdd6a2c957ee47c1c67a57bdec2 | [
"MIT"
] | 11 | 2016-12-03T21:45:30.000Z | 2022-01-11T08:57:55.000Z | tests/test_matcher_not.py | sanjioh/django-header-filter | d348449619c71bdd6a2c957ee47c1c67a57bdec2 | [
"MIT"
] | 17 | 2019-07-12T20:36:40.000Z | 2020-01-09T15:03:40.000Z | tests/test_matcher_not.py | sanjioh/django-header-filter | d348449619c71bdd6a2c957ee47c1c67a57bdec2 | [
"MIT"
] | null | null | null | from header_filter.matchers import Header
def test_not_matcher_supports_bitwise_not(rf):
    """Double negation of a matching Header should still match."""
    name, value = 'HTTP_X_A', 'val_x'
    request = rf.get('/', **{name: value})
    matcher = ~~Header(name, value)
    assert matcher.match(request) is True
def test_not_matcher_supports_bitwise_and(rf):
    """(~A) & B is False when both headers are present (False & True)."""
    name_a, value_a = 'HTTP_X_A', 'val_x'
    name_b, value_b = 'HTTP_X_B', 'val_y'
    request = rf.get('/', **{name_a: value_a, name_b: value_b})
    matcher = ~Header(name_a, value_a) & Header(name_b, value_b)
    assert matcher.match(request) is False
def test_not_matcher_supports_bitwise_or(rf):
    """(~A) | B is True when both headers are present (False | True)."""
    name_a, value_a = 'HTTP_X_A', 'val_x'
    name_b, value_b = 'HTTP_X_B', 'val_y'
    request = rf.get('/', **{name_a: value_a, name_b: value_b})
    matcher = ~Header(name_a, value_a) | Header(name_b, value_b)
    assert matcher.match(request) is True
def test_not_matcher_supports_bitwise_xor(rf):
    """(~B) ^ C is True when B and C both match (False ^ True)."""
    headers = {'HTTP_X_A': 'val_x', 'HTTP_X_B': 'val_y', 'HTTP_X_C': 'val_z'}
    request = rf.get('/', **headers)
    matcher = ~Header('HTTP_X_B', 'val_y') ^ Header('HTTP_X_C', 'val_z')
    assert matcher.match(request) is True
def test_repr():
    """repr of a negated Header shows the leading ~ operator."""
    expected = "~Header('HTTP_X_A', 'val_x')"
    assert repr(~Header('HTTP_X_A', 'val_x')) == expected
| 37.157895 | 92 | 0.701133 |
7955ea17335a287450b6065b06014de98b658ae3 | 8,876 | py | Python | mlrun/runtimes/funcdoc.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/funcdoc.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | null | null | null | mlrun/runtimes/funcdoc.py | EdmondIguazio/mlrun | e63b34a610788ebe522ce7a46642e26927e39882 | [
"Apache-2.0"
] | 1 | 2021-05-05T14:19:46.000Z | 2021-05-05T14:19:46.000Z | # Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ast
import inspect
import re
from mlrun.runtimes.base import FunctionEntrypoint
def type_name(ann):
    """Return a human-readable name for annotation *ann* ("" when absent)."""
    # Unannotated parameters/returns show up as inspect.Signature.empty
    if ann is not inspect.Signature.empty:
        return getattr(ann, "__name__", str(ann))
    return ""
def inspect_default(value):
    """Return the repr of a parameter default, or "" when there is none."""
    return "" if value is inspect.Signature.empty else repr(value)
def inspect_param(param: inspect.Parameter) -> dict:
    """Convert an inspect.Parameter into the serializable param dict."""
    return param_dict(
        name=param.name,
        type=type_name(param.annotation),
        doc="",
        default=inspect_default(param.default),
    )
# We're using dict and not classes (here and in func_dict) since this goes
# directly to YAML
def param_dict(name="", type="", doc="", default=""):
    """Build the serializable description of a single parameter."""
    return {"default": default, "doc": doc, "name": name, "type": type}
def func_dict(name, doc, params, returns, lineno):
    """Build the serializable description of a whole function."""
    # note: the output key is "return" (singular) while the arg is `returns`
    description = {"name": name, "doc": doc}
    description["params"] = params
    description["return"] = returns
    description["lineno"] = lineno
    return description
def func_info(fn) -> dict:
    """Describe a live function object: name, doc, params, return, lineno.

    When the function carries an RST docstring, its :param:/:returns: fields
    are merged over the introspected signature data.
    """
    sig = inspect.signature(fn)
    doc = inspect.getdoc(fn) or ""
    out = func_dict(
        name=fn.__name__,
        doc=doc,
        params=[inspect_param(p) for p in sig.parameters.values()],
        returns=param_dict(type=type_name(sig.return_annotation)),
        lineno=func_lineno(fn),
    )
    # no docstring to parse - introspection data is all we have
    if not fn.__doc__ or not fn.__doc__.strip():
        return out
    return merge_doc(out, doc)
def func_lineno(fn):
    """Return the source line number of *fn*, or -1 when unavailable."""
    try:
        _, lineno = inspect.getsourcelines(fn)
    except (TypeError, OSError):
        # built-ins / non-functions have no retrievable source
        return -1
    return lineno
def merge_doc(out, doc):
    """Merge RST docstring fields (params/return) into the *out* description.

    Docstring-provided doc/type values win over the introspected ones when
    they are non-empty. Mutates and returns *out*.
    """
    doc, params, ret = parse_rst(doc)
    out["doc"] = doc
    for param in params:
        # find the matching introspected parameter by name
        for out_param in out["params"]:
            if out_param["name"] != param["name"]:
                continue
            out_param["doc"] = param["doc"] or out_param["doc"]
            out_param["type"] = param["type"] or out_param["type"]
            break
    out["return"]["doc"] = ret["doc"] or out["return"]["doc"]
    out["return"]["type"] = ret["type"] or out["return"]["type"]
    return out
def rst_read_doc(lines):
    """Collect the free-text prefix of an RST docstring.

    Returns (index_of_first_field_line, text); the index is -1 when the
    docstring contains no ``:field:`` section at all.
    """
    collected = []
    for idx, line in enumerate(lines):
        if line.startswith(":"):
            return idx, "\n".join(collected).strip()
        collected.append(line)
    return -1, "\n".join(collected).strip()
def rst_read_section(lines, i):
    """Parse one ``:tag value: text`` RST field starting at/after index *i*.

    Returns (tag, value, text, next_index), or None when no field line is
    found. next_index is -1 when the field runs to the end of *lines*.
    Continuation lines (indented by a tab or 3+ spaces) are folded into text.
    """
    # Skip empty lines/other lines
    for i, line in enumerate(lines[i:], i):
        if not line.strip():
            continue
        # :param path: The path of the file to wrap
        match = re.match(r":\s*(\w+)(\s+\w+)?\s*:", lines[i])
        if match:
            break
    else:
        return None
    tag = match.group(1)
    value = match.group(2).strip() if match.group(2) else ""
    text = lines[i][match.end() :].lstrip()
    # consume indented continuation lines belonging to this field
    for i in range(i + 1, len(lines)):
        if re.match(r"\t+| {3,}", lines[i]):
            text += " " + lines[i].lstrip()
        else:
            return tag, value, text, i
    return tag, value, text.strip(), -1
def parse_rst(docstring: str):
    """Parse an RST docstring into (doc_text, param_dicts, return_dict).

    Recognized field tags: :param:, :type:, :returns:, :rtype:.
    Raises ValueError on any other tag.
    """
    lines = docstring.splitlines()
    i, doc = rst_read_doc(lines)
    params, names = {}, []
    ret = param_dict()
    while i != -1:
        out = rst_read_section(lines, i)
        if not out:
            break
        tag, value, text, i = out
        if tag == "param":
            params[value] = param_dict(name=value, doc=text)
            names.append(value)
        elif tag == "type":
            # TODO: Check param
            params[value]["type"] = text
        elif tag == "returns":
            ret["doc"] = text
        elif tag == "rtype":
            ret["type"] = text
        else:
            raise ValueError(f"{i+1}: unknown tag - {lines[i]!r}")
    # keep parameters in their original docstring order
    params = [params[name] for name in names]
    return doc, params, ret
def ast_func_info(func: ast.FunctionDef):
    """Describe a function from its AST node (no import/execution required).

    Mirrors func_info: name, doc, params (with textual defaults), return
    type annotation and line number, merging RST docstring fields if present.
    """
    doc = ast.get_docstring(func) or ""
    rtype = getattr(func.returns, "id", "")
    params = [ast_param_dict(p) for p in func.args.args]
    defaults = func.args.defaults
    if defaults:
        # defaults align with the *last* len(defaults) positional args
        for param, default in zip(params[-len(defaults) :], defaults):
            param["default"] = ast_code(default)
    out = func_dict(
        name=func.name,
        doc=doc,
        params=params,
        returns=param_dict(type=rtype),
        lineno=func.lineno,
    )
    if not doc.strip():
        return out
    return merge_doc(out, doc)
def ast_param_dict(param: ast.arg) -> dict:
    """Describe a single AST function argument as a param dict."""
    annotation = ann_type(param.annotation) if param.annotation else ""
    return {"name": param.arg, "type": annotation, "doc": "", "default": ""}
def ann_type(ann):
    """Render an annotation AST node as a type string (e.g. ``List[int]``)."""
    if hasattr(ann, "slice"):
        # subscripted generic, e.g. Dict[str, int]
        inner = ", ".join(ann_type(elem) for elem in iter_elems(ann.slice))
        return "{}[{}]".format(ann.value.id, inner)
    if isinstance(ann, ast.Attribute):
        # dotted name, e.g. typing.Any -> "Any"
        return ann.attr
    return getattr(ann, "id", "")
def iter_elems(ann):
    """Return the element nodes inside a subscript slice *ann*.

    Handles both AST flavors:
    - pre-3.9, where the slice is wrapped in an ``ast.Index`` node;
    - 3.9+, where ``ast.Index`` was removed and the slice is the expression
      itself (the original code raised AttributeError here, e.g. for a bare
      Tuple or Name slice that has no ``.value`` attribute).
    """
    if type(ann).__name__ == "Index":
        # pre-3.9 AST: unwrap the Index; a Tuple holds multiple elements
        value = ann.value
        return value.elts if hasattr(value, "elts") else [value]
    # 3.9+ AST: a bare Tuple means several type arguments...
    if isinstance(ann, ast.Tuple):
        return ann.elts
    # ...anything else (Name, Attribute, nested Subscript) is one element
    return [ann]
class ASTVisitor(ast.NodeVisitor):
    """AST walker that collects every function definition it encounters."""
    def __init__(self):
        self.funcs = []  # ast.FunctionDef nodes, in source order
        self.exprs = []  # every other visited node, in traversal order
    def generic_visit(self, node):
        self.exprs.append(node)
        super().generic_visit(node)
    def visit_FunctionDef(self, node):
        self.funcs.append(node)
        self.generic_visit(node)
def find_handlers(code: str, handlers=None):
    """Find handler functions in *code* (source text) and describe each one.

    When *handlers* names are given, only those functions are returned.
    Otherwise selection falls back to functions preceded by a
    ``# mlrun:handler`` marker comment, or - with no markers at all -
    every public (non-underscore) function.
    """
    handlers = set() if handlers is None else set(handlers)
    mod = ast.parse(code)
    visitor = ASTVisitor()
    visitor.visit(mod)
    funcs = [ast_func_info(fn) for fn in visitor.funcs]
    if handlers:
        return [f for f in funcs if f["name"] in handlers]
    else:
        markers = find_handler_markers(code)
        return filter_funcs(funcs, markers)
def as_func(handler):
    """Convert a handler description dict into a FunctionEntrypoint dict."""
    ret = clean(handler["return"])
    return FunctionEntrypoint(
        name=handler["name"],
        doc=handler["doc"],
        parameters=[clean(p) for p in handler["params"]],
        # an empty/None return description yields no outputs at all
        outputs=[ret] if ret else None,
        lineno=handler["lineno"],
    ).to_dict()
def update_function_entry_points(function, code):
    """Populate function.spec.entry_points from handlers found in *code*."""
    handlers = find_handlers(code)
    function.spec.entry_points = {
        handler["name"]: as_func(handler) for handler in handlers
    }
def clean(struct: dict):
    """Drop empty values from *struct* ("default" is kept; its text is
    evaluated to a Python value). Returns None for an empty struct."""
    if not struct:
        return None
    if "default" in struct:
        struct["default"] = py_eval(struct["default"])
    cleaned = {}
    for key, val in struct.items():
        if val or key == "default":
            cleaned[key] = val
    return cleaned
def py_eval(data):
    """Best-effort literal eval: return the Python value, or *data* as-is."""
    try:
        return ast.literal_eval(data)
    except (SyntaxError, ValueError):
        # not a literal (e.g. a bare identifier) - keep the original text
        return data
def filter_funcs(funcs, markers):
    """Select marked functions; with no markers, select all public ones."""
    markers = list(markers)
    if markers:
        return [fn for fn in funcs if is_marked(fn, markers)]
    # no markers at all: default to every function not starting with "_"
    return [fn for fn in funcs if fn["name"][0] != "_"]
def is_marked(func, markers):
    """True when a marker comment sits on the line directly above *func*."""
    return any(func["lineno"] - marker == 1 for marker in markers)
def find_handler_markers(code: str):
    """Yield 1-based line numbers of ``# mlrun:handler`` marker comments."""
    marker = re.compile(r"#\s*mlrun:handler")
    for lineno, line in enumerate(code.splitlines(), 1):
        if marker.match(line):
            yield lineno
def ast_code(expr):
    """Render an AST expression node back to (approximate) Python source.

    Used for parameter default values; supports dict/set/tuple/list/call
    literals and plain leaf constants.
    """
    # Sadly, not such built in
    children = None
    if isinstance(expr, ast.Dict):
        children = zip(expr.keys, expr.values)
        children = [f"{ast_code(k)}: {ast_code(v)}" for k, v in children]
        inner = ", ".join(children)
        return f"{{{inner}}}"
    elif isinstance(expr, ast.Set):
        start, end, children = "{", "}", expr.elts
        if not children:
            # "{}" would render a dict, so spell the empty set explicitly
            return "set()"
    elif isinstance(expr, ast.Tuple):
        start, end, children = "(", ")", expr.elts
    elif isinstance(expr, ast.List):
        start, end, children = "[", "]", expr.elts
    elif isinstance(expr, ast.Call):
        children = [ast_code(e) for e in expr.args]
        children += [f"{k.arg}={ast_code(k.value)}" for k in expr.keywords]
        inner = ", ".join(children)
        # NOTE(review): assumes a plain-name call target (expr.func.id);
        # attribute calls like mod.fn(...) would raise AttributeError - confirm
        return f"{expr.func.id}({inner})"
    else:  # Leaf (number, str ...)
        return repr(getattr(expr, expr._fields[0]))
    inner = ", ".join(ast_code(e) for e in children)
    return f"{start}{inner}{end}"
| 26.978723 | 75 | 0.590469 |
7955ea5a03cae714b7b6b3bdc38bda8ed7d736f5 | 555 | py | Python | stage/configuration/test_dev_snapshot_replaying_origin.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | [
"Apache-2.0"
] | null | null | null | stage/configuration/test_dev_snapshot_replaying_origin.py | Sentienz/datacollector-tests | ca27988351dc3366488098b5db6c85a8be2f7b85 | [
"Apache-2.0"
] | 1 | 2019-04-24T11:06:38.000Z | 2019-04-24T11:06:38.000Z | stage/configuration/test_dev_snapshot_replaying_origin.py | anubandhan/datacollector-tests | 301c024c66d68353735256b262b681dd05ba16cc | [
"Apache-2.0"
] | 2 | 2019-05-24T06:34:37.000Z | 2020-03-30T11:48:18.000Z | import pytest
from streamsets.testframework.decorators import stub
@stub
@pytest.mark.parametrize('stage_attributes', [{'on_record_error': 'DISCARD'},
{'on_record_error': 'STOP_PIPELINE'},
{'on_record_error': 'TO_ERROR'}])
def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):
pass
@stub
def test_snapshot_file_path(sdc_builder, sdc_executor):
pass
@stub
def test_snapshot_stage_instance_name(sdc_builder, sdc_executor):
pass
| 24.130435 | 83 | 0.654054 |
7955eb3ac947af5ac70ba7d58aa356679abeac65 | 4,246 | py | Python | dvc/output/local.py | peter-schmidbauer/dvc | 363db9125fa85a8589724501a50a5d361c07a757 | [
"Apache-2.0"
] | null | null | null | dvc/output/local.py | peter-schmidbauer/dvc | 363db9125fa85a8589724501a50a5d361c07a757 | [
"Apache-2.0"
] | null | null | null | dvc/output/local.py | peter-schmidbauer/dvc | 363db9125fa85a8589724501a50a5d361c07a757 | [
"Apache-2.0"
] | null | null | null | import os
from schema import Optional, Or
from dvc.output.base import OutputDoesNotExistError, OutputIsNotFileOrDirError
from dvc.output.base import OutputAlreadyTrackedError
from dvc.dependency.local import DependencyLOCAL
from dvc.exceptions import DvcException
from dvc.istextfile import istextfile
class OutputLOCAL(DependencyLOCAL):
    """A stage output on the local filesystem, optionally cached by DVC and/or
    flagged as a metric file."""
    PARAM_CACHE = 'cache'
    PARAM_METRIC = 'metric'
    PARAM_METRIC_TYPE = 'type'
    PARAM_METRIC_XPATH = 'xpath'
    # 'metric' may be absent, a bool, or a {type, xpath} mapping
    METRIC_SCHEMA = Or(None, bool,
                       {Optional(PARAM_METRIC_TYPE): Or(str, None),
                        Optional(PARAM_METRIC_XPATH): Or(str, None)})
    DoesNotExistError = OutputDoesNotExistError
    IsNotFileOrDirError = OutputIsNotFileOrDirError
    def __init__(self,
                 stage,
                 path,
                 info=None,
                 remote=None,
                 cache=True,
                 metric=False):
        super(OutputLOCAL, self).__init__(stage, path, info, remote=remote)
        self.use_cache = cache
        self.metric = metric
    @property
    def md5(self):
        """Known md5 checksum of this output, or None."""
        return self.info.get(self.project.cache.local.PARAM_MD5, None)
    @property
    def cache(self):
        """Path of the cache entry corresponding to this output's md5."""
        return self.project.cache.local.get(self.md5)
    def dumpd(self):
        """Serialize this output (adds cache flag and metric info)."""
        ret = super(OutputLOCAL, self).dumpd()
        ret[self.PARAM_CACHE] = self.use_cache
        if isinstance(self.metric, dict):
            # drop an empty xpath so it is not persisted as noise
            if self.PARAM_METRIC_XPATH in self.metric and \
               not self.metric[self.PARAM_METRIC_XPATH]:
                del self.metric[self.PARAM_METRIC_XPATH]
        if self.metric:
            ret[self.PARAM_METRIC] = self.metric
        return ret
    def changed(self):
        """Whether the output differs from its recorded state."""
        if not self.use_cache:
            return super(OutputLOCAL, self).changed()
        return self.project.cache.local.changed(self.path_info, self.info)
    def checkout(self):
        """Restore the output from the local cache (no-op when uncached)."""
        if not self.use_cache:
            return
        self.project.cache.local.checkout(self.path_info, self.info)
    def _verify_metric(self):
        """Raise DvcException when a metric output is a directory or binary."""
        if not self.metric:
            return
        if os.path.isdir(self.path):
            msg = 'Directory \'{}\' cannot be used as metrics.'
            raise DvcException(msg.format(self.rel_path))
        if not istextfile(self.path):
            msg = 'Binary file \'{}\' cannot be used as metrics.'
            raise DvcException(msg.format(self.rel_path))
    def save(self):
        """Record the output's current state; cache it when use_cache is set.

        Raises DoesNotExistError / IsNotFileOrDirError for invalid paths and
        OutputAlreadyTrackedError when the path is already tracked by SCM.
        """
        if not self.use_cache:
            super(OutputLOCAL, self).save()
            self._verify_metric()
            msg = 'Output \'{}\' doesn\'t use cache. Skipping saving.'
            self.project.logger.debug(msg.format(self.rel_path))
            return
        if not os.path.exists(self.path):
            raise self.DoesNotExistError(self.rel_path)
        if not os.path.isfile(self.path) and not os.path.isdir(self.path):
            raise self.IsNotFileOrDirError(self.rel_path)
        # empty outputs are allowed but warned about
        if (os.path.isfile(self.path) and os.path.getsize(self.path) == 0) or \
           (os.path.isdir(self.path) and len(os.listdir(self.path)) == 0):
            msg = "File/directory '{}' is empty.".format(self.rel_path)
            self.project.logger.warn(msg)
        if not self.changed():
            msg = 'Output \'{}\' didn\'t change. Skipping saving.'
            self.project.logger.debug(msg.format(self.rel_path))
            return
        if self.is_local:
            if self.project.scm.is_tracked(self.path):
                raise OutputAlreadyTrackedError(self.rel_path)
            if self.use_cache:
                self.project.scm.ignore(self.path)
        self.info = self.project.cache.local.save(self.path_info)
    def remove(self, ignore_remove=False):
        """Delete the output; optionally drop its SCM-ignore entry too."""
        self.remote.remove(self.path_info)
        if ignore_remove and self.use_cache and self.is_local:
            self.project.scm.ignore_remove(self.path)
    def move(self, out):
        """Move this output to *out*'s path, updating SCM ignores and state."""
        if self.use_cache and self.is_local:
            self.project.scm.ignore_remove(self.path)
        self.remote.move(self.path_info, out.path_info)
        self.path = out.path
        self.path_info = out.path_info
        self.save()
        if self.use_cache and self.is_local:
            self.project.scm.ignore(self.path)
| 32.661538 | 79 | 0.618935 |
7955eb745a973df9d6329048fd3e2d65cdb7d4a3 | 3,780 | py | Python | kubernetes/client/models/v1_non_resource_attributes.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2021-06-10T23:44:11.000Z | 2021-06-10T23:44:11.000Z | kubernetes/client/models/v1_non_resource_attributes.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1_non_resource_attributes.py | sgwilliams-ebsco/python | 35e6406536c96d4769ff7e2a02bf0fdcb902a509 | [
"Apache-2.0"
] | 1 | 2018-11-06T16:33:43.000Z | 2018-11-06T16:33:43.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.12.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1NonResourceAttributes(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    swagger_types = {
        'path': 'str',
        'verb': 'str'
    }
    attribute_map = {
        'path': 'path',
        'verb': 'verb'
    }
    def __init__(self, path=None, verb=None):
        """
        V1NonResourceAttributes - a model defined in Swagger
        """
        self._path = None
        self._verb = None
        # no polymorphic subtypes for this model
        self.discriminator = None
        if path is not None:
          self.path = path
        if verb is not None:
          self.verb = verb
    @property
    def path(self):
        """
        Gets the path of this V1NonResourceAttributes.
        Path is the URL path of the request

        :return: The path of this V1NonResourceAttributes.
        :rtype: str
        """
        return self._path
    @path.setter
    def path(self, path):
        """
        Sets the path of this V1NonResourceAttributes.
        Path is the URL path of the request

        :param path: The path of this V1NonResourceAttributes.
        :type: str
        """
        self._path = path
    @property
    def verb(self):
        """
        Gets the verb of this V1NonResourceAttributes.
        Verb is the standard HTTP verb

        :return: The verb of this V1NonResourceAttributes.
        :rtype: str
        """
        return self._verb
    @verb.setter
    def verb(self, verb):
        """
        Sets the verb of this V1NonResourceAttributes.
        Verb is the standard HTTP verb

        :param verb: The verb of this V1NonResourceAttributes.
        :type: str
        """
        self._verb = verb
    def to_dict(self):
        """
        Returns the model properties as a dict (recursing into nested models)
        """
        result = {}
        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())
    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()
    def __eq__(self, other):
        """
        Returns true if both objects are equal (same type and attributes)
        """
        if not isinstance(other, V1NonResourceAttributes):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 24.387097 | 105 | 0.540476 |
7955eba9268a2c534f6504422fd6ac752ee6ccd9 | 53,707 | py | Python | Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py | znuf/content | 2b530316f76e8051896c91cf927633d5fe182eb8 | [
"MIT"
] | null | null | null | Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py | znuf/content | 2b530316f76e8051896c91cf927633d5fe182eb8 | [
"MIT"
] | null | null | null | Packs/Active_Directory_Query/Integrations/Active_Directory_Query/Active_Directory_Query.py | znuf/content | 2b530316f76e8051896c91cf927633d5fe182eb8 | [
"MIT"
] | null | null | null | import demistomock as demisto
from CommonServerPython import *
from typing import List, Dict, Optional
from ldap3 import Server, Connection, NTLM, SUBTREE, ALL_ATTRIBUTES, Tls, Entry, Reader, ObjectDef
from ldap3.extend import microsoft
import ssl
from datetime import datetime
import traceback
import os
from ldap3.utils.log import (set_library_log_detail_level, get_library_log_detail_level,
set_library_log_hide_sensitive_data, EXTENDED)
# global connection
conn: Optional[Connection] = None
''' GLOBAL VARS '''
# userAccountControl is a bitmask used to store a number of settings.
# find more at:
# https://support.microsoft.com/en-gb/help/305144/how-to-use-the-useraccountcontrol-flags-to-manipulate-user-account-pro
DEFAULT_OUTGOING_MAPPER = "User Profile - Active Directory (Outgoing)"
DEFAULT_INCOMING_MAPPER = "User Profile - Active Directory (Incoming)"
COOMON_ACCOUNT_CONTROL_FLAGS = {
512: "Enabled Account",
514: "Disabled account",
544: "Password Not Required",
4096: "Workstation/server",
66048: "Enabled, password never expires",
66050: "Disabled, password never expires",
66080: "Enables, password never expires, password not required.",
532480: "Domain controller"
}
NORMAL_ACCOUNT = 512
DISABLED_ACCOUNT = 514
INACTIVE_LIST_OPTIONS = [514, 546, 66050, 66082, 262658, 262690, 328226]
DEFAULT_LIMIT = 20
# common attributes for specific AD objects
DEFAULT_PERSON_ATTRIBUTES = [
'name',
'displayName',
'memberOf',
'mail',
'sAMAccountName',
'manager',
'userAccountControl'
]
DEFAULT_COMPUTER_ATTRIBUTES = [
'name',
'memberOf'
]
FIELDS_THAT_CANT_BE_MODIFIED = [
"dn", "samaccountname", "cn", "ou"
]
''' HELPER FUNCTIONS '''
def initialize_server(host, port, secure_connection, unsecure):
    """
    uses the instance configuration to initialize the LDAP server

    :param host: host or ip
    :type host: string
    :param port: port or None
    :type port: number
    :param secure_connection: SSL or None
    :type secure_connection: string
    :param unsecure: trust any cert
    :type unsecure: boolean
    :return: ldap3 Server
    :rtype: Server
    """
    if secure_connection == "SSL":
        # port is configured by default as 389 or as 636 for LDAPS if not specified in configuration
        demisto.debug("initializing sever with ssl (unsecure: {}). port: {}". format(unsecure, port or 'default(636)'))
        kwargs = {'use_ssl': True}
        if port:
            kwargs['port'] = port
        if not unsecure:
            # enforce server certificate validation against the system CA bundle
            demisto.debug("will require server certificate.")
            kwargs['tls'] = Tls(validate=ssl.CERT_REQUIRED, ca_certs_file=os.environ.get('SSL_CERT_FILE'))
        return Server(host, **kwargs)
    demisto.debug("initializing server without secure connection. port: {}". format(port or 'default(389)'))
    if port:
        return Server(host, port=port)
    return Server(host)
def account_entry(person_object, custome_attributes):
    """Build an Account context entry (ADGetUser backward compatibility) from an AD person object."""
    # 'Managr' (sic) is kept alongside 'Manager' for backward compatibility
    account = {
        'Type': 'AD',
        'ID': person_object.get('dn'),
        'Email': person_object.get('mail'),
        'Username': person_object.get('sAMAccountName'),
        'DisplayName': person_object.get('displayName'),
        'Managr': person_object.get('manager'),
        'Manager': person_object.get('manager'),
        'Groups': person_object.get('memberOf')
    }
    # map lower-cased attribute names to their original casing in the AD object
    lowered_to_original = {original.lower(): original for original in person_object.keys()}
    for attr in custome_attributes:
        try:
            account[attr] = person_object[attr]
        except KeyError as e:
            # the attribute may exist under a differently-cased key
            if attr.lower() in lowered_to_original:
                original_casing = lowered_to_original.get(attr.lower(), '')
                account[original_casing] = person_object[original_casing]
            else:
                demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
    return account
def endpoint_entry(computer_object, custome_attributes):
    """Build an Endpoint context entry (ADGetComputer backward compatibility) from an AD computer object."""
    endpoint = {
        'Type': 'AD',
        'ID': computer_object.get('dn'),
        'Hostname': computer_object.get('name'),
        'Groups': computer_object.get('memberOf')
    }
    # map lower-cased attribute names to their original casing in the AD object
    lowered_to_original = {original.lower(): original for original in computer_object.keys()}
    for attr in custome_attributes:
        if attr == '*':
            # a wildcard request is not a concrete attribute name
            continue
        try:
            endpoint[attr] = computer_object[attr]
        except KeyError as e:
            # the attribute may exist under a differently-cased key
            if attr.lower() in lowered_to_original:
                original_casing = lowered_to_original.get(attr.lower(), '')
                endpoint[original_casing] = computer_object[original_casing]
            else:
                demisto.error(f'Failed parsing custom attribute {attr}, error: {e}')
    return endpoint
def base_dn_verified(base_dn):
    """Run a minimal one-entry query to verify the configured base DN is usable."""
    try:
        # any user object is enough - we only care whether the search succeeds
        search("(objectClass=user)", base_dn, size_limit=1)
    except Exception as e:
        demisto.info(str(e))
        return False
    return True
def generate_dn_and_remove_from_user_profile(user):
    """Generates a user dn, in case user dn is included in the user, will return it, otherwise
    will generate one using the cn and ou values
    :param user: The user dict including his values
    :return: The user's dn.
    """
    if user.get("dn"):
        # an explicit dn wins; drop it so it is not sent as a modifiable attribute later
        return user.pop("dn")
    cn = user.get("cn")
    if not cn:
        raise Exception("User must have cn, please provide a valid value")
    ou = user.get("ou")
    if not ou:
        raise Exception("User must have ou, please provide a valid value")
    return 'CN=' + str(cn) + ',' + str(ou)
def check_if_user_exists_by_samaccountname(default_base_dn, samaccountname):
    """Check if user exists base on his samaccountname
    :param default_base_dn: The location in the DIT where the search will start
    :param samaccountname: The user's unique samaccountname
    :return: True if the user exists, False otherwise.
    """
    ldap_query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
    # a single entry is enough to decide existence
    found = search_with_paging(
        ldap_query,
        default_base_dn,
        attributes=["samaccountname"],
        size_limit=1,
        page_size=1
    )
    return bool(found.get('flat'))
def get_user_activity_by_samaccountname(default_base_dn, samaccountname):
    """Get if user is active or not by samaccountname
    :param default_base_dn: The location in the DIT where the search will start
    :param samaccountname: The user's unique samaccountname
    :return: True if the user active, False otherwise.
    """
    ldap_query = f'(&(objectClass=User)(objectCategory=person)(samaccountname={samaccountname}))'
    found = search_with_paging(
        ldap_query,
        default_base_dn,
        attributes=["userAccountControl"],
        size_limit=1,
        page_size=1
    )
    flat = found.get('flat')
    if not flat:
        return False
    # compare the userAccountControl flag against the known inactive values
    control_value = flat[0].get('userAccountControl')[0]
    return control_value not in INACTIVE_LIST_OPTIONS
def modify_user_ou(dn, new_ou):
    """
    Move a user to a new OU by renaming its DN under the new superior.

    :param dn: the user's current distinguished name
    :param new_ou: the target OU DN; when empty/None nothing is done
    :return: True on success (or when there was nothing to do), False otherwise
    """
    assert conn is not None
    if not new_ou:
        # nothing to move to - calling modify_dn with an empty superior always fails
        return True
    # the relative DN (CN=...) is the first DN component
    cn = dn.split(',', 1)[0]
    return conn.modify_dn(dn, cn, new_superior=new_ou)
def get_all_attributes(search_base):
    """Return the attribute names present on the first 'user' entry under search_base (empty list if none)."""
    reader = Reader(conn, ObjectDef('user', conn), search_base)
    reader.search()
    # no results, or an empty first entry, means there is nothing to report
    if not reader or not reader[0]:
        return []
    return reader[0].entry_attributes
''' COMMANDS '''
''' SEARCH '''
def search(search_filter, search_base, attributes=None, size_limit=0, time_limit=0):
    """
    find entries in the DIT

    Args:
        search_filter: LDAP query string
        search_base: the location in the DIT where the search will start
        attributes: the attributes to specify for each entry found in the DIT
        size_limit: maximum number of entries to return (0 = no limit)
        time_limit: maximum search time in seconds (0 = no limit)

    Returns:
        the ldap3 connection entries found

    Raises:
        Exception: if the underlying LDAP search reports failure
    """
    assert conn is not None
    success = conn.search(
        search_base=search_base,
        search_filter=search_filter,
        attributes=attributes,
        size_limit=size_limit,
        time_limit=time_limit
    )
    if not success:
        raise Exception("Search failed")
    return conn.entries
def search_with_paging(search_filter, search_base, attributes=None, page_size=100, size_limit=0, time_limit=0):
    """
    find entries in the DIT, page by page

    Args:
        search_base: the location in the DIT where the search will start
        search_filter: LDAP query string
        attributes: the attributes to specify for each entry found in the DIT
        page_size: number of entries requested per LDAP page
        size_limit: maximum total number of entries to fetch (0 = no limit)
        time_limit: maximum overall search time in seconds (0 = no limit)

    Returns:
        dict with 'raw' (entries as parsed json) and 'flat' (dn + attributes flattened) lists
    """
    assert conn is not None
    total_entries = 0
    cookie = None
    start = datetime.now()
    entries: List[Entry] = []
    entries_left_to_fetch = size_limit
    while True:
        # shrink the last page so we never fetch more than size_limit
        if 0 < entries_left_to_fetch < page_size:
            page_size = entries_left_to_fetch
        conn.search(
            search_base,
            search_filter,
            search_scope=SUBTREE,
            attributes=attributes,
            paged_size=page_size,
            paged_cookie=cookie
        )
        entries_left_to_fetch -= len(conn.entries)
        total_entries += len(conn.entries)
        # paged-results control cookie; an empty cookie means no more pages
        cookie = conn.result['controls']['1.2.840.113556.1.4.319']['value']['cookie']
        # elapsed seconds since the search started.
        # BUGFIX: the original computed (start - datetime.now()).seconds - a negative
        # timedelta whose .seconds is huge (e.g. 86395), so any time_limit fired
        # immediately after the first page.
        time_diff = (datetime.now() - start).seconds
        entries.extend(conn.entries)
        # stop when: 1.reached size limit 2.reached time limit 3. no cookie
        if (size_limit and size_limit <= total_entries) or (time_limit and time_diff >= time_limit) or (not cookie):
            break
    # keep the raw entry for raw content (backward compatibility)
    raw = []
    # flatten the entries
    flat = []
    for entry in entries:
        entry = json.loads(entry.entry_to_json())
        flat_entry = {
            'dn': entry['dn']
        }
        for attr in entry.get('attributes', {}):
            flat_entry[attr] = entry['attributes'][attr]
        raw.append(entry)
        flat.append(flat_entry)
    return {
        "raw": raw,
        "flat": flat
    }
def user_dn(sam_account_name, search_base):
    """Resolve the full DN of the user whose sAMAccountName is given."""
    ldap_filter = '(&(objectClass=user)(sAMAccountName={}))'.format(sam_account_name)
    matches = search(ldap_filter, search_base)
    if not matches:
        raise Exception("Could not get full DN for user with sAMAccountName '{}'".format(sam_account_name))
    return json.loads(matches[0].entry_to_json())['dn']
def computer_dn(compuer_name, search_base):
    """Resolve the full DN of the computer object with the given name."""
    # note: parameter name keeps its original (misspelled) form for caller compatibility
    ldap_filter = '(&(objectClass=user)(objectCategory=computer)(name={}))'.format(compuer_name)
    matches = search(ldap_filter, search_base)
    if not matches:
        raise Exception("Could not get full DN for computer with name '{}'".format(compuer_name))
    return json.loads(matches[0].entry_to_json())['dn']
def group_dn(group_name, search_base):
    """Resolve the full DN of the group with the given common name."""
    ldap_filter = '(&(objectClass=group)(cn={}))'.format(group_name)
    matches = search(ldap_filter, search_base)
    if not matches:
        raise Exception("Could not get full DN for group with name '{}'".format(group_name))
    return json.loads(matches[0].entry_to_json())['dn']
def convert_special_chars_to_unicode(search_filter):
    """Replace escaped special LDAP filter characters with their hex (unicode) escape sequences."""
    # We allow users to use special chars without explicitly typing their unicode values.
    # Replacement order matters and is preserved from the original mapping.
    replacements = (
        ('\\(', '\\28'),
        ('\\)', '\\29'),
        ('\\*', '\\2a'),
        ('\\/', '\\2f'),
        ('\\\\', '\\5c'),
    )
    for literal, escaped in replacements:
        search_filter = search_filter.replace(literal, escaped)
    return search_filter
def free_search(default_base_dn, page_size):
    """
    Run an arbitrary user-supplied LDAP search and return the results to the war room.

    :param default_base_dn: base DN used when the 'base-dn' arg is not given
    :param page_size: number of entries to fetch per LDAP page
    """
    args = demisto.args()
    search_filter = args.get('filter')
    size_limit = int(args.get('size-limit', '0'))
    time_limit = int(args.get('time-limit', '0'))
    search_base = args.get('base-dn') or default_base_dn
    attributes = args.get('attributes')
    context_output = args.get('context-output')
    # escape special characters the user typed literally into the filter
    search_filter = convert_special_chars_to_unicode(search_filter)
    # if ALL was specified - get all the object's attributes, else expect a string of comma separated values
    if attributes:
        attributes = ALL_ATTRIBUTES if attributes == 'ALL' else attributes.split(',')
    entries = search_with_paging(
        search_filter,
        search_base,
        attributes=attributes,
        size_limit=size_limit,
        time_limit=time_limit,
        page_size=page_size
    )
    # 'context-output' == 'no' suppresses the context output entirely
    ec = {} if context_output == 'no' else {'ActiveDirectory.Search(obj.dn == val.dn)': entries['flat']}
    demisto_entry = {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': entries['raw'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown("Active Directory Search", entries['flat']),
        'EntryContext': ec
    }
    demisto.results(demisto_entry)
def search_users(default_base_dn, page_size):
    """
    Search AD users by one of several selectors (dn/name/email/username/custom field)
    and return them to the war room, including an Account context for backward
    compatibility with the ADGetUser script.

    :param default_base_dn: the location in the DIT where the search will start
    :param page_size: number of entries to fetch per LDAP page
    """
    # this command is equivalent to script ADGetUser
    # will preform a custom search to find users by a specific (one) attribute specified by the user
    args = demisto.args()
    attributes: List[str] = []
    custom_attributes: List[str] = []
    # zero is actually no limitation, default is 20
    limit = int(args.get('limit', '20'))
    if limit <= 0:
        limit = 20
    # default query - list all users
    query = "(&(objectClass=User)(objectCategory=person))"
    # query by user DN
    if args.get('dn'):
        query = "(&(objectClass=User)(objectCategory=person)(distinguishedName={}))".format(args['dn'])
    # query by name
    if args.get('name'):
        query = "(&(objectClass=User)(objectCategory=person)(cn={}))".format(args['name'])
    # query by email
    if args.get('email'):
        query = "(&(objectClass=User)(objectCategory=person)(mail={}))".format(args['email'])
    # query by sAMAccountName
    if args.get('username'):
        query = "(&(objectClass=User)(objectCategory=person)(sAMAccountName={}))".format(args['username'])
    # query by custom object attribute
    if args.get('custom-field-type'):
        if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when quering by "custom-field-type"')
        query = "(&(objectClass=User)(objectCategory=person)({}={}))".format(
            args['custom-field-type'], args['custom-field-data'])
    if args.get('attributes'):
        custom_attributes = args['attributes'].split(",")
    # always request the default person attributes in addition to the custom ones
    attributes = list(set(custom_attributes + DEFAULT_PERSON_ATTRIBUTES))
    entries = search_with_paging(
        query,
        default_base_dn,
        attributes=attributes,
        size_limit=limit,
        page_size=page_size
    )
    accounts = [account_entry(entry, custom_attributes) for entry in entries['flat']]
    if args.get('user-account-control-out', '') == 'true':
        # display a literal translation of the numeric account control flag
        for i, user in enumerate(entries['flat']):
            flag_no = user.get('userAccountControl')[0]
            entries['flat'][i]['userAccountControl'] = COOMON_ACCOUNT_CONTROL_FLAGS.get(flag_no) or flag_no
    demisto_entry = {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': entries['raw'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown("Active Directory - Get Users", entries['flat']),
        'EntryContext': {
            'ActiveDirectory.Users(obj.dn == val.dn)': entries['flat'],
            # 'backward compatability' with ADGetUser script
            'Account(obj.ID == val.ID)': accounts
        }
    }
    demisto.results(demisto_entry)
def get_user_iam(default_base_dn, args, mapper_in, mapper_out):
    """Gets an AD user by User Profile.
    :param default_base_dn: The location in the DIT where the search will start
    :param args: Demisto args.
    :param mapper_in: Mapping AD user to User Profiles
    :param mapper_out: Mapping User Profiles to AD users.
    :return: User Profile of the AD user
    """
    user_profile = args.get("user-profile")
    user_profile_delta = args.get('user-profile-delta')
    default_attribute = "samaccountname"
    # BUGFIX: construct the profile outside the try block so the except clause can
    # always report on it (it used to be unbound when an early statement raised,
    # turning the real error into a NameError).
    iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
    try:
        ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
        value = ad_user.get(default_attribute)
        # removing keys with no values
        user = {k: v for k, v in ad_user.items() if v}
        attributes = list(user.keys())
        query = f'(&(objectClass=User)(objectCategory=person)({default_attribute}={value}))'
        entries = search_with_paging(
            query,
            default_base_dn,
            attributes=attributes,
            size_limit=1,
            page_size=1
        )
        if not entries.get('flat'):
            iam_user_profile.set_result(success=False,
                                        error_message="No user was found",
                                        action=IAMActions.GET_USER
                                        )
        else:
            # resolve the account's enabled/disabled state from userAccountControl
            user_account_control = get_user_activity_by_samaccountname(default_base_dn, value)
            ad_user["userAccountControl"] = user_account_control
            iam_user_profile.update_with_app_data(ad_user, mapper_in)
            iam_user_profile.set_result(success=True,
                                        email=ad_user.get('email'),
                                        username=ad_user.get('name'),
                                        action=IAMActions.GET_USER,
                                        details=ad_user,
                                        active=user_account_control)
        return iam_user_profile
    except Exception as e:
        iam_user_profile.set_result(success=False,
                                    error_message=str(e),
                                    action=IAMActions.GET_USER
                                    )
        return iam_user_profile
def search_computers(default_base_dn, page_size):
    """
    Search AD computer objects by dn/name/custom field and return them to the
    war room, including an Endpoint context for backward compatibility with the
    ADGetComputer script.

    :param default_base_dn: the location in the DIT where the search will start
    :param page_size: number of entries to fetch per LDAP page
    """
    # this command is equivalent to ADGetComputer script
    args = demisto.args()
    attributes: List[str] = []
    custome_attributes: List[str] = []
    # default query - list all users (computer category)
    query = "(&(objectClass=user)(objectCategory=computer))"
    # query by user DN
    if args.get('dn'):
        query = "(&(objectClass=user)(objectCategory=computer)(distinguishedName={}))".format(args['dn'])
    # query by name
    if args.get('name'):
        query = "(&(objectClass=user)(objectCategory=computer)(name={}))".format(args['name'])
    # query by custom object attribute
    if args.get('custom-field-type'):
        if not args.get('custom-field-data'):
            raise Exception('Please specify "custom-field-data" as well when quering by "custom-field-type"')
        query = "(&(objectClass=user)(objectCategory=computer)({}={}))".format(
            args['custom-field-type'], args['custom-field-data'])
    if args.get('attributes'):
        custome_attributes = args['attributes'].split(",")
    # always request the default computer attributes in addition to the custom ones
    attributes = list(set(custome_attributes + DEFAULT_COMPUTER_ATTRIBUTES))
    entries = search_with_paging(
        query,
        default_base_dn,
        attributes=attributes,
        page_size=page_size
    )
    endpoints = [endpoint_entry(entry, custome_attributes) for entry in entries['flat']]
    demisto_entry = {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': entries['raw'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown("Active Directory - Get Computers", entries['flat']),
        'EntryContext': {
            'ActiveDirectory.Computers(obj.dn == val.dn)': entries['flat'],
            # 'backward compatability' with ADGetComputer script
            'Endpoint(obj.ID == val.ID)': endpoints
        }
    }
    demisto.results(demisto_entry)
def search_group_members(default_base_dn, page_size):
    """
    List the person or computer members of a group (equivalent to the
    ADGetGroupMembers script) and return them to the war room.

    :param default_base_dn: the location in the DIT where the search will start
    :param page_size: number of entries to fetch per LDAP page
    """
    # this command is equivalent to ADGetGroupMembers script
    args = demisto.args()
    member_type = args.get('member-type')
    # NOTE: this local name shadows the module-level group_dn() helper (kept as-is)
    group_dn = args.get('group-dn')
    # the matching-rule OID enables transitive (nested) membership resolution
    nested_search = '' if args.get('disable-nested-search') == 'true' else ':1.2.840.113556.1.4.1941:'
    time_limit = int(args.get('time_limit', 180))
    custome_attributes: List[str] = []
    default_attributes = DEFAULT_PERSON_ATTRIBUTES if member_type == 'person' else DEFAULT_COMPUTER_ATTRIBUTES
    if args.get('attributes'):
        custome_attributes = args['attributes'].split(",")
    attributes = list(set(custome_attributes + default_attributes))
    query = "(&(objectCategory={})(objectClass=user)(memberOf{}={}))".format(member_type, nested_search, group_dn)
    entries = search_with_paging(
        query,
        default_base_dn,
        attributes=attributes,
        page_size=page_size,
        time_limit=time_limit
    )
    members = [{'dn': entry['dn'], 'category': member_type} for entry in entries['flat']]
    demisto_entry = {
        'ContentsFormat': formats['json'],
        'Type': entryTypes['note'],
        'Contents': entries['raw'],
        'ReadableContentsFormat': formats['markdown'],
        'HumanReadable': tableToMarkdown("Active Directory - Get Group Members", entries['flat']),
        'EntryContext': {
            'ActiveDirectory.Groups(obj.dn ==' + group_dn + ')': {
                'dn': group_dn,
                'members': members
            }
        }
    }
    # also populate the type-specific context (Users/Account or Computers/Endpoint)
    if member_type == 'person':
        demisto_entry['EntryContext']['ActiveDirectory.Users(obj.dn == val.dn)'] = entries['flat']
        demisto_entry['EntryContext']['Account'] = [account_entry(
            entry, custome_attributes) for entry in entries['flat']]
    else:
        demisto_entry['EntryContext']['ActiveDirectory.Computers(obj.dn == val.dn)'] = entries['flat']
        demisto_entry['EntryContext']['Endpoint'] = [endpoint_entry(
            entry, custome_attributes) for entry in entries['flat']]
    demisto.results(demisto_entry)
''' DATABASE OPERATIONS '''
''' CREATE OBJECT'''
def create_user():
    """
    Create a new AD user from demisto args: add the object, set its initial
    password, then enable the account and force a password change on next login.
    """
    assert conn is not None
    args = demisto.args()
    object_classes = ["top", "person", "organizationalPerson", "user"]
    user_dn = args.get('user-dn')
    username = args.get("username")
    password = args.get("password")
    custome_attributes = args.get('custom-attributes')
    attributes = {
        "sAMAccountName": username
    }
    # set common user attributes
    if args.get('display-name'):
        attributes['displayName'] = args['display-name']
    if args.get('description'):
        attributes['description'] = args['description']
    if args.get('email'):
        attributes['mail'] = args['email']
    if args.get('telephone-number'):
        attributes['telephoneNumber'] = args['telephone-number']
    if args.get('title'):
        attributes['title'] = args['title']
    # set user custom attributes (a JSON string mapping attribute name -> value)
    if custome_attributes:
        try:
            custome_attributes = json.loads(custome_attributes)
        except Exception as e:
            demisto.info(str(e))
            raise Exception(
                "Failed to parse custom attributes argument. Please see an example of this argument in the description."
            )
        for attribute_name, attribute_value in custome_attributes.items():
            # custom attributes override any default attribute set above
            attributes[attribute_name] = attribute_value
    # add user
    success = conn.add(user_dn, object_classes, attributes)
    if not success:
        raise Exception("Failed to create user")
    # set user password
    success = conn.extend.microsoft.modify_password(user_dn, password)
    if not success:
        raise Exception("Failed to reset user password")
    # enable user and expire password
    modification = {
        # enable user
        'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)],
        # set to 0, to force password change on next login
        "pwdLastSet": [('MODIFY_REPLACE', "0")]
    }
    modify_object(user_dn, modification)
    demisto_entry = {
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Created user with DN: {}".format(user_dn)
    }
    demisto.results(demisto_entry)
def create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn):
    """Creates an AD user by User Profile.
    :param default_base_dn: The location in the DIT where the search will start
    :param args: Demisto args.
    :param mapper_out: Mapping User Profiles to AD users.
    :param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
    :return: The user that was created
    """
    assert conn is not None
    user_profile = args.get("user-profile")
    user_profile_delta = args.get('user-profile-delta')
    # BUGFIX: construct the profile outside the try block so the except clause can
    # always report on it (it used to be unbound when an early statement raised).
    iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
    try:
        ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
        sam_account_name = ad_user.get("samaccountname")
        if not sam_account_name:
            raise DemistoException("User must have SAMAccountName")
        user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
        if user_exists:
            # an existing user turns the create into an update (no create-if-missing recursion)
            iam_user_profile = update_user_iam(default_base_dn, args, False, mapper_out, disabled_users_group_cn)
        else:
            # renamed from user_dn to avoid shadowing the module-level user_dn() helper
            new_user_dn = generate_dn_and_remove_from_user_profile(ad_user)
            object_classes = ["top", "person", "organizationalPerson", "user"]
            success = conn.add(new_user_dn, object_classes, ad_user)
            if success:
                iam_user_profile.set_result(success=True,
                                            email=ad_user.get('email'),
                                            username=ad_user.get('name'),
                                            details=ad_user,
                                            action=IAMActions.CREATE_USER,
                                            active=True)
            else:
                iam_user_profile.set_result(success=False,
                                            error_message="Failed to create user",
                                            action=IAMActions.CREATE_USER
                                            )
        return iam_user_profile
    except Exception as e:
        iam_user_profile.set_result(success=False,
                                    error_message=str(e),
                                    action=IAMActions.CREATE_USER,
                                    )
        return iam_user_profile
def update_user_iam(default_base_dn, args, create_if_not_exists, mapper_out, disabled_users_group_cn):
    """Update an AD user by User Profile.
    :param default_base_dn: The location in the DIT where the search will start
    :param args: Demisto args.
    :param create_if_not_exists: Created the user if it does not exists.
    :param mapper_out: Mapping User Profiles to AD users.
    :param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
    :return: Updated User
    """
    assert conn is not None
    user_profile = args.get("user-profile")
    allow_enable = args.get('allow-enable') == 'true'
    user_profile_delta = args.get('user-profile-delta')
    # BUGFIX: construct the profile outside the try block so the except clause can
    # always report on it (it used to be unbound when an early statement raised).
    iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
    try:
        ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
        # check if user exists and if it doesn't, create it
        sam_account_name = ad_user.get("samaccountname")
        if not sam_account_name:
            raise DemistoException("User must have SAMAccountName")
        new_ou = ad_user.get("ou")
        user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
        if not user_exists and create_if_not_exists:
            iam_user_profile = create_user_iam(default_base_dn, args, mapper_out, disabled_users_group_cn)
        elif user_exists:
            dn = user_dn(sam_account_name, default_base_dn)
            if allow_enable:
                enable_user_iam(default_base_dn, dn, disabled_users_group_cn)
            # fields that can't be modified
            # notice that we are changing the ou and that effects the dn and cn
            for field in FIELDS_THAT_CANT_BE_MODIFIED:
                if ad_user.get(field):
                    ad_user.pop(field)
            fail_to_modify = []
            # replace each remaining attribute individually so one failure
            # does not block the others
            for key in ad_user:
                modification = {key: [('MODIFY_REPLACE', ad_user.get(key))]}
                success = conn.modify(dn, modification)
                if not success:
                    fail_to_modify.append(key)
            # BUGFIX: only attempt the OU move when the mapper actually provided one -
            # modify_dn with an empty superior always fails
            if new_ou:
                ou_modified_succeed = modify_user_ou(dn, new_ou)
                if not ou_modified_succeed:
                    fail_to_modify.append("ou")
            if fail_to_modify:
                error_list = '\n'.join(fail_to_modify)
                error_message = f"Fail to modify the following attributes: {error_list}"
                iam_user_profile.set_result(success=False,
                                            error_message=error_message,
                                            action=IAMActions.UPDATE_USER,
                                            )
            else:
                active = get_user_activity_by_samaccountname(default_base_dn, sam_account_name)
                iam_user_profile.set_result(success=True,
                                            email=ad_user.get('email'),
                                            username=ad_user.get('name'),
                                            action=IAMActions.UPDATE_USER,
                                            details=ad_user,
                                            active=active)
        return iam_user_profile
    except Exception as e:
        iam_user_profile.set_result(success=False,
                                    error_message=str(e),
                                    action=IAMActions.UPDATE_USER
                                    )
        return iam_user_profile
def create_contact():
    """
    Create a new AD contact object from demisto args. Attributes may come from a
    JSON 'custom-attributes' mapping and/or the common named arguments
    (display-name, description, email, telephone-number, title).
    """
    assert conn is not None
    args = demisto.args()
    object_classes = ["top", "person", "organizationalPerson", "contact"]
    contact_dn = args.get('contact-dn')
    # set contact attributes
    attributes: Dict = {}
    if args.get('custom-attributes'):
        try:
            attributes = json.loads(args['custom-attributes'])
        except Exception as e:
            demisto.info(str(e))
            raise Exception(
                'Failed to parse custom attributes argument. Please see an example of this argument in the argument.'
            )
    # set common user attributes (these override custom attributes with the same name)
    if args.get('display-name'):
        attributes['displayName'] = args['display-name']
    if args.get('description'):
        attributes['description'] = args['description']
    if args.get('email'):
        attributes['mail'] = args['email']
    if args.get('telephone-number'):
        attributes['telephoneNumber'] = args['telephone-number']
    if args.get('title'):
        attributes['title'] = args['title']
    # add contact
    success = conn.add(contact_dn, object_classes, attributes)
    if not success:
        raise Exception("Failed to create contact")
    demisto_entry = {
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Created contact with DN: {}".format(contact_dn)
    }
    demisto.results(demisto_entry)
def create_group():
    """Create an AD group (security or distribution) from demisto args."""
    assert conn is not None
    args = demisto.args()
    object_classes = ["top", "group"]
    dn = args.get('dn')
    group_name = args.get('name')
    # AD groupType values: security vs. distribution group
    group_type_map = {"security": "2147483650", "distribution": "2"}
    group_type = group_type_map[args.get("group-type")]
    attributes = {
        "samAccountName": group_name,
        "groupType": group_type
    }
    members = args.get('members')
    if members:
        # initial member list is optional
        attributes["member"] = members
    # create group
    if not conn.add(dn, object_classes, attributes):
        raise Exception("Failed to create group")
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Created group with DN: {}".format(dn)
    })
''' UPDATE OBJECT '''
def modify_object(dn, modification):
    """
    modifies object in the DIT
    """
    assert conn is not None
    if not conn.modify(dn, modification):
        # include the attempted modification in the error for easier debugging
        raise Exception("Failed to update object {} with the following modification: {}".format(
            dn, json.dumps(modification)))
def update_user(default_base_dn):
    """Replace a single attribute on a user resolved by sAMAccountName."""
    args = demisto.args()
    sam_account_name = args.get('username')
    attribute_name = args.get('attribute-name')
    attribute_value = args.get('attribute-value')
    search_base = args.get('base-dn') or default_base_dn
    # get user DN
    dn = user_dn(sam_account_name, search_base)
    # modify user
    modify_object(dn, {attribute_name: [('MODIFY_REPLACE', attribute_value)]})
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Updated user's {} to {} ".format(attribute_name, attribute_value)
    })
def update_contact():
    """Replace a single attribute on a contact identified by its DN."""
    args = demisto.args()
    contact_dn = args.get('contact-dn')
    attribute_name = args.get('attribute-name')
    attribute_value = args.get('attribute-value')
    # modify
    modify_object(contact_dn, {attribute_name: [('MODIFY_REPLACE', attribute_value)]})
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Updated contact's {} to: {} ".format(attribute_name, attribute_value)
    })
def modify_computer_ou(default_base_dn):
    """Move a computer object under a new OU ('full-superior-dn')."""
    assert conn is not None
    args = demisto.args()
    computer_name = args.get('computer-name')
    target_superior = args.get('full-superior-dn')
    dn = computer_dn(computer_name, args.get('base-dn') or default_base_dn)
    # renaming the DN under a new superior moves the object
    if not conn.modify_dn(dn, "CN={}".format(computer_name), new_superior=target_superior):
        raise Exception("Failed to modify computer OU")
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Moved computer {} to {}".format(computer_name, target_superior)
    })
def expire_user_password(default_base_dn):
    """Force a password change on next login by zeroing pwdLastSet."""
    args = demisto.args()
    search_base = args.get('base-dn') or default_base_dn
    # get user DN
    dn = user_dn(args.get('username'), search_base)
    # set to 0, to force password change on next login
    modify_object(dn, {"pwdLastSet": [('MODIFY_REPLACE', "0")]})
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Expired password successfully"
    })
def set_user_password(default_base_dn):
    """Reset a user's password via the Microsoft extended operation."""
    assert conn is not None
    args = demisto.args()
    new_password = args.get('password')
    search_base = args.get('base-dn') or default_base_dn
    # get user DN
    dn = user_dn(args.get('username'), search_base)
    # set user password
    if not conn.extend.microsoft.modify_password(dn, new_password):
        raise Exception("Failed to reset user password")
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "User password successfully set"
    })
def enable_user(default_base_dn):
    """Enable a user account by resetting userAccountControl to NORMAL_ACCOUNT."""
    args = demisto.args()
    sam_account_name = args.get('username')
    search_base = args.get('base-dn') or default_base_dn
    # get user DN
    dn = user_dn(sam_account_name, search_base)
    # modify user
    modify_object(dn, {'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]})
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "User {} was enabled".format(sam_account_name)
    })
def disable_user(default_base_dn):
    """Disable a user account by setting userAccountControl to DISABLED_ACCOUNT."""
    args = demisto.args()
    sam_account_name = args.get('username')
    search_base = args.get('base-dn') or default_base_dn
    # get user DN
    dn = user_dn(sam_account_name, search_base)
    # modify user
    modify_object(dn, {'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]})
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "User {} was disabled".format(sam_account_name)
    })
def enable_user_iam(default_base_dn, dn, disabled_users_group_cn):
    """Enables an AD user by User Profile.
    :param default_base_dn: The location in the DIT where the search will start
    :param dn: The users unique dn
    :param disabled_users_group_cn: The disabled group cn, the user will be removed from this group when enabled
    """
    # flip the account back to a normal (enabled) account
    modify_object(dn, {'userAccountControl': [('MODIFY_REPLACE', NORMAL_ACCOUNT)]})
    if not disabled_users_group_cn:
        return
    # enabled users must also leave the configured "disabled users" group
    grp_dn = group_dn(disabled_users_group_cn, default_base_dn)
    if not microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [dn], [grp_dn], True):
        raise Exception('Failed to remove user from {} group'.format(disabled_users_group_cn))
def disable_user_iam(default_base_dn, disabled_users_group_cn, args, mapper_out):
    """Disables an AD user by User Profile.
    :param default_base_dn: The location in the DIT where the search will start
    :param disabled_users_group_cn: The disabled group cn, the user will be added to this group when disabled
    :param args: Demisto args.
    :param mapper_out: Mapping User Profiles to AD users.
    :return: The disabled user
    """
    user_profile = args.get("user-profile")
    user_profile_delta = args.get('user-profile-delta')
    # BUGFIX: construct the profile outside the try block so the except clause can
    # always report on it (it used to be unbound when an early statement raised).
    iam_user_profile = IAMUserProfile(user_profile=user_profile, user_profile_delta=user_profile_delta)
    try:
        ad_user = iam_user_profile.map_object(mapper_name=mapper_out)
        sam_account_name = ad_user.get("samaccountname")
        if not sam_account_name:
            raise DemistoException("User must have SAMAccountName")
        user_exists = check_if_user_exists_by_samaccountname(default_base_dn, sam_account_name)
        if not user_exists:
            # nothing to disable - report a skipped action instead of an error
            iam_user_profile.set_result(success=True, action=IAMActions.DISABLE_USER,
                                        skip=True, skip_reason="User doesn't exists")
            return iam_user_profile
        dn = user_dn(sam_account_name, default_base_dn)
        # modify user
        modification = {
            'userAccountControl': [('MODIFY_REPLACE', DISABLED_ACCOUNT)]
        }
        command_failed = False
        modify_object(dn, modification)
        if disabled_users_group_cn:
            # disabled users are also added to the configured "disabled users" group
            grp_dn = group_dn(disabled_users_group_cn, default_base_dn)
            success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [dn], [grp_dn])
            if not success:
                command_failed = True
                # BUGFIX: this branch *adds* the user to the group; the original
                # message wrongly said "remove user from"
                e = 'Failed to add user to {} group'.format(disabled_users_group_cn)
                iam_user_profile.set_result(success=False,
                                            error_message=e,
                                            action=IAMActions.DISABLE_USER,
                                            )
        if not command_failed:
            iam_user_profile.set_result(success=True,
                                        email=ad_user.get('email'),
                                        username=ad_user.get('name'),
                                        action=IAMActions.DISABLE_USER,
                                        details=ad_user,
                                        active=False)
        return iam_user_profile
    except Exception as e:
        iam_user_profile.set_result(success=False,
                                    error_message=str(e),
                                    action=IAMActions.DISABLE_USER
                                    )
        return iam_user_profile
def add_member_to_group(default_base_dn):
    """
    Add a user or a computer to a group. Expects exactly one of 'username' /
    'computer-name' in the demisto args, plus 'group-cn' naming the target group.
    """
    args = demisto.args()
    search_base = args.get('base-dn') or default_base_dn
    # get the dn of the member - either user or computer
    args_err = "Please provide either username or computer-name"
    member_dn = ''
    if args.get('username') and args.get('computer-name'):
        # both arguments passed
        raise Exception(args_err)
    if args.get('username'):
        member_dn = user_dn(args['username'], search_base)
    elif args.get('computer-name'):
        member_dn = computer_dn(args['computer-name'], search_base)
    else:
        # none of the arguments passed
        raise Exception(args_err)
    grp_dn = group_dn(args.get('group-cn'), search_base)
    success = microsoft.addMembersToGroups.ad_add_members_to_groups(conn, [member_dn], [grp_dn])
    if not success:
        # BUGFIX: the command argument is 'group-cn'; the original read the
        # nonexistent 'group_name', so the message always showed "group None"
        raise Exception("Failed to add {} to group {}".format(
            args.get('username') or args.get('computer-name'),
            args.get('group-cn')
        ))
    demisto_entry = {
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Object with dn {} was added to group {}".format(member_dn, args.get('group-cn'))
    }
    demisto.results(demisto_entry)
def remove_member_from_group(default_base_dn):
    """Remove a user or computer object from an Active Directory group.

    Command args: exactly one of 'username' or 'computer-name' must be
    supplied, plus 'group-cn'; 'base-dn' optionally overrides
    default_base_dn.  Raises on ambiguous/missing arguments or when the
    LDAP remove-from-group operation fails.
    """
    args = demisto.args()
    search_base = args.get('base-dn') or default_base_dn

    # get the dn of the member - either user or computer
    args_err = "Please provide either username or computer-name"
    if args.get('username') and args.get('computer-name'):
        # both arguments passed - ambiguous request
        raise Exception(args_err)
    if args.get('username'):
        member_dn = user_dn(args['username'], search_base)
    elif args.get('computer-name'):
        member_dn = computer_dn(args['computer-name'], search_base)
    else:
        # none of the arguments passed
        raise Exception(args_err)

    grp_dn = group_dn(args.get('group-cn'), search_base)

    success = microsoft.removeMembersFromGroups.ad_remove_members_from_groups(conn, [member_dn], [grp_dn], True)
    if not success:
        # BUGFIX: the group argument is named 'group-cn'; the previous
        # 'group_name' key does not exist and always rendered as None.
        raise Exception("Failed to remove {} from group {}".format(
            args.get('username') or args.get('computer-name'),
            args.get('group-cn')
        ))

    demisto_entry = {
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Object with dn {} removed from group {}".format(member_dn, args.get('group-cn'))
    }
    demisto.results(demisto_entry)
def unlock_account(default_base_dn):
    """Unlock the locked-out AD account named by the 'username' command arg."""
    args = demisto.args()

    # Resolve the user's distinguished name under the requested search base.
    account_name = args.get('username')
    base = args.get('base-dn') or default_base_dn
    target_dn = user_dn(account_name, base)

    unlocked = microsoft.unlockAccount.ad_unlock_account(conn, target_dn)
    if not unlocked:
        raise Exception("Failed to unlock user {}".format(account_name))

    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Unlocked user {}".format(account_name)
    })
''' DELETE OBJECT '''
def delete_user():
    """Delete the directory object whose DN is given in the 'user-dn' arg."""
    # can actually delete any object...
    assert conn is not None
    target_dn = demisto.args().get('user-dn')
    if not conn.delete(target_dn):
        raise Exception('Failed to delete user')
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Deleted object with dn {}".format(target_dn)
    })
def delete_group():
    """Delete the AD group whose DN is given in the 'dn' command arg."""
    assert conn is not None
    target_dn = demisto.args().get('dn')
    # remove the group object itself
    if not conn.delete(target_dn):
        raise Exception("Failed to delete group")
    demisto.results({
        'ContentsFormat': formats['text'],
        'Type': entryTypes['note'],
        'Contents': "Deleted group with DN: {}".format(target_dn)
    })
def get_mapping_fields_command(search_base):
    """Build the field scheme used by the IAM mapper for the User Profile type."""
    scheme = SchemeTypeMapping(type_name=IAMUserProfile.INDICATOR_TYPE)
    # The mapping targets are every AD attribute plus two pseudo-keys that
    # are valid in mappings even though they are not real attributes.
    for attribute in get_all_attributes(search_base) + ["dn", "samaccountname"]:
        scheme.add_field(attribute, "Field")
    return GetMappingFieldsResponse([scheme])
'''
TEST CONFIGURATION
authenticate user credentials while initializing connection with AD server
verify base DN is configured correctly
'''
def main():
    """Integration entry point.

    Reads instance parameters, opens and binds an LDAP connection to the
    AD server (NTLM or simple bind), verifies the configured base DN, and
    dispatches the invoked command.  On any failure the error is reported
    via return_error; the connection is always unbound in the finally block.
    """
    ''' INSTANCE CONFIGURATION '''
    params = demisto.params()

    SERVER_IP = params.get('server_ip')
    USERNAME = params.get('credentials')['identifier']
    PASSWORD = params.get('credentials')['password']
    DEFAULT_BASE_DN = params.get('base_dn')
    SECURE_CONNECTION = params.get('secure_connection')
    DEFAULT_PAGE_SIZE = int(params.get('page_size'))
    NTLM_AUTH = params.get('ntlm')
    UNSECURE = params.get('unsecure', False)
    PORT = params.get('port')
    disabled_users_group_cn = params.get('group-cn')
    create_if_not_exists = params.get('create-if-not-exists')
    mapper_in = params.get('mapper-in', DEFAULT_INCOMING_MAPPER)
    mapper_out = params.get('mapper-out', DEFAULT_OUTGOING_MAPPER)

    if PORT:
        # port was configured, cast to int
        PORT = int(PORT)
    last_log_detail_level = None
    try:
        try:
            # Hide credentials from the ldap3 library's own logging.
            set_library_log_hide_sensitive_data(True)
            if is_debug_mode():
                demisto.info('debug-mode: setting library log detail to EXTENDED')
                last_log_detail_level = get_library_log_detail_level()
                set_library_log_detail_level(EXTENDED)
            server = initialize_server(SERVER_IP, PORT, SECURE_CONNECTION, UNSECURE)
        except Exception as e:
            return_error(str(e))
            return
        # conn is module-global so the command handlers can reach it.
        global conn
        if NTLM_AUTH:
            # intialize connection to LDAP server with NTLM authentication
            # user example: domain\user
            domain_user = SERVER_IP + '\\' + USERNAME if '\\' not in USERNAME else USERNAME
            conn = Connection(server, user=domain_user, password=PASSWORD, authentication=NTLM)
        else:
            # here username should be the user dn
            conn = Connection(server, user=USERNAME, password=PASSWORD)

        # bind operation is the “authenticate” operation.
        try:
            # open socket and bind to server
            if not conn.bind():
                message = "Failed to bind to server. Please validate the credentials configured correctly.\n{}".format(
                    json.dumps(conn.result))
                return_error(message)
                return
        except Exception as e:
            exc_msg = str(e)
            demisto.info("Failed bind to: {}:{}. {}: {}".format(SERVER_IP, PORT, type(e), exc_msg
                         + "\nTrace:\n{}".format(traceback.format_exc())))
            message = "Failed to access LDAP server. Please validate the server host and port are configured correctly"
            if 'ssl wrapping error' in exc_msg:
                message = "Failed to access LDAP server. SSL error."
                if not UNSECURE:
                    message += ' Try using: "Trust any certificate" option.'
            return_error(message)
            return

        demisto.info('Established connection with AD LDAP server')

        if not base_dn_verified(DEFAULT_BASE_DN):
            message = "Failed to verify the base DN configured for the instance.\n" \
                      "Last connection result: {}\n" \
                      "Last error from LDAP server: {}".format(json.dumps(conn.result), json.dumps(conn.last_error))
            return_error(message)
            return

        # NOTE(review): "Verfied" typo lives in the runtime log string; left
        # untouched here since this change documents only.
        demisto.info('Verfied base DN "{}"'.format(DEFAULT_BASE_DN))

        ''' COMMAND EXECUTION '''

        if demisto.command() == 'test-module':
            if conn.user == '':
                # Empty response means you have no authentication status on the server, so you are an anonymous user.
                raise Exception("Failed to authenticate user")
            demisto.results('ok')

        args = demisto.args()

        # Command dispatch: each command handler reads its own args from
        # demisto.args() and emits results itself.
        if demisto.command() == 'ad-search':
            free_search(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)

        if demisto.command() == 'ad-expire-password':
            expire_user_password(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-set-new-password':
            set_user_password(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-unlock-account':
            unlock_account(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-disable-account':
            disable_user(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-enable-account':
            enable_user(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-remove-from-group':
            remove_member_from_group(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-add-to-group':
            add_member_to_group(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-create-user':
            create_user()

        if demisto.command() == 'ad-delete-user':
            delete_user()

        if demisto.command() == 'ad-update-user':
            update_user(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-modify-computer-ou':
            modify_computer_ou(DEFAULT_BASE_DN)

        if demisto.command() == 'ad-create-contact':
            create_contact()

        if demisto.command() == 'ad-update-contact':
            update_contact()

        if demisto.command() == 'ad-get-user':
            search_users(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)

        if demisto.command() == 'ad-get-computer':
            search_computers(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)

        if demisto.command() == 'ad-get-group-members':
            search_group_members(DEFAULT_BASE_DN, DEFAULT_PAGE_SIZE)

        if demisto.command() == 'ad-create-group':
            create_group()

        if demisto.command() == 'ad-delete-group':
            delete_group()

        # IAM commands
        if demisto.command() == 'iam-get-user':
            user_profile = get_user_iam(DEFAULT_BASE_DN, args, mapper_in, mapper_out)
            return return_results(user_profile)

        if demisto.command() == 'iam-create-user':
            user_profile = create_user_iam(DEFAULT_BASE_DN, args, mapper_out, disabled_users_group_cn)
            return return_results(user_profile)

        if demisto.command() == 'iam-update-user':
            user_profile = update_user_iam(DEFAULT_BASE_DN, args, create_if_not_exists, mapper_out,
                                           disabled_users_group_cn)
            return return_results(user_profile)

        if demisto.command() == 'iam-disable-user':
            user_profile = disable_user_iam(DEFAULT_BASE_DN, disabled_users_group_cn, args, mapper_out)
            return return_results(user_profile)

        elif demisto.command() == 'get-mapping-fields':
            mapping_fields = get_mapping_fields_command(DEFAULT_BASE_DN)
            return return_results(mapping_fields)

    except Exception as e:
        message = str(e)
        if conn:
            message += "\nLast connection result: {}\nLast error from LDAP server: {}".format(
                json.dumps(conn.result), conn.last_error)
        return_error(message)
        return
    finally:
        # disconnect and close the connection
        if conn:
            conn.unbind()
        if last_log_detail_level:
            set_library_log_detail_level(last_log_detail_level)
from IAMApiModule import * # noqa: E402
# python2 uses __builtin__ python3 uses builtins
if __name__ == "__builtin__" or __name__ == "builtins" or __name__ == "__main__":
main()
| 34.784326 | 120 | 0.628931 |
7955ebb8c5e933ed0f53badffc69d2182f9b1d91 | 1,006 | py | Python | tests/cli_fixtures.py | matutter/water | b3390b02a53661e84200e0c8f00e19459bc7bc82 | [
"MIT"
] | 1 | 2021-02-21T03:09:17.000Z | 2021-02-21T03:09:17.000Z | tests/cli_fixtures.py | matutter/water | b3390b02a53661e84200e0c8f00e19459bc7bc82 | [
"MIT"
] | 1 | 2021-02-19T14:24:40.000Z | 2021-02-20T20:06:58.000Z | tests/cli_fixtures.py | matutter/water | b3390b02a53661e84200e0c8f00e19459bc7bc82 | [
"MIT"
] | null | null | null | """
Fixtures specific to Click.
"""
from foremon.display import display_verbose, set_display_verbose
from typing import Any, Callable, List
import pytest
from click.testing import CliRunner, Result
from .fixtures import *
CliProg = Callable[[List[Any]], Result]
@pytest.fixture
def cli(request: SubRequest, output: CapLines) -> CliProg:
    """Return a callable that invokes the foremon CLI with string args.

    Each invocation runs through Click's CliRunner (stderr kept separate),
    appends the captured stdout/stderr to the shared ``output`` fixture,
    and returns the Click ``Result``.
    """
    from foremon.cli import foremon

    def run(*args) -> Result:
        # Join all arguments into the single command string CliRunner expects.
        cmd = " ".join(list(map(str, args)))
        runner = CliRunner(mix_stderr=False)
        result: Result = runner.invoke(foremon, cmd)
        output.stdout_append(result.stdout)
        output.stderr_append(result.stderr)
        return result

    # If testing the `-V` flag we can accidentally toggle this global and
    # contaminate other tests checking for debug output indicators.
    current_verbose = display_verbose()

    def reset_display():
        set_display_verbose(current_verbose)

    request.addfinalizer(reset_display)
    return run
__all__ = ['cli', 'CliProg', 'Result']
| 23.952381 | 73 | 0.704771 |
7955ec2bd8a57007f3bab3ab79c71135908db2f7 | 544 | py | Python | Arrays/1472. Design Browser History_M.py | thewires2/Leetcode | a37ff81d60dd9195ba637b970b40aabbea5f4680 | [
"Unlicense"
] | 1 | 2021-06-30T17:51:56.000Z | 2021-06-30T17:51:56.000Z | Arrays/1472. Design Browser History_M.py | thewires2/Leetcode | a37ff81d60dd9195ba637b970b40aabbea5f4680 | [
"Unlicense"
] | null | null | null | Arrays/1472. Design Browser History_M.py | thewires2/Leetcode | a37ff81d60dd9195ba637b970b40aabbea5f4680 | [
"Unlicense"
] | null | null | null | class BrowserHistory:
def __init__(self, homepage: str):
self.current=0
self.history=[homepage]
def visit(self, url: str) -> None:
self.current+=1
self.history=self.history[:self.current]
self.history.append(url)
def back(self, steps: int) -> str:
self.current=max(self.current-steps, 0)
return self.history[self.current]
def forward(self, steps: int) -> str:
self.current=min(self.current+steps, len(self.history)-1)
return self.history[self.current]
| 28.631579 | 65 | 0.628676 |
7955ee83135c8ed0a2282e0ee89c36aa8cddec8c | 1,005 | py | Python | lib/Variation_Viewer/Utils/genomeutils.py | kbasecollaborations/Variation_Viewer | cd5d590fb742a9df9666c07c0aa3b9e30f791853 | [
"MIT"
] | null | null | null | lib/Variation_Viewer/Utils/genomeutils.py | kbasecollaborations/Variation_Viewer | cd5d590fb742a9df9666c07c0aa3b9e30f791853 | [
"MIT"
] | null | null | null | lib/Variation_Viewer/Utils/genomeutils.py | kbasecollaborations/Variation_Viewer | cd5d590fb742a9df9666c07c0aa3b9e30f791853 | [
"MIT"
] | null | null | null | class genomeutils:
def __init__(self):
pass
def gff2bed(self, gff_file, output_dir):
#awk -F "\t" '{print $1"\t"$4"\t"$5"\t"$9"\t"$6"\t"$7"\t"$2"\t"$8"\t"$9}' GCA_009858895.3_ASM985889v3_genomic.gff
gff_file_name = gff_file.split("/")[-1]
bed_file = gff_file_name.replace(".gff",".bed")
try:
fw = open(output_dir +"/igv_output/data/"+bed_file, "w")
try:
with open(gff_file, "r") as fp:
for line in fp:
if not line.lstrip().startswith('#'):
line = line.rstrip()
rec = line.split("\t")
fw.write(rec[0]+"\t"+rec[3]+"\t"+rec[4]+"\t"+rec[8]+"\t"+rec[5]+"\t"+rec[6]+"\t"+rec[1]+"\t"+rec[7]+"\t"+rec[8]+"\n")
except IOError:
print ("could not write to bed file\n")
except IOError:
print("could not read input gff file\n")
fw.close()
return bed_file
| 33.5 | 140 | 0.475622 |
7955f188582ebe840a118754de32516a9885d7f5 | 7,303 | py | Python | xilinx/xilinx7_reader.py | xesscorp/KiCad-Schematic-Symbol-Libraries | ab472c05697d3e3a85ee269e4ad8fa4028dc3cc9 | [
"Unlicense"
] | 66 | 2015-08-27T12:55:01.000Z | 2021-09-03T19:16:01.000Z | xilinx/xilinx7_reader.py | devbisme/KiCad-Schematic-Symbol-Libraries | ab472c05697d3e3a85ee269e4ad8fa4028dc3cc9 | [
"Unlicense"
] | 2 | 2016-12-19T23:51:37.000Z | 2020-05-15T20:58:14.000Z | xilinx/xilinx7_reader.py | xesscorp/KiCad-Schematic-Symbol-Libraries | ab472c05697d3e3a85ee269e4ad8fa4028dc3cc9 | [
"Unlicense"
] | 19 | 2015-08-27T12:55:33.000Z | 2021-09-03T19:16:02.000Z | # MIT license
#
# Copyright (C) 2015 by XESS Corp.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from __future__ import absolute_import
import copy
import csv
import warnings
from collections import defaultdict
from kipart.common import *
from kipart.kipart import *
defaulted_names = set(list())
def xilinx7_reader(part_data_file, part_data_file_name, part_data_file_type=".csv"):
    """Extract the pin data from a Xilinx CSV file and return a dictionary of pin data.

    Generator: yields a single tuple
    (part_num, 'U', '', '', '', part_num, pin_data) on success, or yields
    nothing when no part number / pin table can be found.
    `part_data_file_name` is accepted for interface symmetry with other
    readers but is not used here.
    """
    # If part data file is Excel, convert it to CSV.
    if part_data_file_type == ".xlsx":
        part_data_file = convert_xlsx_to_csv(part_data_file)
    csv_file = part_data_file

    # Create a dictionary that uses the unit numbers as keys. Each entry in this dictionary
    # contains another dictionary that uses the side of the symbol as a key. Each entry in
    # that dictionary uses the pin names in that unit and on that side as keys. Each entry
    # in that dictionary is a list of Pin objects with each Pin object having the same name
    # as the dictionary key. So the pins are separated into units at the top level, and then
    # the sides of the symbol, and then the pins with the same name that are on that side
    # of the unit.
    pin_data = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))

    # Scan the initial portion of the file for the part number.
    part_num = None
    try:
        while True:
            line = csv_file.readline()
            if re.match("^,*$", line):
                # Stop searching for part number as soon as a blank line is seen.
                break
            elif line.startswith('"#') or line.startswith("#"):
                # Look for the part number within a comment.
                device = re.search(r"#\s+Device\s*:\s*(\w+)", line)
                if device:
                    part_num = device.group(1)
            else:
                # Look for the part number on a line of the file.
                # (Any line that doesn't split into exactly five fields
                # raises here and aborts the scan.)
                _, part_num, date, time, _ = re.split("\s+", line)
    except Exception:
        return  # No part was found.

    if part_num is None:
        return  # No part number was found, so abort.

    # Create a reader object for the rows of the CSV file and read it row-by-row.
    csv_reader = csv.DictReader(csv_file, skipinitialspace=True)
    for index, row in enumerate(csv_reader):
        # A blank line signals the end of the pin data.
        try:
            if row["Pin"] == "":
                break
        except KeyError:
            # Abort if a TXT file is being processed instead of a CSV file.
            return

        # Get the pin attributes from the cells of the row of data.
        pin = copy.copy(DEFAULT_PIN)
        pin.index = index
        pin.name = fix_pin_data(row["Pin Name"], part_num)
        pin.num = fix_pin_data(row["Pin"], part_num)
        pin.unit = fix_pin_data(row["Bank"], part_num)

        # The type of the pin isn't given in the CSV file, so we'll have to infer it
        # from the name of the pin. Pin names starting with the following prefixes
        # are assigned the given pin type.
        DEFAULT_PIN_TYPE = (
            "input"  # Assign this pin type if name inference can't be made.
        )
        PIN_TYPE_PREFIXES = [
            (r"VCC", "power_in"),
            (r"GND", "power_in"),
            (r"IO_", "bidirectional"),
            (r"DONE", "output"),
            (r"VREF[PN]_", "input"),
            (r"TCK", "input"),
            (r"TDI", "input"),
            (r"TDO", "output"),
            (r"TMS", "input"),
            (r"CCLK", "input"),
            (r"M0", "input"),
            (r"M1", "input"),
            (r"M2", "input"),
            (r"INIT_B", "input"),
            (r"PROG", "input"),
            (r"NC", "no_connect"),
            (r"VP_", "input"),
            (r"VN_", "input"),
            (r"DXP_", "passive"),
            (r"DXN_", "passive"),
            (r"CFGBVS_", "input"),
            (r"MGTZ?REFCLK[0-9]+[NP]_", "input"),
            (r"MGTZ_OBS_CLK_[PN]_", "input"),
            (r"MGT[ZPHX]TX[NP][0-9]+_", "output"),
            (r"MGT[ZPHX]RX[NP][0-9]+_", "input"),
            (r"MGTAVTTRCAL_", "passive"),
            (r"MGTRREF_", "passive"),
            (r"MGTVCCAUX_?", "power_in"),
            (r"MGTAVTT_?", "power_in"),
            (r"MGTZ_THERM_IN_", "input"),
            (r"MGTZ_THERM_OUT_", "input"),
            (r"MGTZ?A(VCC|GND)_?", "power_in"),
            (r"MGTZVCC[LH]_", "power_in"),
            (r"MGTZ_SENSE_(A?VCC|A?GND)[LH]?_", "power_in"),
            (r"RSVD(VCC[1-3]|GND)", "power_in"),
            (r"PS_CLK_", "input"),
            (r"PS_POR_B", "input"),
            (r"PS_SRST_B", "input"),
            (r"PS_DDR_CK[PN]_", "output"),
            (r"PS_DDR_CKE_", "output"),
            (r"PS_DDR_CS_B_", "output"),
            (r"PS_DDR_RAS_B_", "output"),
            (r"PS_DDR_CAS_B_", "output"),
            (r"PS_DDR_WE_B_", "output"),
            (r"PS_DDR_BA[0-9]+_", "output"),
            (r"PS_DDR_A[0-9]+_", "output"),
            (r"PS_DDR_ODT_", "output"),
            (r"PS_DDR_DRST_B_", "output"),
            (r"PS_DDR_DQ[0-9]+_", "bidirectional"),
            (r"PS_DDR_DM[0-9]+_", "output"),
            (r"PS_DDR_DQS_[PN][0-9]+_", "bidirectional"),
            (r"PS_DDR_VR[PN]_", "power_out"),
            (r"PS_DDR_VREF[0-9]+_", "power_in"),
            (r"PS_MIO_VREF_", "power_in"),
            (r"PS_MIO[0-9]+_", "bidirectional"),
        ]
        # First prefix that matches (case-insensitively) wins; otherwise
        # fall back to the default and warn.
        for prefix, typ in PIN_TYPE_PREFIXES:
            if re.match(prefix, pin.name, re.IGNORECASE):
                pin.type = typ
                break
        else:
            issue(
                "No match for {} on {}, assigning as {}".format(
                    pin.name, part_num[:4], DEFAULT_PIN_TYPE
                )
            )
            pin.type = DEFAULT_PIN_TYPE
        pin.type = fix_pin_data(pin.type, part_num)

        # Add the pin from this row of the CVS file to the pin dictionary.
        # Place all the like-named pins into a list under their common name.
        # We'll unbundle them later, if necessary.
        pin_data[pin.unit][pin.side][pin.name].append(pin)

    yield part_num, "U", "", "", "", part_num, pin_data  # Return the dictionary of pins extracted from the CVS file.
| 41.731429 | 117 | 0.579762 |
7955f3c7f7d3f8a2d4ecaace9f823ea817370de0 | 680 | py | Python | src/zenml/integrations/mlflow/experiment_trackers/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/mlflow/experiment_trackers/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | src/zenml/integrations/mlflow/experiment_trackers/__init__.py | dumpmemory/zenml | ec3f6994ae9666493519d600471c035eb9109ac4 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
from zenml.integrations.mlflow.experiment_trackers.mlflow_experiment_tracker import ( # noqa
MLFlowExperimentTracker,
)
| 45.333333 | 93 | 0.764706 |
7955f3d4d0e35f761525367eedab2e570bd20637 | 81,777 | py | Python | mesonbuild/compilers/compilers.py | emersion/meson | 76db75328689a7e48ea5862edd333e6f30c681bb | [
"Apache-2.0"
] | null | null | null | mesonbuild/compilers/compilers.py | emersion/meson | 76db75328689a7e48ea5862edd333e6f30c681bb | [
"Apache-2.0"
] | null | null | null | mesonbuild/compilers/compilers.py | emersion/meson | 76db75328689a7e48ea5862edd333e6f30c681bb | [
"Apache-2.0"
] | null | null | null | # Copyright 2012-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc, contextlib, enum, os.path, re, tempfile, shlex
import subprocess
from typing import List, Tuple
from ..linkers import StaticLinker
from .. import coredata
from .. import mlog
from .. import mesonlib
from ..mesonlib import (
EnvironmentException, MesonException, OrderedSet,
version_compare, Popen_safe
)
"""This file contains the data files of all compilers Meson knows
about. To support a new compiler, add its information below.
Also add corresponding autodetection code in environment.py."""
header_suffixes = ('h', 'hh', 'hpp', 'hxx', 'H', 'ipp', 'moc', 'vapi', 'di')
obj_suffixes = ('o', 'obj', 'res')
lib_suffixes = ('a', 'lib', 'dll', 'dylib', 'so')
# Mapping of language to suffixes of files that should always be in that language
# This means we can't include .h headers here since they could be C, C++, ObjC, etc.
lang_suffixes = {
'c': ('c',),
'cpp': ('cpp', 'cc', 'cxx', 'c++', 'hh', 'hpp', 'ipp', 'hxx'),
'cuda': ('cu',),
# f90, f95, f03, f08 are for free-form fortran ('f90' recommended)
# f, for, ftn, fpp are for fixed-form fortran ('f' or 'for' recommended)
'fortran': ('f90', 'f95', 'f03', 'f08', 'f', 'for', 'ftn', 'fpp'),
'd': ('d', 'di'),
'objc': ('m',),
'objcpp': ('mm',),
'rust': ('rs',),
'vala': ('vala', 'vapi', 'gs'),
'cs': ('cs',),
'swift': ('swift',),
'java': ('java',),
}
all_languages = lang_suffixes.keys()
cpp_suffixes = lang_suffixes['cpp'] + ('h',)
c_suffixes = lang_suffixes['c'] + ('h',)
# List of languages that by default consume and output libraries following the
# C ABI; these can generally be used interchangebly
clib_langs = ('objcpp', 'cpp', 'objc', 'c', 'fortran',)
# List of languages that can be linked with C code directly by the linker
# used in build.py:process_compilers() and build.py:get_dynamic_linker()
# XXX: Add Rust to this?
clink_langs = ('d', 'cuda') + clib_langs
clink_suffixes = ()
for _l in clink_langs + ('vala',):
clink_suffixes += lang_suffixes[_l]
clink_suffixes += ('h', 'll', 's')
soregex = re.compile(r'.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
# Environment variables that each lang uses.
cflags_mapping = {'c': 'CFLAGS',
'cpp': 'CXXFLAGS',
'cuda': 'CUFLAGS',
'objc': 'OBJCFLAGS',
'objcpp': 'OBJCXXFLAGS',
'fortran': 'FFLAGS',
'd': 'DFLAGS',
'vala': 'VALAFLAGS',
'rust': 'RUSTFLAGS'}
# All these are only for C-linkable languages; see `clink_langs` above.
def sort_clink(lang):
    '''
    Sorting function to sort the list of languages according to
    reversed(compilers.clink_langs) and append the unknown langs in the end.
    The purpose is to prefer C over C++ for files that can be compiled by
    both such as assembly, C, etc. Also applies to ObjC, ObjC++, etc.
    '''
    try:
        # Known languages sort by their (negated) position in clink_langs.
        return -clink_langs.index(lang)
    except ValueError:
        # Unknown languages sort last.
        return 1
def is_header(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) has a header-file suffix."""
    name = getattr(fname, 'fname', fname)
    return name.split('.')[-1] in header_suffixes
def is_source(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) has a compilable-source suffix (case-insensitive)."""
    name = getattr(fname, 'fname', fname)
    return name.split('.')[-1].lower() in clink_suffixes
def is_assembly(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) has an assembly suffix ('.s', case-insensitive)."""
    name = getattr(fname, 'fname', fname)
    return name.split('.')[-1].lower() == 's'
def is_llvm_ir(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) has the LLVM IR suffix '.ll' (case-sensitive)."""
    name = getattr(fname, 'fname', fname)
    return name.split('.')[-1] == 'll'
def is_object(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) has an object-file suffix."""
    name = getattr(fname, 'fname', fname)
    return name.split('.')[-1] in obj_suffixes
def is_library(fname):
    """Return True if *fname* (a str or a File-like object with a .fname
    attribute) looks like a library: either a versioned .so name or a
    recognized library suffix."""
    name = getattr(fname, 'fname', fname)
    # Versioned shared libraries (libfoo.so.1.2.3) match via regex.
    if soregex.match(name):
        return True
    return name.split('.')[-1] in lib_suffixes
gnulike_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
armclang_buildtype_args = {'plain': [],
'debug': ['-O0', '-g'],
'debugoptimized': ['-O1', '-g'],
'release': ['-Os'],
'minsize': ['-Oz'],
'custom': [],
}
cuda_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
}
arm_buildtype_args = {'plain': [],
'debug': ['-O0', '--debug'],
'debugoptimized': ['-O1', '--debug'],
'release': ['-O3', '-Otime'],
'minsize': ['-O3', '-Ospace'],
'custom': [],
}
ccrx_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
msvc_buildtype_args = {'plain': [],
'debug': ["/ZI", "/Ob0", "/Od", "/RTC1"],
'debugoptimized': ["/Zi", "/Ob1"],
'release': ["/Ob2", "/Gw"],
'minsize': ["/Zi", "/Gw"],
'custom': [],
}
pgi_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
apple_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
gnulike_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': ['-Wl,-O1'],
'minsize': [],
'custom': [],
}
arm_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
ccrx_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
pgi_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
msvc_buildtype_linker_args = {'plain': [],
'debug': [],
'debugoptimized': [],
# The otherwise implicit REF and ICF linker
# optimisations are disabled by /DEBUG.
# REF implies ICF.
'release': ['/OPT:REF'],
'minsize': ['/INCREMENTAL:NO', '/OPT:REF'],
'custom': [],
}
java_buildtype_args = {'plain': [],
'debug': ['-g'],
'debugoptimized': ['-g'],
'release': [],
'minsize': [],
'custom': [],
}
rust_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
d_gdc_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-finline-functions'],
'release': ['-frelease', '-finline-functions'],
'minsize': [],
'custom': [],
}
d_ldc_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-enable-inlining', '-Hkeep-all-bodies'],
'release': ['-release', '-enable-inlining', '-Hkeep-all-bodies'],
'minsize': [],
'custom': [],
}
d_dmd_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-inline'],
'release': ['-release', '-inline'],
'minsize': [],
'custom': [],
}
mono_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': ['-optimize+'],
'release': ['-optimize+'],
'minsize': [],
'custom': [],
}
swift_buildtype_args = {'plain': [],
'debug': [],
'debugoptimized': [],
'release': [],
'minsize': [],
'custom': [],
}
gnu_winlibs = ['-lkernel32', '-luser32', '-lgdi32', '-lwinspool', '-lshell32',
'-lole32', '-loleaut32', '-luuid', '-lcomdlg32', '-ladvapi32']
msvc_winlibs = ['kernel32.lib', 'user32.lib', 'gdi32.lib',
'winspool.lib', 'shell32.lib', 'ole32.lib', 'oleaut32.lib',
'uuid.lib', 'comdlg32.lib', 'advapi32.lib']
gnu_color_args = {'auto': ['-fdiagnostics-color=auto'],
'always': ['-fdiagnostics-color=always'],
'never': ['-fdiagnostics-color=never'],
}
clang_color_args = {'auto': ['-Xclang', '-fcolor-diagnostics'],
'always': ['-Xclang', '-fcolor-diagnostics'],
'never': ['-Xclang', '-fno-color-diagnostics'],
}
arm_optimization_args = {'0': ['-O0'],
'g': ['-g'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': [],
}
armclang_optimization_args = {'0': ['-O0'],
'g': ['-g'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os']
}
clike_optimization_args = {'0': [],
'g': [],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
}
gnu_optimization_args = {'0': [],
'g': ['-Og'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3'],
's': ['-Os'],
}
ccrx_optimization_args = {'0': ['-optimize=0'],
'g': ['-optimize=0'],
'1': ['-optimize=1'],
'2': ['-optimize=2'],
'3': ['-optimize=max'],
's': ['-optimize=2', '-size']
}
msvc_optimization_args = {'0': [],
'g': ['/O0'],
'1': ['/O1'],
'2': ['/O2'],
'3': ['/O2'],
's': ['/O1'], # Implies /Os.
}
cuda_optimization_args = {'0': [],
'g': ['-O0'],
'1': ['-O1'],
'2': ['-O2'],
'3': ['-O3', '-Otime'],
's': ['-O3', '-Ospace']
}
cuda_debug_args = {False: [],
True: ['-g']}
clike_debug_args = {False: [],
True: ['-g']}
msvc_debug_args = {False: [],
True: []} # Fixme!
ccrx_debug_args = {False: [],
True: ['-debug']}
base_options = {'b_pch': coredata.UserBooleanOption('b_pch', 'Use precompiled headers', True),
'b_lto': coredata.UserBooleanOption('b_lto', 'Use link time optimization', False),
'b_sanitize': coredata.UserComboOption('b_sanitize',
'Code sanitizer to use',
['none', 'address', 'thread', 'undefined', 'memory', 'address,undefined'],
'none'),
'b_lundef': coredata.UserBooleanOption('b_lundef', 'Use -Wl,--no-undefined when linking', True),
'b_asneeded': coredata.UserBooleanOption('b_asneeded', 'Use -Wl,--as-needed when linking', True),
'b_pgo': coredata.UserComboOption('b_pgo', 'Use profile guided optimization',
['off', 'generate', 'use'],
'off'),
'b_coverage': coredata.UserBooleanOption('b_coverage',
'Enable coverage tracking.',
False),
'b_colorout': coredata.UserComboOption('b_colorout', 'Use colored output',
['auto', 'always', 'never'],
'always'),
'b_ndebug': coredata.UserComboOption('b_ndebug', 'Disable asserts',
['true', 'false', 'if-release'], 'false'),
'b_staticpic': coredata.UserBooleanOption('b_staticpic',
'Build static libraries as position independent',
True),
'b_pie': coredata.UserBooleanOption('b_pie',
'Build executables as position independent',
False),
'b_bitcode': coredata.UserBooleanOption('b_bitcode',
'Generate and embed bitcode (only macOS and iOS)',
False),
'b_vscrt': coredata.UserComboOption('b_vscrt', 'VS run-time library type to use.',
['none', 'md', 'mdd', 'mt', 'mtd', 'from_buildtype'],
'from_buildtype'),
}
gnulike_instruction_set_args = {'mmx': ['-mmmx'],
'sse': ['-msse'],
'sse2': ['-msse2'],
'sse3': ['-msse3'],
'ssse3': ['-mssse3'],
'sse41': ['-msse4.1'],
'sse42': ['-msse4.2'],
'avx': ['-mavx'],
'avx2': ['-mavx2'],
'neon': ['-mfpu=neon'],
}
vs32_instruction_set_args = {'mmx': ['/arch:SSE'], # There does not seem to be a flag just for MMX
'sse': ['/arch:SSE'],
'sse2': ['/arch:SSE2'],
'sse3': ['/arch:AVX'], # VS leaped from SSE2 directly to AVX.
'sse41': ['/arch:AVX'],
'sse42': ['/arch:AVX'],
'avx': ['/arch:AVX'],
'avx2': ['/arch:AVX2'],
'neon': None,
}
# The 64 bit compiler defaults to /arch:avx.
vs64_instruction_set_args = {'mmx': ['/arch:AVX'],
'sse': ['/arch:AVX'],
'sse2': ['/arch:AVX'],
'sse3': ['/arch:AVX'],
'ssse3': ['/arch:AVX'],
'sse41': ['/arch:AVX'],
'sse42': ['/arch:AVX'],
'avx': ['/arch:AVX'],
'avx2': ['/arch:AVX2'],
'neon': None,
}
gnu_symbol_visibility_args = {'': [],
'default': ['-fvisibility=default'],
'internal': ['-fvisibility=internal'],
'hidden': ['-fvisibility=hidden'],
'protected': ['-fvisibility=protected'],
'inlineshidden': ['-fvisibility=hidden', '-fvisibility-inlines-hidden'],
}
def sanitizer_compile_args(value):
    """Return compile flags enabling the given sanitizer value.

    `value` is the b_sanitize option string ('none', 'address',
    'undefined', 'address,undefined', ...); 'none' yields no flags.
    """
    if value == 'none':
        return []
    flags = ['-fsanitize=' + value]
    # ASan stack traces need frame pointers; this also covers combined
    # values such as 'address,undefined'.
    if 'address' in value:
        flags.append('-fno-omit-frame-pointer')
    return flags
def sanitizer_link_args(value):
    """Return link flags for the given b_sanitize value ('none' -> [])."""
    return [] if value == 'none' else ['-fsanitize=' + value]
def option_enabled(boptions, options, option):
    """Fetch a base option's user value, defaulting to False.

    Returns the stored `.value` only when `option` is listed in
    `boptions` (the options this compiler supports) and present in
    `options`; any missing key yields False.
    """
    try:
        return options[option].value if option in boptions else False
    except KeyError:
        return False
def get_base_compile_args(options, compiler):
    """Compute compile arguments derived from the b_* base options.

    Each option lookup is wrapped in suppress(KeyError) because not every
    compiler registers every base option; missing ones are simply skipped.
    Argument order matters and mirrors the option order below.
    """
    args = []
    # FIXME, gcc/clang specific.
    with contextlib.suppress(KeyError):
        if options['b_lto'].value:
            args.append('-flto')
    with contextlib.suppress(KeyError):
        args += compiler.get_colorout_args(options['b_colorout'].value)
    with contextlib.suppress(KeyError):
        args += sanitizer_compile_args(options['b_sanitize'].value)
    with contextlib.suppress(KeyError):
        pgo_val = options['b_pgo'].value
        if pgo_val == 'generate':
            args.extend(compiler.get_profile_generate_args())
        elif pgo_val == 'use':
            args.extend(compiler.get_profile_use_args())
    with contextlib.suppress(KeyError):
        if options['b_coverage'].value:
            args += compiler.get_coverage_args()
    with contextlib.suppress(KeyError):
        ndebug = options['b_ndebug'].value
        if ndebug == 'true' or (ndebug == 'if-release' and
                                options['buildtype'].value == 'release'):
            args += ['-DNDEBUG']
    # This lookup cannot raise KeyError: option_enabled defaults to False.
    if option_enabled(compiler.base_options, options, 'b_bitcode'):
        args.append('-fembed-bitcode')
    with contextlib.suppress(KeyError):
        crt_val = options['b_vscrt'].value
        buildtype = options['buildtype'].value
        # Only MSVC-like compilers implement get_crt_compile_args.
        with contextlib.suppress(AttributeError):
            args += compiler.get_crt_compile_args(crt_val, buildtype)
    return args
def get_base_link_args(options, linker, is_shared_module):
    """Compute link arguments derived from the b_* base options.

    Mirrors get_base_compile_args: options the linker does not register
    are skipped via suppress(KeyError); argument order is significant.
    """
    args = []
    # FIXME, gcc/clang specific.
    with contextlib.suppress(KeyError):
        if options['b_lto'].value:
            args.append('-flto')
    with contextlib.suppress(KeyError):
        args += sanitizer_link_args(options['b_sanitize'].value)
    with contextlib.suppress(KeyError):
        pgo_val = options['b_pgo'].value
        if pgo_val == 'generate':
            args.extend(linker.get_profile_generate_args())
        elif pgo_val == 'use':
            args.extend(linker.get_profile_use_args())
    with contextlib.suppress(KeyError):
        if options['b_coverage'].value:
            args += linker.get_coverage_link_args()
    # option_enabled defaults to False, so no exception handling needed here.
    if not is_shared_module and option_enabled(linker.base_options, options, 'b_lundef'):
        args.append('-Wl,--no-undefined')
    as_needed = option_enabled(linker.base_options, options, 'b_asneeded')
    bitcode = option_enabled(linker.base_options, options, 'b_bitcode')
    # Shared modules cannot be built with bitcode_bundle because
    # -bitcode_bundle is incompatible with -undefined and -bundle
    if bitcode and not is_shared_module:
        args.append('-Wl,-bitcode_bundle')
    elif as_needed:
        # -Wl,-dead_strip_dylibs is incompatible with bitcode
        args.append(linker.get_asneeded_args())
    with contextlib.suppress(KeyError):
        crt_val = options['b_vscrt'].value
        buildtype = options['buildtype'].value
        # Only MSVC-like linkers implement get_crt_link_args.
        with contextlib.suppress(AttributeError):
            args += linker.get_crt_link_args(crt_val, buildtype)
    return args
def prepare_rpaths(raw_rpaths, build_dir, from_dir):
    """Convert raw rpath entries to internal (relative-where-possible) form
    and order them so in-build-dir paths take precedence."""
    evaluated = [evaluate_rpath(p, build_dir, from_dir) for p in raw_rpaths]
    return order_rpaths(evaluated)
def order_rpaths(rpath_list):
    """Stable-partition rpaths: relative entries first, absolute ones last.

    Rpaths pointing inside our build dir (stored as relative paths) must
    override those pointing elsewhere in the file system, so that built
    binaries prefer our freshly-built libraries over installed copies
    such as /lib/x86_64-linux-gnu. Both groups keep their original order.
    """
    relative = [p for p in rpath_list if not os.path.isabs(p)]
    absolute = [p for p in rpath_list if os.path.isabs(p)]
    return relative + absolute
def evaluate_rpath(p, build_dir, from_dir):
    """Rewrite one rpath entry relative to `from_dir` within the build dir.

    Returns '' when p equals from_dir (os.path.relpath errors out there),
    absolute paths unchanged (they may point outside the build dir), and
    otherwise the path to build_dir/p relative to build_dir/from_dir.
    """
    if p == from_dir:
        return ''
    if os.path.isabs(p):
        return p
    return os.path.relpath(os.path.join(build_dir, p), os.path.join(build_dir, from_dir))
class CrossNoRunException(MesonException):
    """Raised when a compiler check cannot execute its test binary.

    NOTE(review): judging by the name this covers cross builds where the
    target binary cannot be run on the build machine -- confirm at the
    raise sites, which are outside this chunk.
    """
    pass
class RunResult:
    """Outcome of a compile-and-run compiler check.

    compiled   -- whether the test program built successfully
    returncode -- exit status of the test run (999 when it never ran)
    stdout     -- captured standard output ('UNDEFINED' when it never ran)
    stderr     -- captured standard error ('UNDEFINED' when it never ran)
    """
    def __init__(self, compiled, returncode=999, stdout='UNDEFINED', stderr='UNDEFINED'):
        self.stdout = stdout
        self.stderr = stderr
        self.returncode = returncode
        self.compiled = compiled
class CompilerArgs(list):
    '''
    Class derived from list() that manages a list of compiler arguments. Should
    be used while constructing compiler arguments from various sources. Can be
    operated with ordinary lists, so this does not need to be used everywhere.
    All arguments must be inserted and stored in GCC-style (-lfoo, -Idir, etc)
    and can converted to the native type of each compiler by using the
    .to_native() method to which you must pass an instance of the compiler or
    the compiler class.
    New arguments added to this class (either with .append(), .extend(), or +=)
    are added in a way that ensures that they override previous arguments.
    For example:
    >>> a = ['-Lfoo', '-lbar']
    >>> a += ['-Lpho', '-lbaz']
    >>> print(a)
    ['-Lpho', '-Lfoo', '-lbar', '-lbaz']
    Arguments will also be de-duped if they can be de-duped safely.
    Note that because of all this, this class is not commutative and does not
    preserve the order of arguments if it is safe to not. For example:
    >>> ['-Ifoo', '-Ibar'] + ['-Ifez', '-Ibaz', '-Werror']
    ['-Ifez', '-Ibaz', '-Ifoo', '-Ibar', '-Werror']
    >>> ['-Ifez', '-Ibaz', '-Werror'] + ['-Ifoo', '-Ibar']
    ['-Ifoo', '-Ibar', '-Ifez', '-Ibaz', '-Werror']
    '''
    # NOTE: currently this class is only for C-like compilers, but it can be
    # extended to other languages easily. Just move the following to the
    # compiler class and initialize when self.compiler is set.
    # Arg prefixes that override by prepending instead of appending
    prepend_prefixes = ('-I', '-L')
    # Arg prefixes and args that must be de-duped by returning 2
    dedup2_prefixes = ('-I', '-L', '-D', '-U')
    dedup2_suffixes = ()
    dedup2_args = ()
    # Arg prefixes and args that must be de-duped by returning 1
    #
    # NOTE: not thorough. A list of potential corner cases can be found in
    # https://github.com/mesonbuild/meson/pull/4593#pullrequestreview-182016038
    dedup1_prefixes = ('-l', '-Wl,-l', '-Wl,--export-dynamic')
    dedup1_suffixes = ('.lib', '.dll', '.so', '.dylib', '.a')
    # Match a .so of the form path/to/libfoo.so.0.1.0
    # Only UNIX shared libraries require this. Others have a fixed extension.
    dedup1_regex = re.compile(r'([\/\\]|\A)lib.*\.so(\.[0-9]+)?(\.[0-9]+)?(\.[0-9]+)?$')
    dedup1_args = ('-c', '-S', '-E', '-pipe', '-pthread')
    # The Compiler (or StaticLinker) used by to_native(); set in _check_args.
    compiler = None
    def _check_args(self, args):
        """Parse constructor arguments: (compiler[, initial_list]) in either
        order. Sets self.compiler as a side effect and returns the initial
        list contents (empty when no list was given)."""
        cargs = []
        if len(args) > 2:
            raise TypeError("CompilerArgs() only accepts at most 2 arguments: "
                            "The compiler, and optionally an initial list")
        elif not args:
            return cargs
        elif len(args) == 1:
            if isinstance(args[0], (Compiler, StaticLinker)):
                self.compiler = args[0]
            else:
                raise TypeError("you must pass a Compiler instance as one of "
                                "the arguments")
        elif len(args) == 2:
            if isinstance(args[0], (Compiler, StaticLinker)):
                self.compiler = args[0]
                cargs = args[1]
            elif isinstance(args[1], (Compiler, StaticLinker)):
                cargs = args[0]
                self.compiler = args[1]
            else:
                raise TypeError("you must pass a Compiler instance as one of "
                                "the two arguments")
        else:
            raise AssertionError('Not reached')
        return cargs
    def __init__(self, *args):
        super().__init__(self._check_args(args))
    @classmethod
    def _can_dedup(cls, arg):
        '''
        Returns whether the argument can be safely de-duped. This is dependent
        on three things:
        a) Whether an argument can be 'overridden' by a later argument. For
        example, -DFOO defines FOO and -UFOO undefines FOO. In this case, we
        can safely remove the previous occurrence and add a new one. The same
        is true for include paths and library paths with -I and -L. For
        these we return `2`. See `dedup2_prefixes` and `dedup2_args`.
        b) Arguments that once specified cannot be undone, such as `-c` or
        `-pipe`. New instances of these can be completely skipped. For these
        we return `1`. See `dedup1_prefixes` and `dedup1_args`.
        c) Whether it matters where or how many times on the command-line
        a particular argument is present. This can matter for symbol
        resolution in static or shared libraries, so we cannot de-dup or
        reorder them. For these we return `0`. This is the default.
        In addition to these, we handle library arguments specially.
        With GNU ld, we surround library arguments with -Wl,--start/end-group
        to recursively search for symbols in the libraries. This is not needed
        with other linkers.
        '''
        # A standalone argument must never be deduplicated because it is
        # defined by what comes _after_ it. Thus dedupping this:
        # -D FOO -D BAR
        # would yield either
        # -D FOO BAR
        # or
        # FOO -D BAR
        # both of which are invalid.
        if arg in cls.dedup2_prefixes:
            return 0
        if arg in cls.dedup2_args or \
           arg.startswith(cls.dedup2_prefixes) or \
           arg.endswith(cls.dedup2_suffixes):
            return 2
        if arg in cls.dedup1_args or \
           arg.startswith(cls.dedup1_prefixes) or \
           arg.endswith(cls.dedup1_suffixes) or \
           re.search(cls.dedup1_regex, arg):
            return 1
        return 0
    @classmethod
    def _should_prepend(cls, arg):
        # -I/-L args override by being placed *before* earlier occurrences.
        if arg.startswith(cls.prepend_prefixes):
            return True
        return False
    def to_native(self, copy=False):
        """Convert the stored GCC-style arguments to the native syntax of
        self.compiler. With copy=True the list itself is left unmodified;
        otherwise group markers may be inserted in place."""
        # Check if we need to add --start/end-group for circular dependencies
        # between static libraries, and for recursively searching for symbols
        # needed by static libraries that are provided by object files or
        # shared libraries.
        if copy:
            new = self.copy()
        else:
            new = self
        if get_compiler_uses_gnuld(self.compiler):
            # soregex is a module-level pattern defined elsewhere in this file
            # (matches versioned .so names, like dedup1_regex above).
            global soregex
            group_start = -1
            group_end = -1
            for i, each in enumerate(new):
                if not each.startswith(('-Wl,-l', '-l')) and not each.endswith('.a') and \
                   not soregex.match(each):
                    continue
                group_end = i
                if group_start < 0:
                    # First occurrence of a library
                    group_start = i
            if group_start >= 0:
                # Last occurrence of a library
                new.insert(group_end + 1, '-Wl,--end-group')
                new.insert(group_start, '-Wl,--start-group')
        return self.compiler.unix_args_to_native(new)
    def append_direct(self, arg):
        '''
        Append the specified argument without any reordering or de-dup
        except for absolute paths where the order of include search directories
        is not relevant
        '''
        if os.path.isabs(arg):
            self.append(arg)
        else:
            super().append(arg)
    def extend_direct(self, iterable):
        '''
        Extend using the elements in the specified iterable without any
        reordering or de-dup except for absolute paths where the order of
        include search directories is not relevant
        '''
        for elem in iterable:
            self.append_direct(elem)
    def extend_preserving_lflags(self, iterable):
        # Split off -l/-L args and append them via extend_direct so their
        # relative order is preserved; everything else goes through the
        # normal overriding extend().
        normal_flags = []
        lflags = []
        for i in iterable:
            if i.startswith('-l') or i.startswith('-L'):
                lflags.append(i)
            else:
                normal_flags.append(i)
        self.extend(normal_flags)
        self.extend_direct(lflags)
    def __add__(self, args):
        new = CompilerArgs(self, self.compiler)
        new += args
        return new
    def __iadd__(self, args):
        '''
        Add two CompilerArgs while taking into account overriding of arguments
        and while preserving the order of arguments as much as possible
        '''
        pre = []
        post = []
        if not isinstance(args, list):
            raise TypeError('can only concatenate list (not "{}") to list'.format(args))
        for arg in args:
            # If the argument can be de-duped, do it either by removing the
            # previous occurrence of it and adding a new one, or not adding the
            # new occurrence.
            dedup = self._can_dedup(arg)
            if dedup == 1:
                # Argument already exists and adding a new instance is useless
                if arg in self or arg in pre or arg in post:
                    continue
            if dedup == 2:
                # Remove all previous occurrences of the arg and add it anew
                if arg in self:
                    self.remove(arg)
                if arg in pre:
                    pre.remove(arg)
                if arg in post:
                    post.remove(arg)
            if self._should_prepend(arg):
                pre.append(arg)
            else:
                post.append(arg)
        # Insert at the beginning
        self[:0] = pre
        # Append to the end
        super().__iadd__(post)
        return self
    def __radd__(self, args):
        new = CompilerArgs(args, self.compiler)
        new += self
        return new
    def __mul__(self, args):
        raise TypeError("can't multiply compiler arguments")
    def __imul__(self, args):
        raise TypeError("can't multiply compiler arguments")
    def __rmul__(self, args):
        raise TypeError("can't multiply compiler arguments")
    def append(self, arg):
        # Route through __iadd__ so dedup/ordering rules apply.
        self.__iadd__([arg])
    def extend(self, args):
        # Route through __iadd__ so dedup/ordering rules apply.
        self.__iadd__(args)
class Compiler:
    """Base class for per-language compiler implementations.

    Subclasses are expected to provide attributes referenced here but not
    set in this class: `language`, `id`, and `is_cross` (see get_options).
    Most check/feature methods raise EnvironmentException by default and
    are overridden by languages that support them.
    """
    # Libraries to ignore in find_library() since they are provided by the
    # compiler or the C library. Currently only used for MSVC.
    ignore_libs = ()
    # Libraries that are internal compiler implementations, and must not be
    # manually searched.
    internal_libs = ()
    # Cache for the result of compiler checks which can be cached
    # (class-level dict, shared by every compiler instance).
    compiler_check_cache = {}
    def __init__(self, exelist, version, **kwargs):
        """Store the compiler argv (str or list) and its version.

        The optional 'full_version' kwarg keeps the verbatim version
        string; file_suffixes fall back to the module-level lang_suffixes
        table keyed by the subclass's `language`.
        """
        if isinstance(exelist, str):
            self.exelist = [exelist]
        elif isinstance(exelist, list):
            self.exelist = exelist
        else:
            raise TypeError('Unknown argument to Compiler')
        # In case it's been overridden by a child class already
        if not hasattr(self, 'file_suffixes'):
            self.file_suffixes = lang_suffixes[self.language]
        if not hasattr(self, 'can_compile_suffixes'):
            self.can_compile_suffixes = set(self.file_suffixes)
        self.default_suffix = self.file_suffixes[0]
        self.version = version
        if 'full_version' in kwargs:
            self.full_version = kwargs['full_version']
        else:
            self.full_version = None
        self.base_options = []
    def __repr__(self):
        repr_str = "<{0}: v{1} `{2}`>"
        return repr_str.format(self.__class__.__name__, self.version,
                               ' '.join(self.exelist))
    def can_compile(self, src):
        """Return True when src's file extension is one this compiler handles."""
        if hasattr(src, 'fname'):
            src = src.fname
        suffix = os.path.splitext(src)[1].lower()
        if suffix and suffix[1:] in self.can_compile_suffixes:
            return True
        return False
    def get_id(self):
        return self.id
    def get_language(self):
        return self.language
    def get_display_language(self):
        return self.language.capitalize()
    def get_default_suffix(self):
        return self.default_suffix
    def get_define(self, dname, prefix, env, extra_args, dependencies):
        raise EnvironmentException('%s does not support get_define ' % self.get_id())
    def compute_int(self, expression, low, high, guess, prefix, env, extra_args, dependencies):
        raise EnvironmentException('%s does not support compute_int ' % self.get_id())
    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        raise EnvironmentException('%s does not support compute_parameters_with_absolute_paths ' % self.get_id())
    def has_members(self, typename, membernames, prefix, env, *, extra_args=None, dependencies=None):
        raise EnvironmentException('%s does not support has_member(s) ' % self.get_id())
    def has_type(self, typename, prefix, env, extra_args, *, dependencies=None):
        raise EnvironmentException('%s does not support has_type ' % self.get_id())
    def symbols_have_underscore_prefix(self, env):
        raise EnvironmentException('%s does not support symbols_have_underscore_prefix ' % self.get_id())
    def get_exelist(self):
        # Return a copy so callers cannot mutate our stored argv.
        return self.exelist[:]
    def get_builtin_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support get_builtin_define.' % self.id)
    def has_builtin_define(self, *args, **kwargs):
        raise EnvironmentException('%s does not support has_builtin_define.' % self.id)
    def get_always_args(self):
        return []
    def can_linker_accept_rsp(self):
        """
        Determines whether the linker can accept arguments using the @rsp syntax.
        """
        return mesonlib.is_windows()
    def get_linker_always_args(self):
        return []
    def get_linker_lib_prefix(self):
        return ''
    def gen_import_library_args(self, implibname):
        """
        Used only on Windows for libraries that need an import library.
        This currently means C, C++, Fortran.
        """
        return []
    def get_preproc_flags(self):
        # Only C-like languages honor CPPFLAGS from the environment.
        if self.get_language() in ('c', 'cpp', 'objc', 'objcpp'):
            return os.environ.get('CPPFLAGS', '')
        return ''
    def get_args_from_envvars(self):
        """
        Returns a tuple of (compile_flags, link_flags) for the specified language
        from the inherited environment
        """
        def log_var(var, val):
            if val:
                mlog.log('Appending {} from environment: {!r}'.format(var, val))
        lang = self.get_language()
        compiler_is_linker = False
        if hasattr(self, 'get_linker_exelist'):
            compiler_is_linker = (self.get_exelist() == self.get_linker_exelist())
        if lang not in cflags_mapping:
            return [], []
        compile_flags = os.environ.get(cflags_mapping[lang], '')
        log_var(cflags_mapping[lang], compile_flags)
        compile_flags = shlex.split(compile_flags)
        # Link flags (same for all languages)
        link_flags = os.environ.get('LDFLAGS', '')
        log_var('LDFLAGS', link_flags)
        link_flags = shlex.split(link_flags)
        if compiler_is_linker:
            # When the compiler is used as a wrapper around the linker (such as
            # with GCC and Clang), the compile flags can be needed while linking
            # too. This is also what Autotools does. However, we don't want to do
            # this when the linker is stand-alone such as with MSVC C/C++, etc.
            link_flags = compile_flags + link_flags
        # Pre-processor flags (not for fortran or D)
        preproc_flags = self.get_preproc_flags()
        log_var('CPPFLAGS', preproc_flags)
        preproc_flags = shlex.split(preproc_flags)
        compile_flags += preproc_flags
        return compile_flags, link_flags
    def get_options(self):
        """Build the per-language user options (<lang>_args, <lang>_link_args).

        NOTE(review): reads self.is_cross, which this base class never sets --
        subclasses are expected to provide it.
        """
        opts = {} # build afresh every time
        # Take default values from env variables.
        if not self.is_cross:
            compile_args, link_args = self.get_args_from_envvars()
        else:
            compile_args = []
            link_args = []
        description = 'Extra arguments passed to the {}'.format(self.get_display_language())
        opts.update({
            self.language + '_args': coredata.UserArrayOption(
                self.language + '_args',
                description + ' compiler',
                compile_args, shlex_split=True, user_input=True, allow_dups=True),
            self.language + '_link_args': coredata.UserArrayOption(
                self.language + '_link_args',
                description + ' linker',
                link_args, shlex_split=True, user_input=True, allow_dups=True),
        })
        return opts
    def get_option_compile_args(self, options):
        return []
    def get_option_link_args(self, options):
        return []
    def check_header(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header checks.' % self.get_display_language())
    def has_header(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header checks.' % self.get_display_language())
    def has_header_symbol(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support header symbol checks.' % self.get_display_language())
    def compiles(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support compile checks.' % self.get_display_language())
    def links(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support link checks.' % self.get_display_language())
    def run(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support run checks.' % self.get_display_language())
    def sizeof(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support sizeof checks.' % self.get_display_language())
    def alignment(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support alignment checks.' % self.get_display_language())
    def has_function(self, *args, **kwargs):
        raise EnvironmentException('Language %s does not support function checks.' % self.get_display_language())
    @classmethod
    def unix_args_to_native(cls, args):
        "Always returns a copy that can be independently mutated"
        return args[:]
    def find_library(self, *args, **kwargs):
        raise EnvironmentException('Language {} does not support library finding.'.format(self.get_display_language()))
    def get_library_dirs(self, *args, **kwargs):
        return ()
    def has_multi_arguments(self, args, env):
        raise EnvironmentException(
            'Language {} does not support has_multi_arguments.'.format(
                self.get_display_language()))
    def has_multi_link_arguments(self, args, env):
        raise EnvironmentException(
            'Language {} does not support has_multi_link_arguments.'.format(
                self.get_display_language()))
    def _get_compile_output(self, dirname, mode):
        """Return the output file path for a compile check, or None for
        preprocess mode (whose output goes to stdout)."""
        # In pre-processor mode, the output is sent to stdout and discarded
        if mode == 'preprocess':
            return None
        # Extension only matters if running results; '.exe' is
        # guaranteed to be executable on every platform.
        if mode == 'link':
            suffix = 'exe'
        else:
            suffix = 'obj'
        return os.path.join(dirname, 'output.' + suffix)
    @contextlib.contextmanager
    def compile(self, code, extra_args=None, mode='link', want_output=False):
        """Context manager: compile `code` (str or mesonlib.File) in a temp
        dir and yield the Popen-like result (with .stdo/.stde/.commands).

        Results are cached in the class-level compiler_check_cache unless
        want_output is True.
        NOTE(review): if mode == 'preprocess' and want_output is True,
        `output` is never assigned and the is-unbound access below would
        raise -- presumably callers never combine the two; confirm.
        """
        if extra_args is None:
            textra_args = None
            extra_args = []
        else:
            textra_args = tuple(extra_args)
        key = (code, textra_args, mode)
        if not want_output:
            if key in self.compiler_check_cache:
                p = self.compiler_check_cache[key]
                mlog.debug('Using cached compile:')
                mlog.debug('Cached command line: ', ' '.join(p.commands), '\n')
                mlog.debug('Code:\n', code)
                mlog.debug('Cached compiler stdout:\n', p.stdo)
                mlog.debug('Cached compiler stderr:\n', p.stde)
                yield p
                return
        try:
            with tempfile.TemporaryDirectory() as tmpdirname:
                if isinstance(code, str):
                    srcname = os.path.join(tmpdirname,
                                           'testfile.' + self.default_suffix)
                    with open(srcname, 'w') as ofile:
                        ofile.write(code)
                elif isinstance(code, mesonlib.File):
                    srcname = code.fname
                # Construct the compiler command-line
                commands = CompilerArgs(self)
                commands.append(srcname)
                commands += self.get_always_args()
                if mode == 'compile':
                    commands += self.get_compile_only_args()
                # Preprocess mode outputs to stdout, so no output args
                if mode == 'preprocess':
                    commands += self.get_preprocess_only_args()
                else:
                    output = self._get_compile_output(tmpdirname, mode)
                    commands += self.get_output_args(output)
                # extra_args must be last because it could contain '/link' to
                # pass args to VisualStudio's linker. In that case everything
                # in the command line after '/link' is given to the linker.
                commands += extra_args
                # Generate full command-line with the exelist
                commands = self.get_exelist() + commands.to_native()
                mlog.debug('Running compile:')
                mlog.debug('Working directory: ', tmpdirname)
                mlog.debug('Command line: ', ' '.join(commands), '\n')
                mlog.debug('Code:\n', code)
                os_env = os.environ.copy()
                os_env['LC_ALL'] = 'C'
                p, p.stdo, p.stde = Popen_safe(commands, cwd=tmpdirname, env=os_env)
                mlog.debug('Compiler stdout:\n', p.stdo)
                mlog.debug('Compiler stderr:\n', p.stde)
                p.commands = commands
                p.input_name = srcname
                if want_output:
                    p.output_name = output
                else:
                    self.compiler_check_cache[key] = p
                yield p
        except (PermissionError, OSError):
            # On Windows antivirus programs and the like hold on to files so
            # they can't be deleted. There's not much to do in this case. Also,
            # catch OSError because the directory is then no longer empty.
            pass
    def get_colorout_args(self, colortype):
        return []
    # Some compilers (msvc) write debug info to a separate file.
    # These args specify where it should be written.
    def get_compile_debugfile_args(self, rel_obj, **kwargs):
        return []
    def get_link_debugfile_args(self, rel_obj):
        return []
    def get_std_shared_lib_link_args(self):
        return []
    def get_std_shared_module_link_args(self, options):
        # By default shared modules link like shared libraries.
        return self.get_std_shared_lib_link_args()
    def get_link_whole_for(self, args):
        # Linking "nothing" whole is trivially supported by every language.
        if isinstance(args, list) and not args:
            return []
        raise EnvironmentException('Language %s does not support linking whole archives.' % self.get_display_language())
    # Compiler arguments needed to enable the given instruction set.
    # May be [] meaning nothing needed or None meaning the given set
    # is not supported.
    def get_instruction_set_args(self, instruction_set):
        return None
    def build_unix_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
        """Build the -Wl,-rpath (and related) linker arguments for ELF/Mach-O
        targets, padding the rpath so install-time editing can shrink it
        in place without relinking."""
        if not rpath_paths and not install_rpath and not build_rpath:
            return []
        args = []
        if mesonlib.is_osx():
            # Ensure that there is enough space for install_name_tool in-place editing of large RPATHs
            args.append('-Wl,-headerpad_max_install_names')
            # @loader_path is the equivalent of $ORIGIN on macOS
            # https://stackoverflow.com/q/26280738
            origin_placeholder = '@loader_path'
        else:
            origin_placeholder = '$ORIGIN'
        # The rpaths we write must be relative if they point to the build dir,
        # because otherwise they have different length depending on the build
        # directory. This breaks reproducible builds.
        processed_rpaths = prepare_rpaths(rpath_paths, build_dir, from_dir)
        # Need to deduplicate rpaths, as macOS's install_name_tool
        # is *very* allergic to duplicate -delete_rpath arguments
        # when calling depfixer on installation.
        all_paths = OrderedSet([os.path.join(origin_placeholder, p) for p in processed_rpaths])
        # Build_rpath is used as-is (it is usually absolute).
        if build_rpath != '':
            all_paths.add(build_rpath)
        if mesonlib.is_dragonflybsd() or mesonlib.is_openbsd():
            # This argument instructs the compiler to record the value of
            # ORIGIN in the .dynamic section of the elf. On Linux this is done
            # by default, but is not on dragonfly/openbsd for some reason. Without this
            # $ORIGIN in the runtime path will be undefined and any binaries
            # linked against local libraries will fail to resolve them.
            args.append('-Wl,-z,origin')
        if mesonlib.is_osx():
            # macOS does not support colon-separated strings in LC_RPATH,
            # hence we have to pass each path component individually
            args += ['-Wl,-rpath,' + rp for rp in all_paths]
        else:
            # In order to avoid relinking for RPATH removal, the binary needs to contain just
            # enough space in the ELF header to hold the final installation RPATH.
            paths = ':'.join(all_paths)
            if len(paths) < len(install_rpath):
                padding = 'X' * (len(install_rpath) - len(paths))
                if not paths:
                    paths = padding
                else:
                    paths = paths + ':' + padding
            args.append('-Wl,-rpath,' + paths)
        if get_compiler_is_linuxlike(self):
            # Rpaths to use while linking must be absolute. These are not
            # written to the binary. Needed only with GNU ld:
            # https://sourceware.org/bugzilla/show_bug.cgi?id=16936
            # Not needed on Windows or other platforms that don't use RPATH
            # https://github.com/mesonbuild/meson/issues/1897
            lpaths = ':'.join([os.path.join(build_dir, p) for p in rpath_paths])
            # clang expands '-Wl,rpath-link,' to ['-rpath-link'] instead of ['-rpath-link','']
            # This eats the next argument, which happens to be 'ldstdc++', causing link failures.
            # We can dodge this problem by not adding any rpath_paths if the argument is empty.
            if lpaths.strip() != '':
                args += ['-Wl,-rpath-link,' + lpaths]
        return args
    def thread_flags(self, env):
        return []
    def openmp_flags(self):
        raise EnvironmentException('Language %s does not support OpenMP flags.' % self.get_display_language())
    def language_stdlib_only_link_flags(self):
        # The linker flags needed to link the standard library of the current
        # language in. This is needed in cases where you e.g. combine D and C++
        # and both of which need to link their runtime library in or otherwise
        # building fails with undefined symbols.
        return []
    def gnu_symbol_visibility_args(self, vistype):
        return []
    def get_gui_app_args(self, value):
        return []
    def has_func_attribute(self, name, env):
        raise EnvironmentException(
            'Language {} does not support function attributes.'.format(self.get_display_language()))
    def get_pic_args(self):
        m = 'Language {} does not support position-independent code'
        raise EnvironmentException(m.format(self.get_display_language()))
    def get_pie_args(self):
        m = 'Language {} does not support position-independent executable'
        raise EnvironmentException(m.format(self.get_display_language()))
    def get_pie_link_args(self):
        m = 'Language {} does not support position-independent executable'
        raise EnvironmentException(m.format(self.get_display_language()))
    def get_argument_syntax(self):
        """Returns the argument family type.
        Compilers fall into families if they try to emulate the command line
        interface of another compiler. For example, clang is in the GCC family
        since it accepts most of the same arguments as GCC. ICL (ICC on
        windows) is in the MSVC family since it accepts most of the same
        arguments as MSVC.
        """
        return 'other'
    def get_profile_generate_args(self):
        raise EnvironmentException(
            '%s does not support get_profile_generate_args ' % self.get_id())
    def get_profile_use_args(self):
        raise EnvironmentException(
            '%s does not support get_profile_use_args ' % self.get_id())
    def get_undefined_link_args(self):
        '''
        Get args for allowing undefined symbols when linking to a shared library
        '''
        return []
    def remove_linkerlike_args(self, args):
        # Strip all -Wl,... pass-through-to-linker arguments.
        return [x for x in args if not x.startswith('-Wl')]
@enum.unique
class CompilerType(enum.Enum):
    """Compiler family plus target platform combination.

    Numeric values are grouped by family in tens: 0x GCC, 1x Clang,
    2x ICC, 3x ARM, 4x CCRX, 5x PGI.
    """
    GCC_STANDARD = 0
    GCC_OSX = 1
    GCC_MINGW = 2
    GCC_CYGWIN = 3
    CLANG_STANDARD = 10
    CLANG_OSX = 11
    CLANG_MINGW = 12
    # Possibly clang-cl?
    ICC_STANDARD = 20
    ICC_OSX = 21
    ICC_WIN = 22
    ARM_WIN = 30
    CCRX_WIN = 40
    PGI_STANDARD = 50
    @property
    def is_standard_compiler(self):
        # Only the GCC/Clang/ICC *_STANDARD variants count; ARM/CCRX/PGI
        # are excluded even though their names lack an OS tag.
        return self.name in ('GCC_STANDARD', 'CLANG_STANDARD', 'ICC_STANDARD')
    @property
    def is_osx_compiler(self):
        return self.name in ('GCC_OSX', 'CLANG_OSX', 'ICC_OSX')
    @property
    def is_windows_compiler(self):
        return self.name in ('GCC_MINGW', 'GCC_CYGWIN', 'CLANG_MINGW', 'ICC_WIN', 'ARM_WIN', 'CCRX_WIN')
def get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion):
    """Build the @rpath-based install_name for a macOS dylib.

    Note: `suffix` is accepted for signature parity with related helpers
    but the produced name always ends in '.dylib'.
    """
    parts = [prefix, shlib_name]
    if soversion is not None:
        parts.append('.' + soversion)
    parts.append('.dylib')
    return '@rpath/' + ''.join(parts)
def get_gcc_soname_args(compiler_type, prefix, shlib_name, suffix, soversion, darwin_versions, is_shared_module):
    """Return linker args embedding the shared-library name/version,
    dispatched on the compiler's target platform."""
    if compiler_type.is_standard_compiler:
        # ELF: embed an soname; append the version only when one is set.
        version_part = '' if soversion is None else '.' + soversion
        return ['-Wl,-soname,%s%s.%s%s' % (prefix, shlib_name, suffix, version_part)]
    if compiler_type.is_windows_compiler:
        # For PE/COFF the soname argument has no effect with GNU LD
        return []
    if compiler_type.is_osx_compiler:
        # Shared modules are loaded, not linked against, so they carry no
        # install_name.
        if is_shared_module:
            return []
        name = get_macos_dylib_install_name(prefix, shlib_name, suffix, soversion)
        args = ['-install_name', name]
        if darwin_versions:
            args += ['-compatibility_version', darwin_versions[0], '-current_version', darwin_versions[1]]
        return args
    raise RuntimeError('Not implemented yet.')
def get_compiler_is_linuxlike(compiler):
    """Truthy when the compiler targets a conventional (non-Apple,
    non-Windows) platform; falsy (None/False) otherwise.

    Preserves the original short-circuit semantics: objects without a
    compiler_type attribute yield None, not False.
    """
    ctype = getattr(compiler, 'compiler_type', None)
    if not ctype:
        return ctype
    return ctype.is_standard_compiler
def get_compiler_uses_gnuld(c):
    """Best-effort guess whether this compiler drives GNU ld."""
    # FIXME: Perhaps we should detect the linker in the environment?
    # FIXME: Assumes that *BSD use GNU ld, but they might start using lld soon
    gnu_ld_types = (CompilerType.GCC_STANDARD,
                    CompilerType.GCC_MINGW,
                    CompilerType.GCC_CYGWIN,
                    CompilerType.CLANG_STANDARD,
                    CompilerType.CLANG_MINGW,
                    CompilerType.ICC_STANDARD,
                    CompilerType.ICC_WIN)
    return getattr(c, 'compiler_type', None) in gnu_ld_types
def get_largefile_args(compiler):
    '''
    Enable transparent large-file-support for 32-bit UNIX systems
    '''
    if not get_compiler_is_linuxlike(compiler):
        # We don't enable -D_LARGEFILE64_SOURCE since that enables
        # transitionary features and must be enabled by programs that use
        # those features explicitly.
        return []
    # Enable large-file support unconditionally on all platforms other
    # than macOS and Windows. macOS is now 64-bit-only so it doesn't
    # need anything special, and Windows doesn't have automatic LFS.
    # You must use the 64-bit counterparts explicitly.
    # glibc, musl, and uclibc, and all BSD libcs support this. On Android,
    # support for transparent LFS is available depending on the version of
    # Bionic: https://github.com/android/platform_bionic#32-bit-abi-bugs
    # https://code.google.com/p/android/issues/detail?id=64613
    #
    # If this breaks your code, fix it! It's been 20+ years!
    return ['-D_FILE_OFFSET_BITS=64']
# TODO: The result from calling compiler should be cached. So that calling this
# function multiple times don't add latency.
def gnulike_default_include_dirs(compiler, lang):
if lang == 'cpp':
lang = 'c++'
env = os.environ.copy()
env["LC_ALL"] = 'C'
cmd = compiler + ['-x{}'.format(lang), '-E', '-v', '-']
p = subprocess.Popen(
cmd,
stdin=subprocess.DEVNULL,
stderr=subprocess.PIPE,
stdout=subprocess.PIPE,
env=env
)
stderr = p.stderr.read().decode('utf-8', errors='replace')
parse_state = 0
paths = []
for line in stderr.split('\n'):
if parse_state == 0:
if line == '#include "..." search starts here:':
parse_state = 1
elif parse_state == 1:
if line == '#include <...> search starts here:':
parse_state = 2
else:
paths.append(line[1:])
elif parse_state == 2:
if line == 'End of search list.':
break
else:
paths.append(line[1:])
if len(paths) == 0:
mlog.warning('No include directory found parsing "{cmd}" output'.format(cmd=" ".join(cmd)))
return paths
class GnuLikeCompiler(abc.ABC):
    """
    GnuLikeCompiler is a common interface to all compilers implementing
    the GNU-style commandline interface. This includes GCC, Clang
    and ICC. Certain functionality between them is different and requires
    that the actual concrete subclass define their own implementation.

    NOTE(review): this is used as a mixin — it reads attributes such as
    can_compile_suffixes, version and exelist that are presumably provided
    by the concrete Compiler subclass; confirm against the full file.
    """
    def __init__(self, compiler_type):
        self.compiler_type = compiler_type
        # Meson base options this compiler family understands.
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_ndebug', 'b_staticpic', 'b_pie']
        # b_lundef and b_asneeded are linker features not available on
        # macOS/Windows linkers respectively (see the per-method comments).
        if not self.compiler_type.is_osx_compiler and not self.compiler_type.is_windows_compiler:
            self.base_options.append('b_lundef')
        if not self.compiler_type.is_windows_compiler:
            self.base_options.append('b_asneeded')
        # All GCC-like backends can do assembly
        self.can_compile_suffixes.add('s')
    def get_asneeded_args(self):
        """Return the linker flag that drops unused shared-library deps."""
        # GNU ld cannot be installed on macOS
        # https://github.com/Homebrew/homebrew-core/issues/17794#issuecomment-328174395
        # Hence, we don't need to differentiate between OS and ld
        # for the sake of adding as-needed support
        if self.compiler_type.is_osx_compiler:
            return '-Wl,-dead_strip_dylibs'
        else:
            return '-Wl,--as-needed'
    def get_pic_args(self):
        if self.compiler_type.is_osx_compiler or self.compiler_type.is_windows_compiler:
            return [] # On Window and OS X, pic is always on.
        return ['-fPIC']
    def get_pie_args(self):
        """Compile flags for position-independent executables."""
        return ['-fPIE']
    def get_pie_link_args(self):
        """Link flags for position-independent executables."""
        return ['-pie']
    def get_buildtype_args(self, buildtype):
        # Looked up from the module-level table shared by all GNU-like
        # compilers.
        return gnulike_buildtype_args[buildtype]
    @abc.abstractmethod
    def get_optimization_args(self, optimization_level):
        raise NotImplementedError("get_optimization_args not implemented")
    def get_debug_args(self, is_debug):
        return clike_debug_args[is_debug]
    def get_buildtype_linker_args(self, buildtype):
        # Apple's ld takes different flags than GNU ld/lld.
        if self.compiler_type.is_osx_compiler:
            return apple_buildtype_linker_args[buildtype]
        return gnulike_buildtype_linker_args[buildtype]
    @abc.abstractmethod
    def get_pch_suffix(self):
        raise NotImplementedError("get_pch_suffix not implemented")
    def split_shlib_to_parts(self, fname):
        # (directory, full filename); GNU-style toolchains keep the soname in
        # the filename rather than splitting off a prefix.
        return os.path.dirname(fname), fname
    def get_soname_args(self, *args):
        return get_gcc_soname_args(self.compiler_type, *args)
    def get_std_shared_lib_link_args(self):
        return ['-shared']
    def get_std_shared_module_link_args(self, options):
        # On macOS a loadable module is a "bundle" and unresolved symbols are
        # looked up dynamically at load time.
        if self.compiler_type.is_osx_compiler:
            return ['-bundle', '-Wl,-undefined,dynamic_lookup']
        return ['-shared']
    def get_link_whole_for(self, args):
        # Force every object of the listed static archives into the link.
        if self.compiler_type.is_osx_compiler:
            # Apple ld: -force_load applies to a single following archive.
            result = []
            for a in args:
                result += ['-Wl,-force_load', a]
            return result
        return ['-Wl,--whole-archive'] + args + ['-Wl,--no-whole-archive']
    def get_instruction_set_args(self, instruction_set):
        # None signals "unsupported instruction set" to the caller.
        return gnulike_instruction_set_args.get(instruction_set, None)
    def get_default_include_dirs(self):
        return gnulike_default_include_dirs(self.exelist, self.language)
    @abc.abstractmethod
    def openmp_flags(self):
        raise NotImplementedError("openmp_flags not implemented")
    def gnu_symbol_visibility_args(self, vistype):
        return gnu_symbol_visibility_args[vistype]
    def gen_vs_module_defs_args(self, defsfile):
        """Return args for a Visual Studio-style module definitions file."""
        if not isinstance(defsfile, str):
            raise RuntimeError('Module definitions file should be str')
        # On Windows targets, .def files may be specified on the linker command
        # line like an object file.
        if self.compiler_type.is_windows_compiler:
            return [defsfile]
        # For other targets, discard the .def file.
        return []
    def get_argument_syntax(self):
        return 'gcc'
    def get_profile_generate_args(self):
        return ['-fprofile-generate']
    def get_profile_use_args(self):
        return ['-fprofile-use', '-fprofile-correction']
    def get_allow_undefined_link_args(self):
        """Linker flags allowing undefined symbols in shared objects."""
        if self.compiler_type.is_osx_compiler:
            # Apple ld
            return ['-Wl,-undefined,dynamic_lookup']
        elif self.compiler_type.is_windows_compiler:
            # For PE/COFF this is impossible
            return []
        else:
            # GNU ld and LLVM lld
            return ['-Wl,--allow-shlib-undefined']
    def get_gui_app_args(self, value):
        # MinGW toolchains select the PE subsystem with -mwindows/-mconsole.
        if self.compiler_type.is_windows_compiler:
            return ['-mwindows' if value else '-mconsole']
        return []
    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        # Rewrite relative -I/-L paths to be absolute w.r.t. the build dir.
        # NOTE: mutates parameter_list in place and also returns it.
        for idx, i in enumerate(parameter_list):
            if i[:2] == '-I' or i[:2] == '-L':
                parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
        return parameter_list
class GnuCompiler(GnuLikeCompiler):
    """
    GnuCompiler represents an actual GCC in its many incarnations.
    Compilers imitating GCC (Clang/Intel) should use the GnuLikeCompiler ABC.
    """
    def __init__(self, compiler_type, defines: dict):
        super().__init__(compiler_type)
        self.id = 'gcc'
        self.defines = defines if defines else {}
        self.base_options.append('b_colorout')

    def get_colorout_args(self, colortype: str) -> List[str]:
        # Colored diagnostics arrived in GCC 4.9; older versions get nothing.
        if not mesonlib.version_compare(self.version, '>=4.9.0'):
            return []
        return list(gnu_color_args[colortype])

    def get_warn_args(self, level: str) -> list:
        args = super().get_warn_args(level)
        if '-Wpedantic' in args and mesonlib.version_compare(self.version, '<4.8.0'):
            # -Wpedantic was added in 4.8.0
            # https://gcc.gnu.org/gcc-4.8/changes.html
            args[args.index('-Wpedantic')] = '-pedantic'
        return args

    def has_builtin_define(self, define: str) -> bool:
        return define in self.defines

    def get_builtin_define(self, define):
        # Returns None when the define is unknown.
        return self.defines.get(define)

    def get_optimization_args(self, optimization_level: str):
        return gnu_optimization_args[optimization_level]

    def get_pch_suffix(self) -> str:
        return 'gch'

    def openmp_flags(self) -> List[str]:
        return ['-fopenmp']
class PGICompiler:
    """Functionality common to all PGI-family compilers (used as a mixin)."""
    def __init__(self, compiler_type=None):
        self.id = 'pgi'
        self.compiler_type = compiler_type
        default_warn_args = ['-Minform=inform']
        self.warn_args = {'0': [],
                          '1': default_warn_args,
                          '2': default_warn_args,
                          '3': default_warn_args}
    def get_module_incdir_args(self) -> Tuple[str]:
        return ('-module', )
    def get_no_warn_args(self) -> List[str]:
        return ['-silent']
    def openmp_flags(self) -> List[str]:
        return ['-mp']
    def get_buildtype_args(self, buildtype: str) -> List[str]:
        return pgi_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype: str) -> List[str]:
        return pgi_buildtype_linker_args[buildtype]
    def get_optimization_args(self, optimization_level: str):
        return clike_optimization_args[optimization_level]
    def get_debug_args(self, is_debug: bool):
        return clike_debug_args[is_debug]
    def compute_parameters_with_absolute_paths(self, parameter_list: List[str], build_dir: str):
        # Rewrite relative -I/-L paths to be absolute w.r.t. the build dir.
        for idx, i in enumerate(parameter_list):
            if i[:2] == '-I' or i[:2] == '-L':
                parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
        # BUG FIX: the mutated list must be returned — every sibling
        # compiler class returns it and callers use the return value; the
        # original implementation fell through and returned None.
        return parameter_list
    def get_allow_undefined_link_args(self):
        return []
    def get_dependency_gen_args(self, outtarget, outfile):
        return []
    def get_always_args(self):
        return []
class ElbrusCompiler(GnuCompiler):
    # Elbrus compiler is nearly like GCC, but does not support
    # PCH, LTO, sanitizers and color output as of version 1.21.x.
    def __init__(self, compiler_type, defines):
        GnuCompiler.__init__(self, compiler_type, defines)
        self.id = 'lcc'
        self.base_options = ['b_pgo', 'b_coverage',
                             'b_ndebug', 'b_staticpic',
                             'b_lundef', 'b_asneeded']

    def _search_dirs(self, label):
        """Parse one '<label> ...' line out of lcc's --print-search-dirs
        output, yielding resolved paths; returns () when the label is absent.
        """
        os_env = os.environ.copy()
        os_env['LC_ALL'] = 'C'
        stdo = Popen_safe(self.exelist + ['--print-search-dirs'], env=os_env)[1]
        for line in stdo.split('\n'):
            if line.startswith(label):
                # lcc does not include '=' in --print-search-dirs output.
                libstr = line.split(' ', 1)[1]
                return (os.path.realpath(p) for p in libstr.split(':'))
        return ()

    # FIXME: use _build_wrapper to call this so that linker flags from the env
    # get applied
    def get_library_dirs(self, env):
        return self._search_dirs('libraries:')

    def get_program_dirs(self, env):
        return self._search_dirs('programs:')
class ClangCompiler(GnuLikeCompiler):
    """Clang-specific behaviour on top of the shared GNU-style interface."""
    def __init__(self, compiler_type):
        super().__init__(compiler_type)
        self.id = 'clang'
        self.base_options.append('b_colorout')
        if self.compiler_type.is_osx_compiler:
            # Bitcode generation is only meaningful with Apple toolchains.
            self.base_options.append('b_bitcode')
        # All Clang backends can also do LLVM IR
        self.can_compile_suffixes.add('ll')

    def get_colorout_args(self, colortype):
        return list(clang_color_args[colortype])

    def get_optimization_args(self, optimization_level):
        return clike_optimization_args[optimization_level]

    def get_pch_suffix(self):
        return 'pch'

    def get_pch_use_args(self, pch_dir, header):
        # Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
        # This flag is internal to Clang (or at least not documented on the man page)
        # so it might change semantics at any time.
        return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]

    def has_multi_arguments(self, args, env):
        hardening = [
            '-Werror=unknown-warning-option',
            '-Werror=unused-command-line-argument',
        ]
        if mesonlib.version_compare(self.version, '>=3.6.0'):
            hardening.append('-Werror=ignored-optimization-argument')
        return super().has_multi_arguments(hardening + args, env)

    def has_function(self, funcname, prefix, env, *, extra_args=None, dependencies=None):
        extra_args = [] if extra_args is None else extra_args
        # Starting with XCode 8, we need to pass this to force linker
        # visibility to obey OS X and iOS minimum version targets with
        # -mmacosx-version-min, -miphoneos-version-min, etc.
        # https://github.com/Homebrew/homebrew-core/issues/3727
        if self.compiler_type.is_osx_compiler and version_compare(self.version, '>=8.0'):
            extra_args.append('-Wl,-no_weak_imports')
        return super().has_function(funcname, prefix, env, extra_args=extra_args,
                                    dependencies=dependencies)

    def openmp_flags(self):
        if version_compare(self.version, '>=3.8.0'):
            return ['-fopenmp']
        if version_compare(self.version, '>=3.7.0'):
            return ['-fopenmp=libomp']
        # Shouldn't work, but it'll be checked explicitly in the OpenMP dependency.
        return []
class ArmclangCompiler:
    """Functionality common to the ARM Compiler 6 (armclang) family.

    Used as a mixin: relies on the concrete Compiler subclass providing
    is_cross, version, get_pch_name and can_compile_suffixes.
    """
    def __init__(self, compiler_type):
        if not self.is_cross:
            raise EnvironmentException('armclang supports only cross-compilation.')
        # Check whether 'armlink.exe' is available in path
        self.linker_exe = 'armlink.exe'
        args = '--vsn'
        try:
            p, stdo, stderr = Popen_safe(self.linker_exe, args)
        except OSError as e:
            err_msg = 'Unknown linker\nRunning "{0}" gave \n"{1}"'.format(' '.join([self.linker_exe] + [args]), e)
            raise EnvironmentException(err_msg)
        # Verify the armlink version
        ver_str = re.search('.*Component.*', stdo)
        if ver_str:
            ver_str = ver_str.group(0)
        else:
            # BUG FIX: the exception object was constructed but never raised,
            # so a missing version string was silently ignored and ver_str
            # stayed a Match-or-None for the code below.
            raise EnvironmentException('armlink version string not found')
        # Using the regular expression from environment.search_version,
        # which is used for searching compiler version
        version_regex = r'(?<!(\d|\.))(\d{1,2}(\.\d+)+(-[a-zA-Z0-9]+)?)'
        linker_ver = re.search(version_regex, ver_str)
        if linker_ver:
            linker_ver = linker_ver.group(0)
            if not version_compare(self.version, '==' + linker_ver):
                raise EnvironmentException('armlink version does not match with compiler version')
        self.id = 'armclang'
        self.compiler_type = compiler_type
        self.base_options = ['b_pch', 'b_lto', 'b_pgo', 'b_sanitize', 'b_coverage',
                             'b_ndebug', 'b_staticpic', 'b_colorout']
        # Assembly
        self.can_compile_suffixes.update('s')
    def can_linker_accept_rsp(self):
        return False
    def get_pic_args(self):
        # PIC support is not enabled by default for ARM,
        # if users want to use it, they need to add the required arguments explicitly
        return []
    def get_colorout_args(self, colortype):
        return clang_color_args[colortype][:]
    def get_buildtype_args(self, buildtype):
        return armclang_buildtype_args[buildtype]
    def get_buildtype_linker_args(self, buildtype):
        return arm_buildtype_linker_args[buildtype]
    # Override CCompiler.get_std_shared_lib_link_args
    def get_std_shared_lib_link_args(self):
        return []
    def get_pch_suffix(self):
        return 'gch'
    def get_pch_use_args(self, pch_dir, header):
        # Workaround for Clang bug http://llvm.org/bugs/show_bug.cgi?id=15136
        # This flag is internal to Clang (or at least not documented on the man page)
        # so it might change semantics at any time.
        return ['-include-pch', os.path.join(pch_dir, self.get_pch_name(header))]
    # Override CCompiler.get_dependency_gen_args
    def get_dependency_gen_args(self, outtarget, outfile):
        return []
    # Override CCompiler.build_rpath_args
    def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
        return []
    def get_linker_exelist(self):
        return [self.linker_exe]
    def get_optimization_args(self, optimization_level):
        return armclang_optimization_args[optimization_level]
    def get_debug_args(self, is_debug):
        return clike_debug_args[is_debug]
    def gen_export_dynamic_link_args(self, env):
        """
        The args for export dynamic
        """
        return ['--export_dynamic']
    def gen_import_library_args(self, implibname):
        """
        The args of the outputted import library
        ArmLinker's symdefs output can be used as implib
        """
        return ['--symdefs=' + implibname]
    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        # Rewrite relative -I/-L paths to be absolute w.r.t. the build dir.
        for idx, i in enumerate(parameter_list):
            if i[:2] == '-I' or i[:2] == '-L':
                parameter_list[idx] = i[:2] + os.path.normpath(os.path.join(build_dir, i[2:]))
        return parameter_list
# Tested on linux for ICC 14.0.3, 15.0.6, 16.0.4, 17.0.1, 19.0.0
class IntelCompiler(GnuLikeCompiler):
    """ICC-specific behaviour on top of the shared GNU-style interface."""
    def __init__(self, compiler_type):
        super().__init__(compiler_type)
        # As of 19.0.0 ICC doesn't have sanitizer, color, or lto support.
        #
        # It does have IPO, which serves much the same purpose as LOT, but
        # there is an unfortunate rule for using IPO (you can't control the
        # name of the output file) which break assumptions meson makes
        self.base_options = ['b_pch', 'b_lundef', 'b_asneeded', 'b_pgo',
                             'b_coverage', 'b_ndebug', 'b_staticpic', 'b_pie']
        self.id = 'intel'
        self.lang_header = 'none'
    def get_optimization_args(self, optimization_level):
        return clike_optimization_args[optimization_level]
    def get_pch_suffix(self) -> str:
        return 'pchi'
    def get_pch_use_args(self, pch_dir, header):
        return ['-pch', '-pch_dir', os.path.join(pch_dir), '-x',
                self.lang_header, '-include', header, '-x', 'none']
    def get_pch_name(self, header_name):
        return os.path.basename(header_name) + '.' + self.get_pch_suffix()
    def openmp_flags(self) -> List[str]:
        # The flag was renamed from -openmp to -qopenmp in ICC 15.
        if version_compare(self.version, '>=15.0.0'):
            return ['-qopenmp']
        else:
            return ['-openmp']
    def compiles(self, *args, **kwargs):
        """Run a compile check, turning ICC's "ignored option" remarks into
        hard errors so unsupported flags are correctly reported as failures.
        """
        # This covers a case that .get('foo', []) doesn't, that extra_args is
        # defined and is None
        extra_args = kwargs.get('extra_args') or []
        # BUG FIX: the original nested extra_args as a sub-list inside the
        # new list; concatenate instead so the argument list stays flat.
        kwargs['extra_args'] = extra_args + [
            '-diag-error', '10006',  # ignoring unknown option
            '-diag-error', '10148',  # Option not supported
            '-diag-error', '10155',  # ignoring argument required
            '-diag-error', '10156',  # ignoring not argument allowed
            '-diag-error', '10157',  # Ignoring argument of the wrong type
            '-diag-error', '10158',  # Argument must be separate. Can be hit by trying an option like -foo-bar=foo when -foo=bar is a valid option but -foo-bar isn't
            '-diag-error', '1292',   # unknown __attribute__
        ]
        return super().compiles(*args, **kwargs)
    def get_profile_generate_args(self):
        return ['-prof-gen=threadsafe']
    def get_profile_use_args(self):
        return ['-prof-use']
class ArmCompiler:
    """Functionality that is common to all ARM (armcc) family compilers."""
    def __init__(self, compiler_type):
        if not self.is_cross:
            raise EnvironmentException('armcc supports only cross-compilation.')
        self.id = 'arm'
        self.compiler_type = compiler_type
        base_warn_args = []
        self.warn_args = {'0': [],
                          '1': base_warn_args,
                          '2': base_warn_args + [],
                          '3': base_warn_args + []}
        # armcc can also assemble
        self.can_compile_suffixes.add('s')

    def can_linker_accept_rsp(self):
        return False

    def get_pic_args(self):
        # FIXME: Add /ropi, /rwpi, /fpic etc. qualifiers to --apcs
        return []

    def get_buildtype_args(self, buildtype):
        return arm_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return arm_buildtype_linker_args[buildtype]

    def get_always_args(self):
        # Overrides CCompiler.get_always_args
        return []

    def get_dependency_gen_args(self, outtarget, outfile):
        # Overrides CCompiler.get_dependency_gen_args
        return []

    def get_std_shared_lib_link_args(self):
        # Overrides CCompiler.get_std_shared_lib_link_args
        return []

    def get_pch_use_args(self, pch_dir, header):
        # FIXME: Add required arguments
        # NOTE from armcc user guide:
        # "Support for Precompiled Header (PCH) files is deprecated from ARM Compiler 5.05
        # onwards on all platforms. Note that ARM Compiler on Windows 8 never supported
        # PCH files."
        return []

    def get_pch_suffix(self):
        # NOTE from armcc user guide:
        # "Support for Precompiled Header (PCH) files is deprecated from ARM Compiler 5.05
        # onwards on all platforms. Note that ARM Compiler on Windows 8 never supported
        # PCH files."
        return 'pch'

    def thread_flags(self, env):
        return []

    def thread_link_flags(self, env):
        return []

    def get_linker_exelist(self):
        return ['armlink']

    def get_coverage_args(self):
        return []

    def get_coverage_link_args(self):
        return []

    def get_optimization_args(self, optimization_level):
        return arm_optimization_args[optimization_level]

    def get_debug_args(self, is_debug):
        return clike_debug_args[is_debug]

    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        # Rewrite relative -I/-L paths to be absolute w.r.t. the build dir.
        for position, arg in enumerate(parameter_list):
            if arg[:2] in ('-I', '-L'):
                parameter_list[position] = arg[:2] + os.path.normpath(os.path.join(build_dir, arg[2:]))
        return parameter_list
class CcrxCompiler:
    """Functionality common to the Renesas CC-RX family compilers."""
    def __init__(self, compiler_type):
        if not self.is_cross:
            raise EnvironmentException('ccrx supports only cross-compilation.')
        # Check whether 'rlink.exe' is available in path
        self.linker_exe = 'rlink.exe'
        args = '--version'
        try:
            p, stdo, stderr = Popen_safe(self.linker_exe, args)
        except OSError as e:
            err_msg = 'Unknown linker\nRunning "{0}" gave \n"{1}"'.format(' '.join([self.linker_exe] + [args]), e)
            raise EnvironmentException(err_msg)
        self.id = 'ccrx'
        self.compiler_type = compiler_type
        # Assembly
        self.can_compile_suffixes.update('s')
        base_warn_args = []
        self.warn_args = {'0': [],
                          '1': base_warn_args,
                          '2': base_warn_args + [],
                          '3': base_warn_args + []}

    def can_linker_accept_rsp(self):
        return False

    def get_pic_args(self):
        # PIC support is not enabled by default for CCRX,
        # if users want to use it, they need to add the required arguments explicitly
        return []

    def get_buildtype_args(self, buildtype):
        return ccrx_buildtype_args[buildtype]

    def get_buildtype_linker_args(self, buildtype):
        return ccrx_buildtype_linker_args[buildtype]

    def get_std_shared_lib_link_args(self):
        # Overrides CCompiler.get_std_shared_lib_link_args
        return []

    def get_pch_suffix(self):
        return 'pch'

    def get_pch_use_args(self, pch_dir, header):
        return []

    def get_dependency_gen_args(self, outtarget, outfile):
        # Overrides CCompiler.get_dependency_gen_args
        return []

    def build_rpath_args(self, build_dir, from_dir, rpath_paths, build_rpath, install_rpath):
        # Overrides CCompiler.build_rpath_args
        return []

    def thread_flags(self, env):
        return []

    def thread_link_flags(self, env):
        return []

    def get_linker_exelist(self):
        return [self.linker_exe]

    def get_linker_lib_prefix(self):
        return '-lib='

    def get_coverage_args(self):
        return []

    def get_coverage_link_args(self):
        return []

    def get_optimization_args(self, optimization_level):
        return ccrx_optimization_args[optimization_level]

    def get_debug_args(self, is_debug):
        return ccrx_debug_args[is_debug]

    @classmethod
    def unix_args_to_native(cls, args):
        """Translate Unix-style flags to their CC-RX spellings, dropping
        flags the toolchain has no counterpart for."""
        translated = []
        for arg in args:
            if arg.startswith('-D'):
                arg = '-define=' + arg[2:]
            if arg.startswith('-I'):
                arg = '-include=' + arg[2:]
            if (arg.startswith('-Wl,-rpath=')
                    or arg == '--print-search-dirs'
                    or arg.startswith('-L')):
                continue
            translated.append(arg)
        return translated

    def compute_parameters_with_absolute_paths(self, parameter_list, build_dir):
        # Rewrite relative -include= paths to be absolute w.r.t. the build dir.
        for position, arg in enumerate(parameter_list):
            if arg[:9] == '-include=':
                parameter_list[position] = arg[:9] + os.path.normpath(os.path.join(build_dir, arg[9:]))
        return parameter_list
| 38.610482 | 165 | 0.56758 |
7955f47e3d6e3817a1aae782d7214d1799c8cc90 | 28,985 | py | Python | hours/importer/kirjastot.py | Sukriva/hauki | 2ed569d2e4b1f1cf315939aa15c52dcabd52876b | [
"MIT"
] | null | null | null | hours/importer/kirjastot.py | Sukriva/hauki | 2ed569d2e4b1f1cf315939aa15c52dcabd52876b | [
"MIT"
] | null | null | null | hours/importer/kirjastot.py | Sukriva/hauki | 2ed569d2e4b1f1cf315939aa15c52dcabd52876b | [
"MIT"
] | null | null | null | from collections import defaultdict
from datetime import date, datetime, time
from itertools import groupby
from operator import itemgetter
import holidays
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from django import db
from django.db.models import Q
from django.utils import timezone
from ..enums import RuleContext, RuleSubject, State, Weekday
from ..models import (
DataSource,
DatePeriod,
Resource,
TimeElement,
combine_element_time_spans,
)
from .base import Importer, register_importer
from .sync import ModelSyncher
KIRKANTA_STATUS_MAP = {0: State.CLOSED, 1: State.OPEN, 2: State.SELF_SERVICE}
fi_holidays = holidays.Finland()
# List of periods that are known not to be a rotation of x weeks, but need to be
# handled day-by-day.
KIRKANTA_LONG_EXCEPTIONAL_PERIODS = [
# Library "Metropolian kirjasto | Myyrmäki" Period "Joulu ja vuodenvaihde 2020"
346087,
]
# Periods that are not usable from the kirkanta and are thus hard coded
KIRKANTA_FIXED_GROUPS = {
# Library "Saksalainen kirjasto Deutsche Bibliothek" Period "Kirjaston aukioloajat"
# Opening hours from their website:
# https://www.deutsche-bibliothek.org/fi/kirjaston/oeffnungszeiten.html
# maanantaisin klo 10-18, tiistai-perjantai klo 10-16
# sekä kuukauden viimeisenä lauantaina klo 10-15.
303888: [
{
"time_spans": [
{
"group": None,
"start_time": time(hour=10, minute=0),
"end_time": time(hour=18, minute=0),
"weekdays": [Weekday.MONDAY],
"resource_state": State.OPEN,
"full_day": False,
},
{
"group": None,
"start_time": time(hour=10, minute=0),
"end_time": time(hour=16, minute=0),
"weekdays": [
Weekday.TUESDAY,
Weekday.WEDNESDAY,
Weekday.THURSDAY,
Weekday.FRIDAY,
],
"resource_state": State.OPEN,
"full_day": False,
},
],
"rules": [],
},
{
"time_spans": [
{
"group": None,
"start_time": time(hour=10, minute=0),
"end_time": time(hour=15, minute=0),
"weekdays": [Weekday.SATURDAY],
"resource_state": State.OPEN,
"full_day": False,
}
],
"rules": [
{
"group": None,
"context": RuleContext.MONTH,
"subject": RuleSubject.SATURDAY,
"start": -1,
}
],
},
],
}
@register_importer
class KirjastotImporter(Importer):
name = "kirjastot"
def setup(self):
self.URL_BASE = "https://api.kirjastot.fi/v4/"
ds_args = dict(id="kirkanta")
defaults = dict(name="kirjastot.fi")
self.data_source, _ = DataSource.objects.get_or_create(
defaults=defaults, **ds_args
)
@staticmethod
def get_date_range(
start: date = None, back: int = 1, forward: int = 12
) -> (date, date):
"""
Returns a date range of "back" months before and "forward" months after
given date, or today.
"""
if not start:
start = timezone.now().date()
begin = start - relativedelta(months=back)
end = start + relativedelta(months=forward)
return begin, end
def get_hours_from_api(self, resource: Resource, start: date, end: date) -> dict:
"""
Fetch opening hours for Target from kirjastot.fi's v4 API for the
given date range.
"""
kirkanta_id = resource.origins.get(data_source=self.data_source).origin_id
params = {
"with": "schedules",
"refs": "period",
"period.start": start,
"period.end": end,
}
data = self.api_get("library", kirkanta_id, params)
if data["total"] > 0:
return data
return {}
def check_period_common_dates_equal(
self, period_id: int, first_data: list, second_data: list
) -> bool:
"""
Checks that existing openings in both lists are equal, if their period_ids
match.
"""
if not first_data or not second_data:
return True
for first_opening, second_opening in zip(first_data, second_data):
if not first_opening or not second_opening:
# either pattern has no data for the given day, so it's always a match
# for the day
continue
# times are not necessarily in the same order. order the times with status
# and start first
first_opening["times"].sort(key=itemgetter("status", "from", "to"))
second_opening["times"].sort(key=itemgetter("status", "from", "to"))
if (
first_opening["period"] == period_id
and second_opening["period"] == period_id
and first_opening["times"] != second_opening["times"]
):
return False
return True
def get_weekday_pattern_candidate(
self, weekday_openings_by_date: list, n: int, period_id: int
) -> list:
"""
Returns the pattern for n consecutive weeks from weekday_openings_by_date,
merging data from several repetitions to find all n weeks with period_id.
"""
first_n_weeks = weekday_openings_by_date[0:n]
weeks_to_return = []
for index, weekly_opening in enumerate(first_n_weeks):
try:
round = 0
while weekly_opening["period"] != period_id:
# this opening won't do, looking at the next repetition
round += 1
weekly_opening = weekday_openings_by_date[round * n + index]
except IndexError:
# period doesn't contain a single opening for this weekday
weekly_opening = None
weeks_to_return.append(weekly_opening)
return weeks_to_return
    def get_openings(self, data: list, period_start: date = None) -> list:
        """
        Generates serialized opening rules for a single period from sorted list of
        dates and their opening times. Each period needs to be processed separately.
        Dates may not be missing, but their data may be missing.
        We assume missing data at start, end or middle is indication of period overlap
        and date filtering, not period irregularity, and extrapolate from all the data
        we have up to the period length.
        period_start must be specified if the period doesn't start from the first day
        of data. This is because weekly rotations are always counted from the start
        of the period. Otherwise period is assumed to start on first date of data.

        :param data: sorted list of day dicts (each with "date", "weekday",
            "period" and "times" keys) covering consecutive dates.
        :param period_start: start date of the period; defaults to the first
            date in data.
        :return: list of serialized time span groups
            ({"time_spans": [...], "rules": [...]}).
        """
        period_id = data[0]["period"]
        start_date = data[0]["date"]
        # starting date is needed to identify the first week, in case of repetitions of
        # multiple weeks
        if not period_start:
            period_start = start_date
        start_weekday = start_date.isoweekday()
        period_start_weekday = period_start.isoweekday()
        openings_by_weekday = groupby(
            sorted(data, key=itemgetter("weekday")), key=itemgetter("weekday")
        )
        # 1st (preprocess): group by differing times for the same weekday, if found,
        # and find the repetitions
        repetition_pattern = {}
        for weekday, openings_by_date in openings_by_weekday:
            openings_by_date = list(openings_by_date)
            n_weeks = len(openings_by_date)
            pattern_candidate = []
            # starting from the assumption that the openings repeat weekly, we increase
            # the repetition by one week and try again until the pattern matches
            for repetition in range(1, n_weeks + 1):
                pattern_candidate = self.get_weekday_pattern_candidate(
                    openings_by_date, repetition, period_id
                )
                slices = [
                    openings_by_date[i : i + repetition]
                    for i in range(0, n_weeks, repetition)
                ]
                # *all* slices must be equal whenever the period_id matches
                for pattern_slice in slices:
                    if not self.check_period_common_dates_equal(
                        period_id, pattern_slice, pattern_candidate
                    ):
                        # slice mismatch, no repetition
                        break
                else:
                    # end of loop reached, hooray! We have the repetition
                    break
            repetition_pattern[weekday] = pattern_candidate
        # first week may be partial, so openings for some weekdays start from the second
        # week, first week pattern is found at the end. move those patterns by one week
        for (weekday, pattern) in repetition_pattern.items():
            if weekday < start_weekday:
                repetition_pattern[weekday] = [pattern[-1]] + pattern[:-1]
        # repetition pattern may actually start in the middle of the period if we don't
        # have data from period start. shift the pattern so it starts from period start
        # (dates minus relativedeltas below land both dates on their week's
        # Monday before subtracting; the result supports the .weeks accessor)
        days_to_shift = (
            start_date
            - relativedelta(days=start_weekday - 1)
            - period_start
            + relativedelta(days=period_start_weekday - 1)
        )
        weeks_to_shift = days_to_shift.weeks
        for (weekday, pattern) in repetition_pattern.items():
            repetition_length = len(pattern)
            slice_index = weeks_to_shift % repetition_length
            if slice_index:
                # rotate the pattern so index 0 corresponds to period start
                repetition_pattern[weekday] = (
                    pattern[-slice_index:] + pattern[: repetition_length - slice_index]
                )
        # 2nd (loop again): generate time span groups based on the data for each
        # weekday and varying repetition length
        openings_by_repetition_length = groupby(
            sorted(repetition_pattern.values(), key=len), len
        )
        time_span_groups = []
        for length, patterns in openings_by_repetition_length:
            # transpose: iterate one rotation week at a time across weekdays
            openings_by_week = zip(*patterns)
            for (rotation_week_num, week_opening_times) in enumerate(openings_by_week):
                week_opening_times_by_status = defaultdict(list)
                for day_in_the_week in week_opening_times:
                    # Opening times may be empty if we have no data for this
                    # particular day of the period
                    if day_in_the_week:
                        if day_in_the_week["times"]:
                            for week_opening_time in day_in_the_week["times"]:
                                week_opening_time["weekday"] = day_in_the_week[
                                    "weekday"
                                ]
                                week_opening_times_by_status[
                                    week_opening_time["status"]
                                ].append(week_opening_time)
                        else:
                            # Closed for the whole day
                            week_opening_times_by_status[0].append(
                                {"weekday": day_in_the_week["weekday"]}
                            )
                time_spans = []
                for status, week_opening_times in week_opening_times_by_status.items():
                    # normalize missing times so the groupby key below works
                    for week_opening_time in week_opening_times:
                        if "from" not in week_opening_time:
                            week_opening_time["from"] = ""
                        if "to" not in week_opening_time:
                            week_opening_time["to"] = ""
                    grouped_times = groupby(
                        sorted(week_opening_times, key=itemgetter("from", "to")),
                        itemgetter("from", "to"),
                    )
                    for opening_time, opening_times in grouped_times:
                        # no start and no end means the state covers the
                        # whole day (e.g. closed all day)
                        full_day = False
                        if not opening_time[0] and not opening_time[1]:
                            full_day = True
                        time_spans.append(
                            {
                                "group": None,
                                "start_time": datetime.strptime(
                                    opening_time[0], "%H:%M"
                                ).time()
                                if opening_time[0]
                                else None,
                                "end_time": datetime.strptime(
                                    opening_time[1], "%H:%M"
                                ).time()
                                if opening_time[1]
                                else None,
                                "weekdays": [
                                    Weekday.from_iso_weekday(i["weekday"])
                                    for i in opening_times
                                ],
                                "resource_state": KIRKANTA_STATUS_MAP[status],
                                "full_day": full_day,
                            }
                        )
                time_span_group = {
                    "time_spans": time_spans,
                    "rules": [],
                }
                if length > 1:
                    # multi-week rotation: restrict this group to every
                    # length'th week of the period
                    time_span_group["rules"].append(
                        {
                            "group": None,
                            "context": RuleContext.PERIOD,
                            "subject": RuleSubject.WEEK,
                            "start": rotation_week_num + 1,
                            "frequency_ordinal": length,
                        }
                    )
                time_span_groups.append(time_span_group)
        return time_span_groups
    def separate_exceptional_periods(self, resource: Resource, period: dict) -> list:
        """Split an exceptional kirkanta period into serialized DatePeriods.

        If every day in the period is closed, a single override CLOSED period
        covering the whole range is returned. Otherwise one single-day
        override period is returned per day, named after the day's "info"
        field or the Finnish holiday falling on that date.
        """
        if all([d.get("closed", True) for d in period["days"]]):
            return [
                {
                    "resource": resource,
                    "name": {"fi": period.get("name", "")},
                    "start_date": parse(period["validFrom"]).date(),
                    "end_date": parse(period["validUntil"]).date(),
                    "resource_state": State.CLOSED,
                    "override": True,
                    "origins": [
                        {
                            "data_source_id": self.data_source.id,
                            "origin_id": str(period["id"]),
                        }
                    ],
                }
            ]
        periods = []
        for day in period["days"]:
            # Prefer the day's own info text; fall back to the holiday name.
            name = day.get("info") if day.get("info") else fi_holidays.get(day["date"])
            sub_period = {
                "resource": resource,
                "name": {"fi": name},
                "start_date": day["date"],
                "end_date": day["date"],
                "resource_state": State.UNDEFINED,
                "override": True,
                "origins": [
                    {
                        "data_source_id": self.data_source.id,
                        # Origin id is unique per day within the period.
                        "origin_id": str(period["id"]) + "-" + str(day["date"]),
                    }
                ],
            }
            if day["closed"]:
                sub_period["resource_state"] = State.CLOSED
                periods.append(sub_period)
                continue
            time_spans = []
            for opening_time in day["times"]:
                time_spans.append(
                    {
                        "group": None,
                        "start_time": datetime.strptime(
                            opening_time["from"], "%H:%M"
                        ).time(),
                        "end_time": datetime.strptime(
                            opening_time["to"], "%H:%M"
                        ).time(),
                        "resource_state": KIRKANTA_STATUS_MAP[opening_time["status"]],
                        "name": {"fi": day.get("info")},
                    }
                )
            sub_period["time_span_groups"] = [
                {
                    "time_spans": time_spans,
                    "rules": [],
                }
            ]
            periods.append(sub_period)
        return periods
def get_kirkanta_periods(self, data: dict) -> dict:
"""
Annotates kirkanta data so that periods contain indexed data for each day for
their duration. Returned periods may contain empty days or days belonging to
other periods, since original data may have period overlaps.
"""
periods = data.get("refs", {}).get("period", None)
if not periods:
return {}
# sort the data just in case the API didn't
data["data"]["schedules"].sort(key=lambda x: x["date"])
# TODO: check for missing dates?
# parse and annotate the data with day indices and weekdays
for index, day in enumerate(data["data"]["schedules"]):
day["date"] = parse(day["date"]).date()
day["weekday"] = day["date"].isoweekday()
day["index"] = index
days_by_period = groupby(
sorted(data["data"]["schedules"], key=itemgetter("period")),
key=itemgetter("period"),
)
for period_id, days in days_by_period:
days = list(days)
start_index = days[0]["index"]
end_index = days[-1]["index"]
# Here we just slice the data for the duration of the period.
# All days must be present for rotation indexing.
schedules = data["data"]["schedules"][start_index : end_index + 1]
period_id_string = str(period_id)
if period_id_string not in periods.keys():
self.logger.info(
"Period {} not found in periods! Ignoring data {}".format(
period_id_string, days
)
)
periods[period_id_string]["days"] = schedules
return periods
def _get_times_for_sort(self, item: TimeElement) -> tuple:
return (
item.start_time if item.start_time else "",
item.end_time if item.end_time else "",
# Resource state is included to sort items with the same start
# and end times. Can't use Enum so we use the value instead.
# The order of the states is not important here.
item.resource_state.value if item.resource_state else "",
)
    def check_library_data(self, library, data, start_date, end_date):
        """Checks that the daily opening hours match the schedule in the data.

        Rebuilds the expected TimeElements for every day in the incoming
        kirkanta payload and compares them against the hours stored for
        ``library`` between ``start_date`` and ``end_date``.
        Raises AssertionError if they don't match.
        """
        # Period ids whose days should be treated as overriding elements:
        # short periods (< 7 days), fully-closed periods, and exceptions.
        override_periods = []
        kirkanta_periods = data.get("refs", {}).get("period", None)
        # NOTE(review): if "refs"/"period" is missing while schedules exist,
        # the .values() call below will raise — TODO confirm the API always
        # includes the refs when schedules are present.
        schedules = data.get("data", {}).get("schedules")
        if not schedules:
            self.logger.info("No schedules found in the incoming data. Skipping.")
            return
        for kirkanta_period in kirkanta_periods.values():
            valid_from = None
            valid_until = None
            if kirkanta_period["validFrom"]:
                valid_from = parse(kirkanta_period["validFrom"]).date()
            if kirkanta_period["validUntil"]:
                valid_until = parse(kirkanta_period["validUntil"]).date()
            if valid_from is not None and valid_until is not None:
                time_delta = valid_until - valid_from
                # Short bounded periods always override.
                if time_delta.days < 7:
                    override_periods.append(kirkanta_period["id"])
                    continue
            period_schedules = [
                i for i in schedules if i.get("period") == kirkanta_period["id"]
            ]
            # A period whose every day is closed overrides, as do exceptions.
            if all([d["closed"] for d in period_schedules]):
                override_periods.append(kirkanta_period["id"])
            if kirkanta_period["isException"]:
                override_periods.append(kirkanta_period["id"])
        opening_hours = library.get_daily_opening_hours(start_date, end_date)
        for schedule in schedules:
            time_elements = []
            if schedule.get("closed") is True:
                # Closed day: one full-day CLOSED element.
                time_elements.append(
                    TimeElement(
                        start_time=None,
                        end_time=None,
                        end_time_on_next_day=False,
                        resource_state=State.CLOSED,
                        override=True
                        if schedule.get("period") in override_periods
                        else False,
                        full_day=True,
                    )
                )
            else:
                # Open day: one element per opening-time span.
                for schedule_time in schedule.get("times"):
                    try:
                        start_time = datetime.strptime(
                            schedule_time.get("from"), "%H:%M"
                        ).time()
                    except ValueError:
                        start_time = None
                    try:
                        end_time = datetime.strptime(
                            schedule_time.get("to"), "%H:%M"
                        ).time()
                    except ValueError:
                        end_time = None
                    # An end at or before the start means the span wraps
                    # past midnight into the next day.
                    end_time_on_next_day = False
                    if start_time and end_time and end_time <= start_time:
                        end_time_on_next_day = True
                    time_elements.append(
                        TimeElement(
                            start_time=start_time,
                            end_time=end_time,
                            end_time_on_next_day=end_time_on_next_day,
                            resource_state=KIRKANTA_STATUS_MAP[schedule_time["status"]],
                            override=True
                            if schedule.get("period") in override_periods
                            else False,
                            full_day=False,
                        )
                    )
            # The date may already have been parsed by get_kirkanta_periods.
            schedule_date = schedule.get("date")
            if not isinstance(schedule_date, date):
                schedule_date = parse(schedule.get("date")).date()
            # Normalize both sides into the same canonical order before comparing.
            time_elements = combine_element_time_spans(time_elements)
            time_elements.sort(key=self._get_times_for_sort)
            opening_hours[schedule_date].sort(key=self._get_times_for_sort)
            assert time_elements == opening_hours[schedule_date]
    @db.transaction.atomic
    def import_openings(self):
        """Import kirkanta opening hours as DatePeriods for all libraries.

        Fetches the API payload per library, turns each kirkanta period into
        either separate exceptional days or one long DatePeriod, syncs the
        result against existing periods, and finally re-checks the stored
        hours against the source data. Runs inside a single DB transaction.
        """
        libraries = Resource.objects.filter(origins__data_source=self.data_source)
        # --single limits the import to one library by its origin id.
        if self.options.get("single", None):
            libraries = libraries.filter(origins__origin_id=self.options["single"])
        start_date = self.options.get("date", None)
        if start_date:
            start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
        self.logger.info("{} libraries found".format(libraries.count()))
        import_start_date, import_end_date = self.get_date_range(
            start=start_date, back=0
        )
        # Existing periods from this source that are still (possibly) active;
        # the syncher deletes any of these that the import does not re-mark.
        queryset = (
            DatePeriod.objects.filter(
                origins__data_source=self.data_source, resource__in=libraries
            )
            .filter(Q(end_date=None) | Q(end_date__gte=import_start_date))
            .prefetch_related("time_span_groups__time_spans")
        )
        syncher = ModelSyncher(
            queryset,
            delete_func=self.mark_deleted,
            check_deleted_func=self.check_deleted,
        )
        for library in libraries:
            library._has_fixed_periods = False
            self.logger.info(
                'Importing hours for "{}" id:{}...'.format(library.name, library.id)
            )
            # Keep the raw payload on the library for the check pass below.
            library._kirkanta_data = self.get_hours_from_api(
                library, import_start_date, import_end_date
            )
            kirkanta_periods = self.get_kirkanta_periods(library._kirkanta_data)
            periods = []
            for kirkanta_period in kirkanta_periods.values():
                valid_from = None
                valid_until = None
                if kirkanta_period["validFrom"]:
                    valid_from = parse(kirkanta_period["validFrom"]).date()
                if kirkanta_period["validUntil"]:
                    valid_until = parse(kirkanta_period["validUntil"]).date()
                self.logger.debug(
                    'period #{} "{}": {} - {}'.format(
                        kirkanta_period["id"],
                        kirkanta_period.get("name", ""),
                        valid_from,
                        valid_until,
                    )
                )
                # Short bounded periods (and hand-listed long exceptions) are
                # imported as individual single-day periods instead.
                if valid_from is not None and valid_until is not None:
                    time_delta = valid_until - valid_from
                    if (
                        time_delta.days < 7
                        or kirkanta_period["id"] in KIRKANTA_LONG_EXCEPTIONAL_PERIODS
                    ):
                        self.logger.debug("Importing as separate days.")
                        periods.extend(
                            self.separate_exceptional_periods(library, kirkanta_period)
                        )
                        continue
                self.logger.debug("Importing as a longer period.")
                override = False
                # days with no "closed" key count as closed here
                if all([d.get("closed", True) for d in kirkanta_period["days"]]):
                    override = True
                    state = State.CLOSED
                else:
                    state = State.UNDEFINED
                if kirkanta_period["isException"]:
                    override = True
                long_period = {
                    "resource": library,
                    "name": {"fi": kirkanta_period.get("name", "")},
                    "start_date": valid_from,
                    "end_date": valid_until,
                    "resource_state": state,
                    "override": override,
                    "origins": [
                        {
                            "data_source_id": self.data_source.id,
                            "origin_id": str(kirkanta_period["id"]),
                        }
                    ],
                    "time_span_groups": self.get_openings(
                        kirkanta_period["days"], period_start=valid_from
                    ),
                }
                # Hand-maintained replacement groups for known-bad source data.
                if kirkanta_period["id"] in KIRKANTA_FIXED_GROUPS:
                    long_period["time_span_groups"] = KIRKANTA_FIXED_GROUPS[
                        kirkanta_period["id"]
                    ]
                    library._has_fixed_periods = True
                periods.append(long_period)
            for period_data in periods:
                period = self.save_dateperiod(period_data)
                syncher.mark(period)
        syncher.finish(force=self.options["force"])
        # Verification pass: stored hours must reproduce the source schedules
        # (skipped when fixed groups deliberately diverge from the source).
        for library in libraries:
            if library._has_fixed_periods:
                self.logger.info(
                    "Not checking because library has fixed periods in the importer."
                )
            else:
                self.logger.info(
                    'Checking hours for "{}" id:{}...'.format(library.name, library.id)
                )
                self.check_library_data(
                    library, library._kirkanta_data, import_start_date, import_end_date
                )
                self.logger.info("Check OK.")
def import_check(self):
libraries = Resource.objects.filter(origins__data_source=self.data_source)
if self.options.get("single", None):
libraries = libraries.filter(origins__origin_id=self.options["single"])
start_date = self.options.get("date", None)
if start_date:
start_date = datetime.strptime(start_date, "%Y-%m-%d").date()
self.logger.info("{} libraries found".format(libraries.count()))
import_start_date, import_end_date = self.get_date_range(
start=start_date, back=0
)
for library in libraries:
self.logger.info(
'Fetching schedule for "{}" id:{}...'.format(library.name, library.id)
)
data = self.get_hours_from_api(library, import_start_date, import_end_date)
self.check_library_data(library, data, import_start_date, import_end_date)
self.logger.info("Check OK.")
| 40.089903 | 88 | 0.513127 |
7955f4f44be847e24200043875087d29b51798d7 | 5,182 | py | Python | samples/mnist/main.py | Sergiodiaz53/tensorflow-layer-library | c4c71b59c034f4ec894580943e161c2971a92035 | [
"MIT"
] | 9 | 2017-06-11T08:48:11.000Z | 2018-09-06T10:06:22.000Z | samples/mnist/main.py | Sergiodiaz53/tensorflow-layer-library | c4c71b59c034f4ec894580943e161c2971a92035 | [
"MIT"
] | 2 | 2018-06-22T08:38:49.000Z | 2018-06-22T09:19:07.000Z | samples/mnist/main.py | Sergiodiaz53/tensorflow-layer-library | c4c71b59c034f4ec894580943e161c2971a92035 | [
"MIT"
] | 6 | 2018-06-15T14:15:52.000Z | 2018-11-19T21:56:51.000Z | """
© Michael Widrich, Markus Hofmarcher, 2017
Example for mnist predictions via dense network
Command-line usage:
>>> python3 samples/mnist/main_convlstm.py --config=samples/mnist/config.json
"""
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
# Import TeLL
from TeLL.config import Config
from TeLL.session import TeLLSession
from TeLL.utility.timer import Timer
from TeLL.utility.misc import AbortRun, check_kill_file
from TeLL.regularization import decor_penalty
# Import Tensorflow
if __name__ == "__main__":
    # Only pull in TensorFlow when run as a script, keeping plain imports
    # of this module cheap.
    import tensorflow as tf
# ----------------------------------------------------------------------------------------------------------------------
# Functions
# ----------------------------------------------------------------------------------------------------------------------
def main(_):
    """Train a TeLL dense network on MNIST with an entropy-regularized loss.

    The single positional argument is the unused argv list passed in by
    ``tf.app.run``.
    """
    config = Config()
    # Create new TeLL session with two summary writers
    tell = TeLLSession(config=config, summaries=["train", "validation"])
    # Get some members from the session for easier usage
    session = tell.tf_session
    summary_writer_train, summary_writer_validation = tell.tf_summaries["train"], tell.tf_summaries["validation"]
    model = tell.model
    workspace, config = tell.workspace, tell.config
    # Parameters
    # NOTE(review): learning_rate is read from config but never used below —
    # presumably the optimizer's rate comes from the TeLL config instead; confirm.
    learning_rate = config.get_value("learning_rate", 1e-3)
    iterations = config.get_value("iterations", 1000)
    batchsize = config.get_value("batchsize", 250)
    display_step = config.get_value("display_step", 10)
    dropout = config.get_value("dropout_prob", 0.25)
    #
    # Load Data
    #
    with Timer(name="Load data"):
        mnist = input_data.read_data_sets("../MNIST_data", one_hot=True)
    # Define loss and optimizer
    with tf.name_scope("Cost"):
        cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=model.output, labels=model.y_))
        ##entropy = tf.reduce_mean(tf.contrib.bayesflow.entropy.entropy_shannon(
        ##    tf.contrib.distributions.Categorical(p=tf.nn.softmax(logits=model.output))))
        probs = tf.nn.softmax(logits=model.output)
        # Shannon entropy of the output distribution; the 1e-15 floor avoids log(0).
        entropy = tf.reduce_mean(-tf.reduce_sum(tf.log(tf.maximum(probs, 1e-15)) * probs, 1))
        # test decor regularization
        #decor_penalty(model.hidden1, model.y_, 10, [1], 0.)
        #decor_penalty(model.hidden2, model.y_, 10, [1], 0.)
        # Minimizing (cost - w*entropy) encourages higher-entropy predictions.
        optimizer = tell.tf_optimizer.minimize(cost - config.get_value("entropy_w", 0.) * entropy)
        tf.summary.scalar("Loss", cost)
        #tf.summary.scalar("Decor", decor1 + decor2)
        #tf.summary.scalar("Entropy", entropy)
        tf.summary.scalar("O-Prob", tf.reduce_mean(tf.reduce_sum(tf.nn.softmax(logits=model.output) * model.y_, 1)))
    # Evaluate model
    with tf.name_scope("Accuracy"):
        correct_pred = tf.equal(tf.argmax(model.output, 1), tf.argmax(model.y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))
        tf.summary.scalar("Accuracy", accuracy)
    merged_summaries = tf.summary.merge_all()
    # Initialize tensorflow variables (either initializes them from scratch or restores from checkpoint)
    step = tell.initialize_tf_variables(reset_optimizer_on_restore=True).global_step
    # -------------------------------------------------------------------------
    # Start training
    # -------------------------------------------------------------------------
    acc_train = 0.
    val_acc_best = 0.
    try:
        while step < iterations:
            # Allows the run to be aborted by dropping a kill file in the workspace.
            check_kill_file(workspace=workspace)
            batch_x, batch_y = mnist.train.next_batch(batchsize)
            i = step * batchsize
            if step % display_step == 0:
                # Periodic validation pass (no dropout, no optimizer step).
                summary, acc = session.run([merged_summaries, accuracy],
                                           feed_dict={model.X: mnist.validation.images[:2048],
                                                      model.y_: mnist.validation.labels[:2048],
                                                      model.dropout: 0})
                summary_writer_validation.add_summary(summary, i)
                print('step {}: train acc {}, valid acc {}'.format(i, acc_train, acc))
                if acc > val_acc_best:
                    val_acc_best = acc
            else:
                # Regular training step with dropout enabled.
                summary, acc_train, _ = session.run([merged_summaries, accuracy, optimizer],
                                                    feed_dict={model.X: batch_x, model.y_: batch_y,
                                                               model.dropout: dropout})
                summary_writer_train.add_summary(summary, i)
            step += 1
        print("Training Finished! best valid acc {}".format(val_acc_best))
        # Final Eval
        print("Test Accuracy:",
              session.run(accuracy, feed_dict={model.X: mnist.test.images[:2048],
                                               model.y_: mnist.test.labels[:2048],
                                               model.dropout: 0}))
    except AbortRun:
        print("Aborting...")
    finally:
        # Always persist the session state, even on abort.
        tell.close(global_step=step)
if __name__ == "__main__":
    # TF1-style entry point: parses flags and invokes main(argv).
    tf.app.run()
| 40.484375 | 120 | 0.567349 |
7955f6f65e258d3efa2b9688217674b56ef226af | 2,466 | py | Python | test_czech_sort/test_data.py | jiri-one/czech-sort | c0ab0b3ea65f7c8eedb6973424656ff9ea7f8671 | [
"MIT"
] | 9 | 2015-09-17T10:45:59.000Z | 2021-08-30T20:24:52.000Z | test_czech_sort/test_data.py | jiri-one/czech-sort | c0ab0b3ea65f7c8eedb6973424656ff9ea7f8671 | [
"MIT"
] | 2 | 2020-09-18T14:17:51.000Z | 2021-08-30T14:47:56.000Z | test_czech_sort/test_data.py | jiri-one/czech-sort | c0ab0b3ea65f7c8eedb6973424656ff9ea7f8671 | [
"MIT"
] | 1 | 2021-08-28T10:11:29.000Z | 2021-08-28T10:11:29.000Z | # For Python 2, we need to declare the encoding: UTF-8, of course.
from __future__ import unicode_literals
import sys
import czech_sort
import pytest
# Each entry below is a sequence already in correct Czech collation order;
# the tests sort a shuffled copy and compare against it.
inputs = (
    # Examples from Wikipedia:
    # https://cs.wikipedia.org/wiki/Abecedn%C3%AD_%C5%99azen%C3%AD
    [' '] + '-'.split() +
    'A B C Č D E F G H Ch I J K L M N O P Q R Ř S Š T U V W X Y Z Ž'.split() +
    '0 1 2 3 4 5 6 7 8 9'.split() +
    [],
    'a á b c č d ď e é ě f g h ch i í j k l m n ň o ó p q r ř s š t ť u ú ů v w x y ý z ž'.split(),
    ['padá', 'sál', 'sála', 'sálá', 'säla', 'satira', 'si lehá', 'si nese',
     'sílí', 'šála', 'šat', 'ta'],
    # Examples from ÚJČ AV ČR:
    # http://prirucka.ujc.cas.cz/?action=view&id=900
    ['shoda', 'schody', 'sídliště'],
    ['motýl noční', 'motýlek'],
    ['damašek', 'Damašek'],
    ['da capo', 'ďábel', 'dabing', 'ucho', 'úchop', 'uchopit'],
    ['kanon', 'kanón', 'kaňon', 'kánon'],
    'á ď é ě í ň ó ť ú ů ý'.split(),
    'à â ä ç è ê ĺ ľ ł ô ö ŕ ü ż'.split(),
    'C Ç °C'.split(),
    # XXX: 'C Ç °C X Xⁿ Xₑ Xⁿₑ'.split(),
    'ZZ Z-2 Ž 3 3N 3no 5A 8'.split(),
    # XXX: Symbols
    '@&€£§%‰$',
    # Others
    ['cyp', 'Cyp', 'CYP', 'čáp', 'Čáp', 'ČÁP', 'čupřina', 'Čupřina', 'ČUPŘINA'],
    ['goa uld', 'goa xyz', 'goa-uld', 'goauld', 'goàuld', "goa'uld", 'goa-xyz'],
    ['mac', 'mác', 'mah', 'máh', 'mach', 'mách', 'máchl', 'moh'],
    "ȧ á ā à â ǎ ã ă ȃ å ä a̋ ȁ ą a' °a".split(),
    ['', ' ', '-', "'"],
    ['è', 'ê', 'ề'],
    ['a\n b', 'a \nb', 'a\nb', 'a b', 'ab'],
    ['Ļ', 'Ł', 'M', 'Ơ', 'Ø', 'P'],
)
def pytest_generate_tests(metafunc):
    """Parametrize the collected tests from the module-level ``inputs`` data."""
    name = metafunc.function.__name__
    if name == 'test_sorted':
        # One case per expected-order list.
        metafunc.parametrize('l', [list(l) for l in inputs])
    if name == 'test_key':
        # One case per individual string across all lists.
        metafunc.parametrize('s', [c for l in inputs for c in l])
def test_sorted(l):
    """Czech-sorting a reversed copy must reproduce the expected order."""
    shuffled = reversed(l)
    result = czech_sort.sorted(shuffled)
    print('exp:', l)
    print('got:', result)
    assert l == result
def test_key(s):
    """Assert keys are immutable and well ordered"""
    # Actually, this is a strict type check
    sort_key = czech_sort.key(s)
    check_key_element(sort_key)
def check_key_element(t):
    """Recursively assert that *t* is built only of str/int/bool and tuples.

    Raises AssertionError for any other type; returns True for leaf values
    and None after recursing into a tuple.
    """
    leaf_types = (str, int, bool)
    if type(t) in leaf_types:
        return True
    # Python 2 compatibility: unicode leaves are also fine there.
    if sys.version_info < (3, 0) and type(t) is unicode:
        return True
    if type(t) is tuple:
        for child in t:
            check_key_element(child)
        return
    raise AssertionError('{0} is a {1}'.format(t, type(t)))
| 30.073171 | 99 | 0.531225 |
7955f7f8b287bfdba53cebebfd311900e3182f72 | 712 | py | Python | python/fate_arch/computing/_util.py | hubert-he/FATE | 6758e150bd7ca7d6f788f9a7a8c8aea7e6500363 | [
"Apache-2.0"
] | 3,787 | 2019-08-30T04:55:10.000Z | 2022-03-31T23:30:07.000Z | python/fate_arch/computing/_util.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 1,439 | 2019-08-29T16:35:52.000Z | 2022-03-31T11:55:31.000Z | python/fate_arch/computing/_util.py | JavaGreenHands/FATE | ea1e94b6be50c70c354d1861093187e523af32f2 | [
"Apache-2.0"
] | 1,179 | 2019-08-29T16:18:32.000Z | 2022-03-31T12:55:38.000Z | #
# Copyright 2019 The Eggroll Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from fate_arch.abc import CTableABC
def is_table(v):
    """Return True if ``v`` is a computing table (implements CTableABC)."""
    return isinstance(v, CTableABC)
7955f8dfdb9f3b1f9bc9eddcfbb91ed300f2f8c3 | 777 | py | Python | sapp/sarif_types.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 74 | 2020-12-18T20:04:30.000Z | 2022-03-22T22:26:02.000Z | sapp/sarif_types.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 61 | 2020-12-21T21:33:05.000Z | 2022-01-27T21:22:20.000Z | sapp/sarif_types.py | facebook/sapp | 4b85d10a791d8e9c8ae83d1f62fbded24845f053 | [
"MIT"
] | 20 | 2021-04-08T01:28:53.000Z | 2022-03-22T22:26:05.000Z | # Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# pyre-strict
import sys
from enum import Enum
from typing import Dict, Union, List
# TypeAlias was added to typing in Python 3.10; fall back to
# typing_extensions on older interpreters.
if sys.version_info >= (3, 10):
    from typing import TypeAlias
else:
    from typing_extensions import TypeAlias
class SARIFSeverityLevel(Enum):
    """Severity levels for a SARIF result ("level" property values)."""

    WARNING = "warning"
    ERROR = "error"
    NOTE = "note"
    NONE = "none"

    def __str__(self) -> str:
        # Render as the raw SARIF level string.
        return str(self.value)
# A SARIF "region" object with integer-valued fields
# (e.g. startLine/startColumn/endLine/endColumn).
SARIFRegionObject: TypeAlias = Dict[str, int]

# Shape of one SARIF "result" entry: string values, message dicts, and a
# locations list whose entries nest region objects.
SARIFResult: TypeAlias = Dict[
    str,
    Union[
        Dict[str, str],
        List[Dict[str, Dict[str, Union[SARIFRegionObject, Dict[str, str]]]]],
        str,
    ],
]
| 19.923077 | 77 | 0.66538 |
7955fae37481b0976651d008fa07df1feb085364 | 372 | py | Python | app/test/base.py | raekw0n/raspi-mon-api | ea82f3424a4a5b7ddfe63b37a1a01e5c06c545d2 | [
"MIT"
] | null | null | null | app/test/base.py | raekw0n/raspi-mon-api | ea82f3424a4a5b7ddfe63b37a1a01e5c06c545d2 | [
"MIT"
] | null | null | null | app/test/base.py | raekw0n/raspi-mon-api | ea82f3424a4a5b7ddfe63b37a1a01e5c06c545d2 | [
"MIT"
] | null | null | null | from flask_testing import TestCase
from app.main import db
from manage import app
class BaseTestCase(TestCase):
    """Flask-Testing base case: testing config plus a fresh DB per test."""

    def create_app(self):
        # Flask-Testing hook: configure and return the app under test.
        app.config.from_object('app.main.config.TestingConfig')
        return app

    def setUp(self):
        # Create all tables before each test.
        db.create_all()
        db.session.commit()

    def tearDown(self):
        # Drop everything so tests stay isolated from each other.
        db.session.remove()
        db.drop_all()
| 20.666667 | 63 | 0.658602 |
7955fb324e8ac97ce16fb73256cc0112e87a8092 | 9,075 | py | Python | examples/vision/anilkfo_cifarfs.py | Brikwerk/learn2learn | 7997c13c26ec627d13ce77ba98427260df78ada8 | [
"MIT"
] | 1,774 | 2019-09-05T20:41:16.000Z | 2022-03-30T09:49:02.000Z | examples/vision/anilkfo_cifarfs.py | Kostis-S-Z/learn2learn | c0b7c088f15986880b136ec27059644ac513db60 | [
"MIT"
] | 196 | 2019-09-05T08:11:31.000Z | 2022-03-31T12:08:25.000Z | examples/vision/anilkfo_cifarfs.py | Kostis-S-Z/learn2learn | c0b7c088f15986880b136ec27059644ac513db60 | [
"MIT"
] | 266 | 2019-09-13T10:17:54.000Z | 2022-03-28T07:17:21.000Z | #!/usr/bin/env python3
"""
File: anilkfo_cifarfs.py
Author: Seb Arnold - seba1511.net
Email: smr.arnold@gmail.com
Github: seba-1511
Description:
Demonstrates how to use the low-level differentiable optimization utilities
to implement ANIL+KFC on CIFAR-FS.
A demonstration of the high-level API is available in:
examples/vision/metacurvature_fc100.py
"""
import random
import numpy as np
import torch
import learn2learn as l2l
class CifarCNN(torch.nn.Module):
    """
    Example of a 4-layer CNN network for FC100/CIFAR-FS.

    output_size: number of classes (ways); hidden_size: channels per conv
    block; layers: number of conv blocks in the ConvBase backbone.
    """

    def __init__(self, output_size=5, hidden_size=32, layers=4):
        super(CifarCNN, self).__init__()
        self.hidden_size = hidden_size
        features = l2l.vision.models.ConvBase(
            output_size=hidden_size,
            hidden=hidden_size,
            channels=3,
            max_pool=False,
            layers=layers,
            max_pool_factor=0.5,
        )
        self.features = torch.nn.Sequential(
            features,
            # Global average pool over spatial dims, then flatten to (N, hidden).
            l2l.nn.Lambda(lambda x: x.mean(dim=[2, 3])),
            l2l.nn.Flatten(),
        )
        self.linear = torch.nn.Linear(self.hidden_size, output_size, bias=True)
        # MAML-friendly initialization of the classification head.
        l2l.vision.models.maml_init_(self.linear)

    def forward(self, x):
        x = self.features(x)
        x = self.linear(x)
        return x
def accuracy(predictions, targets):
    """Fraction of rows whose argmax prediction equals the target label."""
    predicted_labels = predictions.argmax(dim=1).view(targets.shape)
    num_correct = (predicted_labels == targets).sum().float()
    return num_correct / targets.size(0)
def fast_adapt(
        batch,
        features,
        classifier,
        update,
        diff_sgd,
        loss,
        adaptation_steps,
        shots,
        ways,
        device):
    """Adapt the classifier (and the learned update) on one task, then evaluate.

    batch: (data, labels) task sample; features: frozen feature extractor;
    classifier: head to adapt; update: learnable update function; diff_sgd:
    differentiable SGD applying updates in-place; returns (eval_loss, eval_acc)
    with eval_loss still attached to the graph for the outer meta-update.
    """
    data, labels = batch
    data, labels = data.to(device), labels.to(device)
    data = features(data)

    # Separate data into adaptation/evaluation sets: even positions of the
    # first shots*ways samples adapt, everything else evaluates.
    adaptation_indices = np.zeros(data.size(0), dtype=bool)
    adaptation_indices[np.arange(shots*ways) * 2] = True
    evaluation_indices = torch.from_numpy(~adaptation_indices)
    adaptation_indices = torch.from_numpy(adaptation_indices)
    adaptation_data, adaptation_labels = data[adaptation_indices], labels[adaptation_indices]
    evaluation_data, evaluation_labels = data[evaluation_indices], labels[evaluation_indices]

    # Adapt the model & learned update
    for step in range(adaptation_steps):
        adaptation_error = loss(classifier(adaptation_data), adaptation_labels)
        if step > 0:  # Update the learnable update function
            # create_graph/retain_graph keep second-order gradients for the
            # outer-loop meta-update.
            update_grad = torch.autograd.grad(adaptation_error,
                                              update.parameters(),
                                              create_graph=True,
                                              retain_graph=True)
            diff_sgd(update, update_grad)
        classifier_updates = update(adaptation_error,
                                    classifier.parameters(),
                                    create_graph=True,
                                    retain_graph=True)
        diff_sgd(classifier, classifier_updates)

    # Evaluate the adapted model
    predictions = classifier(evaluation_data)
    eval_error = loss(predictions, evaluation_labels)
    eval_accuracy = accuracy(predictions, evaluation_labels)
    return eval_error, eval_accuracy
def main(
        fast_lr=0.1,
        meta_lr=0.003,
        num_iterations=10000,
        meta_batch_size=16,
        adaptation_steps=5,
        shots=5,
        ways=5,
        cuda=1,
        seed=1234
):
    """Train ANIL+KFO on CIFAR-FS: frozen features, adapted head, learned
    Kronecker-factored update; meta-optimized with Adam.

    fast_lr: inner-loop SGD rate; meta_lr: outer Adam rate; shots/ways define
    the few-shot task shape; cuda enables GPU when available.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    device = torch.device('cpu')
    if cuda and torch.cuda.device_count():
        torch.cuda.manual_seed(seed)
        device = torch.device('cuda')

    # Create Tasksets using the benchmark interface
    tasksets = l2l.vision.benchmarks.get_tasksets(
        name='cifarfs',
        train_samples=2*shots,
        train_ways=ways,
        test_samples=2*shots,
        test_ways=ways,
        root='~/data',
    )

    # Create model and learnable update
    model = CifarCNN(output_size=ways)
    model.to(device)
    features = model.features
    classifier = model.linear
    # Kronecker-factored transform of the loss gradient (the "KFO" part).
    kfo_transform = l2l.optim.transforms.KroneckerTransform(l2l.nn.KroneckerLinear)
    fast_update = l2l.optim.ParameterUpdate(
        parameters=classifier.parameters(),
        transform=kfo_transform,
    )
    fast_update.to(device)
    diff_sgd = l2l.optim.DifferentiableSGD(lr=fast_lr)

    # Meta-optimizer over both the network and the learned update.
    all_parameters = list(model.parameters()) + list(fast_update.parameters())
    opt = torch.optim.Adam(all_parameters, meta_lr)
    loss = torch.nn.CrossEntropyLoss(reduction='mean')

    for iteration in range(num_iterations):
        opt.zero_grad()
        meta_train_error = 0.0
        meta_train_accuracy = 0.0
        meta_valid_error = 0.0
        meta_valid_accuracy = 0.0
        for task in range(meta_batch_size):
            # Compute meta-training loss
            task_features = l2l.clone_module(features)
            task_classifier = l2l.clone_module(classifier)
            task_update = l2l.clone_module(fast_update)
            batch = tasksets.train.sample()
            evaluation_error, evaluation_accuracy = fast_adapt(batch,
                                                               task_features,
                                                               task_classifier,
                                                               task_update,
                                                               diff_sgd,
                                                               loss,
                                                               adaptation_steps,
                                                               shots,
                                                               ways,
                                                               device)
            # Accumulate gradients into the shared parameters (clones share grads).
            evaluation_error.backward()
            meta_train_error += evaluation_error.item()
            meta_train_accuracy += evaluation_accuracy.item()

            # Compute meta-validation loss (no backward: monitoring only)
            task_features = l2l.clone_module(features)
            task_classifier = l2l.clone_module(classifier)
            task_update = l2l.clone_module(fast_update)
            batch = tasksets.validation.sample()
            evaluation_error, evaluation_accuracy = fast_adapt(batch,
                                                               task_features,
                                                               task_classifier,
                                                               task_update,
                                                               diff_sgd,
                                                               loss,
                                                               adaptation_steps,
                                                               shots,
                                                               ways,
                                                               device)
            meta_valid_error += evaluation_error.item()
            meta_valid_accuracy += evaluation_accuracy.item()

        # Print some metrics
        print('\n')
        print('Iteration', iteration)
        print('Meta Train Error', meta_train_error / meta_batch_size)
        print('Meta Train Accuracy', meta_train_accuracy / meta_batch_size)
        print('Meta Valid Error', meta_valid_error / meta_batch_size)
        print('Meta Valid Accuracy', meta_valid_accuracy / meta_batch_size)

        # Average the accumulated gradients and optimize
        for p in model.parameters():
            p.grad.data.mul_(1.0 / meta_batch_size)
        for p in fast_update.parameters():
            p.grad.data.mul_(1.0 / meta_batch_size)
        opt.step()

    meta_test_error = 0.0
    meta_test_accuracy = 0.0
    for task in range(meta_batch_size):
        # Compute meta-testing loss
        task_features = l2l.clone_module(features)
        task_classifier = l2l.clone_module(classifier)
        task_update = l2l.clone_module(fast_update)
        batch = tasksets.test.sample()
        evaluation_error, evaluation_accuracy = fast_adapt(batch,
                                                           task_features,
                                                           task_classifier,
                                                           task_update,
                                                           diff_sgd,
                                                           loss,
                                                           adaptation_steps,
                                                           shots,
                                                           ways,
                                                           device)
        meta_test_error += evaluation_error.item()
        meta_test_accuracy += evaluation_accuracy.item()
    print('Meta Test Error', meta_test_error / meta_batch_size)
    print('Meta Test Accuracy', meta_test_accuracy / meta_batch_size)
if __name__ == '__main__':
    # Run training with the default hyper-parameters.
    main()
| 38.948498 | 93 | 0.534215 |
7955fb47fb019a8049ae40c12c69881594cf910e | 303 | py | Python | themissc/Pos/_Pos.py | mattkjames7/themissc | 1e7257d60da1069fffc7fed848ddcf5c780e9250 | [
"MIT"
] | null | null | null | themissc/Pos/_Pos.py | mattkjames7/themissc | 1e7257d60da1069fffc7fed848ddcf5c780e9250 | [
"MIT"
] | null | null | null | themissc/Pos/_Pos.py | mattkjames7/themissc | 1e7257d60da1069fffc7fed848ddcf5c780e9250 | [
"MIT"
] | null | null | null | import numpy as np
from .. import Globals
#this just stores a few variables for this particular instrument
#data path and index file name: format(Prod,L,sc)
idxfname = Globals.DataPath + 'Pos/{:s}.{:s}.{:s}.dat'
datapath = Globals.DataPath + 'Pos/{:s}/{:s}/{:s}/'
#file version format
vfmt = 'v\d\d'
| 25.25 | 64 | 0.686469 |
7955fb638912f1c804652b33a4a31a2601290f0c | 36,305 | py | Python | pytests/test_cphd_consistency.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | [
"MIT"
] | 1 | 2021-07-05T15:14:03.000Z | 2021-07-05T15:14:03.000Z | pytests/test_cphd_consistency.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | [
"MIT"
] | 1 | 2021-08-31T10:27:15.000Z | 2021-08-31T19:42:04.000Z | pytests/test_cphd_consistency.py | khavernathy/sarpy | e0c2bb1a55f153628162ef7be89e9b7de34602df | [
"MIT"
] | 1 | 2021-07-17T12:49:57.000Z | 2021-07-17T12:49:57.000Z | #
# Copyright 2020-2021 Valkyrie Systems Corporation
#
# Licensed under MIT License. See LICENSE.
#
import copy
import os
import re
import shutil
import tempfile
from lxml import etree
import numpy as np
import pytest
from sarpy.consistency import cphd_consistency
from sarpy.io.phase_history.cphd_schema import get_schema_path
# Short label -> CPHD file name for each test input flavor.
TEST_FILE_NAMES = {
    'simple': 'spotlight_example.cphd',
    'has_antenna': 'has_antenna.cphd',
    'bistatic': 'bistatic.cphd',
}

# Resolve the names against the SARPY_TEST_PATH data tree (env var required).
TEST_FILE_PATHS = {k: os.path.join(os.environ['SARPY_TEST_PATH'], 'cphd', v) for k,v in TEST_FILE_NAMES.items()}

# Default known-good file and the schema it should validate against.
GOOD_CPHD = TEST_FILE_PATHS['simple']
DEFAULT_SCHEMA = get_schema_path(version='1.0.1')
def make_elem(tag, text=None, children=None, namespace=None, attributes=None, **attrib):
    """
    Creates described element.

    Creates the Element with tag name, text, and attributes given. Attributes
    can be specified as either a dictionary or keyword arguments; entries in
    ``attributes`` take precedence over keyword arguments of the same name.

    Parameters
    ----------
    tag : str
        A string that will become the tag name.
    text : str
        A string that will become the text in the element. Booleans are
        lowercased and non-strings go through ``repr``. (Default: ``None``)
    children : lxml.etree.ElementTree
        The children elements; ``None`` entries are skipped. (Default: ``None``)
    namespace : str
        The string containing the namespace. (Default: ``None``)
    attributes : dict
        A dictionary mapping attribute names to values. (Default: ``None``)
    **attrib : list
        Keyword arguments that map to attributes. (Default: ``None``)

    Returns
    -------
    lxml.etree.ElementTree.Element
    """
    if attributes is None:
        attributes = {}
    if text is not None:
        if isinstance(text, bool):
            text = str(text).lower()
        if not isinstance(text, str):
            text = repr(text)
    # Merge keyword attributes with the explicit dict (dict wins) and
    # stringify all values, as etree requires.
    attrib = copy.copy(attrib)
    attrib.update(attributes)
    attrib = {key: str(value) for key, value in attrib.items()}
    if namespace is not None:
        tag = '{{{namespace}}}{tag}'.format(namespace=namespace, tag=tag)
    retval = etree.Element(tag, attrib)
    if text is not None:
        retval.text = str(text)
    if children is not None:
        retval.extend([child for child in children if child is not None])
    return retval
@pytest.fixture
def tmpdir():
    """Yield a scratch directory that is removed after the test."""
    scratch = tempfile.mkdtemp()
    yield scratch
    shutil.rmtree(scratch)
def _read_xml_str(cphd_path):
    """Return the XML block of a CPHD file, decoded to a string."""
    with open(cphd_path, 'rb') as stream:
        header = cphd_consistency.read_header(stream)
        offset = header['XML_BLOCK_BYTE_OFFSET']
        size = header['XML_BLOCK_SIZE']
        stream.seek(offset, 0)
        return stream.read(size).decode()
@pytest.fixture(scope='module')
def good_xml_str():
    # XML block of the known-good CPHD, read once per module.
    return _read_xml_str(GOOD_CPHD)
@pytest.fixture(scope='module')
def good_xml_with_antenna():
    """Namespace-stripped XML root of the antenna-bearing test file."""
    parsed = etree.fromstring(_read_xml_str(TEST_FILE_PATHS['has_antenna']))
    return cphd_consistency.strip_namespace(parsed)
@pytest.fixture
def good_xml(good_xml_str):
    """Known-good XML parsed both with and without namespaces, plus the nsmap."""
    with_ns = etree.fromstring(good_xml_str)
    without_ns = cphd_consistency.strip_namespace(etree.fromstring(good_xml_str))
    # Extract the default namespace URI from the root tag's {uri}name form.
    namespace = re.match(r'\{(.*)\}', with_ns.tag).group(1)
    yield {
        'with_ns': with_ns,
        'without_ns': without_ns,
        'nsmap': {'ns': namespace},
    }
@pytest.fixture
def good_header():
    # Parsed file header (KVP dict) of the known-good CPHD.
    with open(GOOD_CPHD, 'rb') as fid:
        return cphd_consistency.read_header(fid)
def remove_nodes(*nodes):
    """Detach each given element from its parent, in order."""
    for node in nodes:
        parent = node.getparent()
        parent.remove(node)
def copy_xml(elem):
    # Deep-copy an element tree by serializing and re-parsing it.
    return etree.fromstring(etree.tostring(elem))
def test_from_file_cphd():
    """A known-good CPHD file loads and passes every consistency check."""
    con = cphd_consistency.CphdConsistency.from_file(str(GOOD_CPHD), DEFAULT_SCHEMA, True)
    assert isinstance(con, cphd_consistency.CphdConsistency)
    con.check()
    assert not con.failures()
def test_from_file_xml(good_xml_str, tmpdir):
    """A bare XML file (no binary blocks) also loads and passes every check."""
    xml_path = os.path.join(tmpdir, 'cphd.xml')
    with open(xml_path, 'w') as out:
        out.write(good_xml_str)
    con = cphd_consistency.CphdConsistency.from_file(str(xml_path), DEFAULT_SCHEMA, False)
    assert isinstance(con, cphd_consistency.CphdConsistency)
    con.check()
    assert not con.failures()
def test_main(good_xml_str, tmpdir):
    """The CLI entry point succeeds for its main argument combinations."""
    for extra_args in (['--schema', DEFAULT_SCHEMA, '--signal-data'],
                       ['--noschema'],
                       []):
        assert not cphd_consistency.main([str(GOOD_CPHD)] + extra_args)
    xml_file = os.path.join(tmpdir, 'cphd.xml')
    with open(xml_file, 'w') as out:
        out.write(good_xml_str)
    assert not cphd_consistency.main([str(xml_file), '-v'])
# The CLI should succeed on every bundled test file flavor.
@pytest.mark.parametrize('cphd_file', TEST_FILE_PATHS.values())
def test_main_each_file(cphd_file):
    assert not cphd_consistency.main([cphd_file])
def test_xml_schema_error(good_xml):
    """Removing a required node must trigger a schema-validation failure."""
    broken = copy_xml(good_xml['with_ns'])
    remove_nodes(*broken.xpath('./ns:Global/ns:DomainType', namespaces=good_xml['nsmap']))
    con = cphd_consistency.CphdConsistency(broken, pvps={}, header=None, filename=None,
                                           schema=DEFAULT_SCHEMA,
                                           check_signal_data=False)
    con.check('check_against_schema')
    assert con.failures()
def test_check_unconnected_ids_severed_node(good_xml, good_header):
    """Corrupting a referenced Identifier must fail the unconnected-ids check.

    FIX: ``good_header`` is now requested as a fixture argument; previously
    the module-level fixture *function* object was passed as the header.
    """
    bad_xml = copy_xml(good_xml['without_ns'])
    bad_xml.find('./Dwell/CODTime/Identifier').text += '-make-bad'
    cphd_con = cphd_consistency.CphdConsistency(bad_xml, pvps={}, header=good_header, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_unconnected_ids')
    assert cphd_con.failures()
def test_check_unconnected_ids_extra_node(good_xml, good_header):
    """A duplicated, unreferenced AntCoordFrame must fail the unconnected-ids check.

    FIX: ``good_header`` is now requested as a fixture argument; previously
    the module-level fixture *function* object was passed as the header.
    """
    bad_xml = copy_xml(good_xml['without_ns'])
    first_acf = bad_xml.find('./Antenna/AntCoordFrame')
    extra_acf = copy.deepcopy(first_acf)
    extra_acf.find('./Identifier').text += '_superfluous'
    first_acf.getparent().append(extra_acf)
    cphd_con = cphd_consistency.CphdConsistency(bad_xml, pvps={}, header=good_header, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_unconnected_ids')
    assert cphd_con.failures()
def test_check_classification_and_release_info_error(good_xml, good_header):
    """Mismatched ReleaseInfo must fail the classification/release check.

    FIX: ``good_header`` is now requested as a fixture argument; previously
    the module-level fixture *function* object was passed as the header.
    """
    bad_xml = copy_xml(good_xml['without_ns'])
    bad_xml.find('./CollectionID/ReleaseInfo').text += '-make-bad'
    cphd_con = cphd_consistency.CphdConsistency(bad_xml, pvps={}, header=good_header, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_classification_and_release_info')
    assert cphd_con.failures()
def test_error_in_check(good_xml):
    """Dropping a channel's DwellTimes/CODId must make the per-channel dwell checks fail."""
    nsmap = good_xml['nsmap']
    broken = copy_xml(good_xml['with_ns'])
    remove_nodes(*broken.xpath('./ns:Channel/ns:Parameters/ns:DwellTimes/ns:CODId', namespaces=nsmap))
    checker = cphd_consistency.CphdConsistency(broken, pvps={}, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    # One dwell-exist check per channel listed in the Data branch
    check_names = [
        'check_channel_dwell_exist_{}'.format(ident.text)
        for ident in broken.findall('./ns:Data/ns:Channel/ns:Identifier', namespaces=nsmap)
    ]
    checker.check(check_names)
    assert checker.failures()
def test_polygon_size_error(good_xml):
    """An ImageArea polygon whose declared size disagrees with its vertices must fail."""
    nsmap = good_xml['nsmap']
    broken = copy_xml(good_xml['with_ns'])
    polygon = broken.find('./ns:SceneCoordinates/ns:ImageArea/ns:Polygon', namespaces=nsmap)
    # Lie about the vertex count
    polygon.attrib['size'] = "12345678890"
    checker = cphd_consistency.CphdConsistency(broken, pvps={}, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check('check_global_imagearea_polygon')
    assert checker.failures()
def test_polygon_winding_error(good_xml):
    """Reversing the polygon's vertex order (winding direction) must fail the check."""
    bad_xml = copy_xml(good_xml['with_ns'])
    ia_polygon_node = bad_xml.find('./ns:SceneCoordinates/ns:ImageArea/ns:Polygon', namespaces=good_xml['nsmap'])
    size = int(ia_polygon_node.attrib['size'])
    # Reverse the order of the vertices
    for vertex in ia_polygon_node:
        vertex.attrib['index'] = str(size - int(vertex.attrib['index']) + 1)
    cphd_con = cphd_consistency.CphdConsistency(bad_xml, pvps={}, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_global_imagearea_polygon')
    assert cphd_con.failures()
@pytest.fixture
def xml_with_signal_normal(good_xml):
    """Fixture: XML with SignalNormal=true per channel plus all-ones SIGNAL PVPs.

    Returns (pvps dict keyed by channel id, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    pvps = {}
    for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
        chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
        num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
        # SIGNAL == 1 for every vector, matching SignalNormal=true
        pvps[chan_id] = np.ones(num_vect, dtype=[('SIGNAL', 'i8')])
        chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
                                     namespaces=good_xml['nsmap'])[0]
        chan_param_node.append(make_elem('SignalNormal', 'true', namespace=good_xml['nsmap']['ns']))
    return pvps, root, good_xml['nsmap']
def test_signalnormal(xml_with_signal_normal):
    """All-ones SIGNAL PVPs with SignalNormal=true must pass the per-channel checks."""
    pvps, root, _ = xml_with_signal_normal
    checker = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check(['check_channel_signalnormal_{}'.format(chan) for chan in pvps.keys()])
    assert not checker.failures()
def test_signalnormal_bad_pvp(xml_with_signal_normal):
    """SIGNAL PVP inconsistencies must fail exactly when SignalNormal claims normality.

    Three phases: (1) zeroed SIGNAL entries contradict SignalNormal=true and
    fail per channel; (2) flipping SignalNormal to false makes the same PVPs
    pass; (3) PVPs lacking a SIGNAL field entirely must fail.
    """
    pvps, root, nsmap = xml_with_signal_normal
    # Introduce one non-normal (zero) SIGNAL value per channel
    for idx, pvp in enumerate(pvps.values()):
        pvp['SIGNAL'][idx] = 0
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    tocheck = ['check_channel_signalnormal_{}'.format(key) for key in pvps.keys()]
    cphd_con.check(tocheck)
    assert len(cphd_con.failures()) == len(pvps)
    for norm_node in root.findall('./ns:Channel/ns:Parameters/ns:SignalNormal', namespaces=nsmap):
        norm_node.text = 'false'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check(tocheck)
    assert not cphd_con.failures()
    # PVP arrays without a SIGNAL field at all must be flagged
    no_sig_pvp = {name: np.zeros(pvp.shape, dtype=[('notsignal', 'i8')]) for name, pvp in pvps.items()}
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=no_sig_pvp, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check(tocheck)
    assert cphd_con.failures()
@pytest.fixture
def xml_without_fxfixed(good_xml):
    """Fixture: FXFixed/FXFixedCPHD set to false with genuinely varying FX1/FX2 PVPs.

    Returns (pvps dict keyed by channel id, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    pvps = {}
    for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
        chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
        num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
        pvps[chan_id] = np.zeros(num_vect, dtype=[('FX1', 'f8'), ('FX2', 'f8')])
        # Vector-dependent bands, so the data really is not "FX fixed"
        pvps[chan_id]['FX1'] = np.linspace(1.0, 1.1, num_vect)
        pvps[chan_id]['FX2'] = np.linspace(2.0, 2.2, num_vect)
        chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
                                     namespaces=good_xml['nsmap'])[0]
        chan_param_node.find('./ns:FXFixed', namespaces=good_xml['nsmap']).text = 'false'
    root.find('./ns:Channel/ns:FXFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
    return pvps, root, good_xml['nsmap']
def test_fxfixed(xml_without_fxfixed):
    """Varying FX1/FX2 PVPs with FXFixed=false must satisfy the FX-fixed checks."""
    pvps, root, _ = xml_without_fxfixed
    checker = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    check_names = ['check_channel_fxfixed_{}'.format(chan) for chan in pvps.keys()]
    check_names.append('check_file_fxfixed')
    checker.check(check_names)
    assert not checker.failures()
@pytest.fixture
def xml_without_toafixed(good_xml):
    """Fixture: TOAFixed/TOAFixedCPHD set to false with varying TOA1/TOA2 PVPs.

    Returns (pvps dict keyed by channel id, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    pvps = {}
    for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
        chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
        num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
        pvps[chan_id] = np.zeros(num_vect, dtype=[('TOA1', 'f8'), ('TOA2', 'f8')])
        # Vector-dependent swaths, so the data really is not "TOA fixed"
        pvps[chan_id]['TOA1'] = np.linspace(1.0, 1.1, num_vect)
        pvps[chan_id]['TOA2'] = np.linspace(2.0, 2.2, num_vect)
        chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
                                     namespaces=good_xml['nsmap'])[0]
        chan_param_node.find('./ns:TOAFixed', namespaces=good_xml['nsmap']).text = 'false'
    root.find('./ns:Channel/ns:TOAFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
    return pvps, root, good_xml['nsmap']
def test_channel_toafixed(xml_without_toafixed):
    """Varying TOA1/TOA2 PVPs with TOAFixed=false must satisfy the TOA-fixed checks."""
    pvps, root, _ = xml_without_toafixed
    checker = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    check_names = ['check_channel_toafixed_{}'.format(chan) for chan in pvps.keys()]
    check_names.append('check_file_toafixed')
    checker.check(check_names)
    assert not checker.failures()
@pytest.fixture
def xml_without_srpfixed(good_xml):
    """Fixture: SRPFixed/SRPFixedCPHD set to false with a moving SRPPos PVP.

    Returns (pvps dict keyed by channel id, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    pvps = {}
    for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
        chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
        num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
        pvps[chan_id] = np.zeros(num_vect, dtype=[('SRPPos', 'f8', 3)])
        # Per-vector SRP position, so the data really is not "SRP fixed"
        pvps[chan_id]['SRPPos'][:, 0] = np.linspace(1.0, 10, num_vect)
        pvps[chan_id]['SRPPos'][:, 1] = np.linspace(2.0, 20, num_vect)
        pvps[chan_id]['SRPPos'][:, 2] = np.linspace(3.0, 30, num_vect)
        chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
                                     namespaces=good_xml['nsmap'])[0]
        chan_param_node.find('./ns:SRPFixed', namespaces=good_xml['nsmap']).text = 'false'
    root.find('./ns:Channel/ns:SRPFixedCPHD', namespaces=good_xml['nsmap']).text = 'false'
    return pvps, root, good_xml['nsmap']
def test_channel_srpfixed(xml_without_srpfixed):
    """A moving SRPPos PVP with SRPFixed=false must satisfy the SRP-fixed checks."""
    pvps, root, _ = xml_without_srpfixed
    checker = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    check_names = ['check_channel_srpfixed_{}'.format(chan) for chan in pvps.keys()]
    check_names.append('check_file_srpfixed')
    checker.check(check_names)
    assert not checker.failures()
@pytest.fixture
def xml_with_txrcv(good_xml):
    """Fixture: adds a TxRcv branch (2 waveforms, 2 receivers) and references all
    four ids from the first channel's parameters.

    Returns (list of referencing channel ids, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    root.append(make_elem('TxRcv', namespace=good_xml['nsmap']['ns'], children=[
        make_elem('NumTxWFs', 2, namespace=good_xml['nsmap']['ns']),
        make_elem('TxWFParameters', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Identifier', 'wf_unit_test_1', namespace=good_xml['nsmap']['ns']),
        ]),
        make_elem('TxWFParameters', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Identifier', 'wf_unit_test_2', namespace=good_xml['nsmap']['ns']),
        ]),
        make_elem('NumRcvs', 2, namespace=good_xml['nsmap']['ns']),
        make_elem('RcvParameters', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Identifier', 'rcv_unit_test_1', namespace=good_xml['nsmap']['ns']),
        ]),
        make_elem('RcvParameters', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Identifier', 'rcv_unit_test_2', namespace=good_xml['nsmap']['ns']),
        ])
    ]))
    # Only the first channel's Parameters node gets the TxRcv references
    chan_param_node = root.xpath('./ns:Channel/ns:Parameters',
                                 namespaces=good_xml['nsmap'])[0]
    chan_param_node.append(make_elem('TxRcv', namespace=good_xml['nsmap']['ns'], children=[
        make_elem('TxWFId', 'wf_unit_test_1', namespace=good_xml['nsmap']['ns']),
        make_elem('TxWFId', 'wf_unit_test_2', namespace=good_xml['nsmap']['ns']),
        make_elem('RcvId', 'rcv_unit_test_1', namespace=good_xml['nsmap']['ns']),
        make_elem('RcvId', 'rcv_unit_test_2', namespace=good_xml['nsmap']['ns']),
    ]))
    chan_ids = [chan_param_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])]
    return chan_ids, root, good_xml['nsmap']
def test_txrcv(xml_with_txrcv):
    """Channel TxRcv ids that all resolve to declared parameter sets must pass."""
    chan_ids, root, _ = xml_with_txrcv
    checker = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check(['check_channel_txrcv_exist_{}'.format(chan) for chan in chan_ids])
    assert not checker.failures()
def test_txrcv_bad_txwfid(xml_with_txrcv):
    """A channel TxWFId that matches no TxWFParameters identifier must fail."""
    chan_ids, root, nsmap = xml_with_txrcv
    chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
                                 namespaces=nsmap)[0]
    chan_param_node.xpath('./ns:TxRcv/ns:TxWFId', namespaces=nsmap)[-1].text = 'missing'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    tocheck = ['check_channel_txrcv_exist_{}'.format(key) for key in chan_ids]
    cphd_con.check(tocheck)
    assert cphd_con.failures()
def test_antenna_bad_acf_count(good_xml):
    """A NumACFs that disagrees with the actual AntCoordFrame count must fail."""
    nsmap = good_xml['nsmap']
    broken = copy_xml(good_xml['with_ns'])
    antenna = broken.find('./ns:Antenna', namespaces=nsmap)
    # Corrupt the declared count by appending a digit
    antenna.xpath('./ns:NumACFs', namespaces=nsmap)[-1].text += '2'
    checker = cphd_consistency.CphdConsistency(broken, pvps=None, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check('check_antenna')
    assert checker.failures()
def test_antenna_bad_apc_count(good_xml):
    """A NumAPCs that disagrees with the actual phase-center count must fail."""
    root = copy_xml(good_xml['with_ns'])
    antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
    antenna_node.xpath('./ns:NumAPCs', namespaces=good_xml['nsmap'])[-1].text += '2'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_antenna')
    assert cphd_con.failures()
def test_antenna_bad_antpats_count(good_xml):
    """A NumAntPats that disagrees with the actual pattern count must fail."""
    root = copy_xml(good_xml['with_ns'])
    antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
    antenna_node.xpath('./ns:NumAntPats', namespaces=good_xml['nsmap'])[-1].text += '2'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_antenna')
    assert cphd_con.failures()
def test_antenna_non_matching_acfids(good_xml):
    """An AntPhaseCenter/ACFId that matches no AntCoordFrame identifier must fail."""
    root = copy_xml(good_xml['with_ns'])
    antenna_node = root.find('./ns:Antenna', namespaces=good_xml['nsmap'])
    antenna_node.xpath('./ns:AntPhaseCenter/ns:ACFId', namespaces=good_xml['nsmap'])[-1].text += '_wrong'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_antenna')
    assert cphd_con.failures()
def test_txrcv_bad_rcvid(xml_with_txrcv):
    """A channel RcvId that matches no RcvParameters identifier must fail."""
    chan_ids, root, nsmap = xml_with_txrcv
    chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
                                 namespaces=nsmap)[0]
    chan_param_node.xpath('./ns:TxRcv/ns:RcvId', namespaces=nsmap)[-1].text = 'missing'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    tocheck = ['check_channel_txrcv_exist_{}'.format(key) for key in chan_ids]
    cphd_con.check(tocheck)
    assert cphd_con.failures()
def test_txrcv_missing_channel_node(xml_with_txrcv):
    """Removing a channel's TxRcv node while TxRcv parameters exist must fail."""
    chan_ids, root, nsmap = xml_with_txrcv
    chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_ids[0]),
                                 namespaces=nsmap)[0]
    remove_nodes(*chan_param_node.findall('./ns:TxRcv', nsmap))
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_txrcv_ids_in_channel')
    assert cphd_con.failures()
@pytest.fixture
def xml_with_fxbwnoise(good_xml):
    """Fixture: adds an FxBWNoise element per channel plus FXN1/FXN2 PVPs.

    One vector per channel carries NaN to exercise the checker's handling of
    missing noise-band entries.  Returns (pvps, XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    pvps = {}
    for channel_node in root.findall('./ns:Data/ns:Channel', namespaces=good_xml['nsmap']):
        chan_id = channel_node.findtext('./ns:Identifier', namespaces=good_xml['nsmap'])
        num_vect = int(channel_node.findtext('./ns:NumVectors', namespaces=good_xml['nsmap']))
        pvps[chan_id] = np.zeros(num_vect, dtype=[('FXN1', 'f8'), ('FXN2', 'f8')])
        pvps[chan_id]['FXN1'] = np.linspace(1, 2, num_vect)
        pvps[chan_id]['FXN2'] = pvps[chan_id]['FXN1'] * 1.1
        # Vector 10 intentionally has no noise band
        pvps[chan_id]['FXN1'][10] = np.nan
        pvps[chan_id]['FXN2'][10] = np.nan
        chan_param_node = root.xpath('./ns:Channel/ns:Parameters/ns:Identifier[text()="{}"]/..'.format(chan_id),
                                     namespaces=good_xml['nsmap'])[0]
        chan_param_node.append(make_elem('FxBWNoise', 1.2, namespace=good_xml['nsmap']['ns']))
    return pvps, root, good_xml['nsmap']
def test_fxbwnoise(xml_with_fxbwnoise):
    """In-band FXN1/FXN2 PVPs (with NaN gaps) must pass the FxBWNoise checks."""
    pvps, root, _ = xml_with_fxbwnoise
    checker = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check(['check_channel_fxbwnoise_{}'.format(chan) for chan in pvps.keys()])
    assert not checker.failures()
def test_fxbwnoise_bad_domain(xml_with_fxbwnoise):
    """FxBWNoise present while DomainType is TOA (not FX) must fail."""
    pvps, root, nsmap = xml_with_fxbwnoise
    root.find('./ns:Global/ns:DomainType', namespaces=nsmap).text = 'TOA'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    tocheck = ['check_channel_fxbwnoise_{}'.format(key) for key in pvps.keys()]
    cphd_con.check(tocheck)
    assert cphd_con.failures()
def test_fxbwnoise_bad_value(xml_with_fxbwnoise):
    """An FXN1 value outside the expected band must fail the FxBWNoise check."""
    pvps, root, nsmap = xml_with_fxbwnoise
    chan_id = list(pvps.keys())[-1]
    pvps[chan_id]['FXN1'][0] = 0.5
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=pvps, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    tocheck = ['check_channel_fxbwnoise_{}'.format(key) for key in pvps.keys()]
    cphd_con.check(tocheck)
    assert cphd_con.failures()
def test_geoinfo_polygons(good_xml):
    """A GeoInfo polygon with clockwise-indexed Lat/Lon vertices must pass."""
    root = copy_xml(good_xml['with_ns'])
    root.append(make_elem('GeoInfo', namespace=good_xml['nsmap']['ns'], children=[
        make_elem('Polygon', size='3', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
            ]),
        ])
    ]))
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_geoinfo_polygons')
    assert not cphd_con.failures()
def test_geoinfo_polygons_bad_order(good_xml):
    """A GeoInfo polygon whose vertices wind the wrong way must fail the check."""
    root = copy_xml(good_xml['with_ns'])
    # Same triangle as the passing case, but traversed in the opposite direction
    root.append(make_elem('GeoInfo', namespace=good_xml['nsmap']['ns'], children=[
        make_elem('Polygon', size='3', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 0.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 0.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Lat', 1.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Lon', 1.0, namespace=good_xml['nsmap']['ns']),
            ]),
        ])
    ]))
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_geoinfo_polygons')
    assert cphd_con.failures()
@pytest.fixture
def xml_with_channel_imagearea(good_xml):
    """Fixture: gives every channel a consistent ImageArea (corners + diamond polygon).

    Returns (XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    for chan_param_node in root.xpath('./ns:Channel/ns:Parameters', namespaces=good_xml['nsmap']):
        chan_param_node.append(make_elem('ImageArea', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('X1Y1', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', -50, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', -50, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('X2Y2', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', 50, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', 50, namespace=good_xml['nsmap']['ns']),
            ]),
            # Diamond inscribed in the [-50, 50] square
            make_elem('Polygon', size='4', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
                    make_elem('X', -50.0, namespace=good_xml['nsmap']['ns']),
                    make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
                ]),
                make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
                    make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
                    make_elem('Y', 50.0, namespace=good_xml['nsmap']['ns']),
                ]),
                make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
                    make_elem('X', 50.0, namespace=good_xml['nsmap']['ns']),
                    make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
                ]),
                make_elem('Vertex', index='4', namespace=good_xml['nsmap']['ns'], children=[
                    make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
                    make_elem('Y', -50.0, namespace=good_xml['nsmap']['ns']),
                ]),
            ])
        ]))
    return root, good_xml['nsmap']
def test_channel_image_area(xml_with_channel_imagearea):
    """Per-channel ImageArea corners and polygon must pass their checks."""
    root, nsmap = xml_with_channel_imagearea
    checker = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    check_names = []
    for ident in root.findall('./ns:Data/ns:Channel/ns:Identifier', namespaces=nsmap):
        check_names.append('check_channel_imagearea_x1y1_{}'.format(ident.text))
        check_names.append('check_channel_imagearea_polygon_{}'.format(ident.text))
    checker.check(check_names)
    assert not checker.failures()
@pytest.fixture
def xml_with_extendedarea(good_xml):
    """Fixture: adds a consistent SceneCoordinates/ExtendedArea (corners + diamond).

    Returns (XML root, namespace map).
    """
    root = copy_xml(good_xml['with_ns'])
    scene = root.find('./ns:SceneCoordinates', namespaces=good_xml['nsmap'])
    scene.append(make_elem('ExtendedArea', namespace=good_xml['nsmap']['ns'], children=[
        make_elem('X1Y1', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('X', -1000, namespace=good_xml['nsmap']['ns']),
            make_elem('Y', -1000, namespace=good_xml['nsmap']['ns']),
        ]),
        make_elem('X2Y2', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('X', 1000, namespace=good_xml['nsmap']['ns']),
            make_elem('Y', 1000, namespace=good_xml['nsmap']['ns']),
        ]),
        # Diamond inscribed in the [-1000, 1000] square
        make_elem('Polygon', size='4', namespace=good_xml['nsmap']['ns'], children=[
            make_elem('Vertex', index='1', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', -1000.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='2', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', 1000.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='3', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', 1000.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', 0.0, namespace=good_xml['nsmap']['ns']),
            ]),
            make_elem('Vertex', index='4', namespace=good_xml['nsmap']['ns'], children=[
                make_elem('X', 0.0, namespace=good_xml['nsmap']['ns']),
                make_elem('Y', -1000.0, namespace=good_xml['nsmap']['ns']),
            ]),
        ])
    ]))
    return root, good_xml['nsmap']
def test_extended_imagearea(xml_with_extendedarea):
    """A consistent ExtendedArea must pass both extended-image-area checks."""
    root, _ = xml_with_extendedarea
    checker = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                               schema=DEFAULT_SCHEMA,
                                               check_signal_data=False)
    checker.check(['check_extended_imagearea_polygon', 'check_extended_imagearea_x1y1_x2y2'])
    assert not checker.failures()
def test_extended_imagearea_polygon_bad_extent(xml_with_extendedarea):
    """Growing X2Y2 beyond the polygon's actual extent must fail the polygon check."""
    root, nsmap = xml_with_extendedarea
    root.find('./ns:SceneCoordinates/ns:ExtendedArea/ns:X2Y2/ns:X', namespaces=nsmap).text = '2000'
    cphd_con = cphd_consistency.CphdConsistency(root, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_extended_imagearea_polygon')
    assert cphd_con.failures()
def test_antenna_missing_channel_node(good_xml_with_antenna):
    """Removing a channel's Antenna node while Antenna parameters exist must fail."""
    bad_xml = copy_xml(good_xml_with_antenna)
    remove_nodes(*bad_xml.findall('./Channel/Parameters/Antenna'))
    cphd_con = cphd_consistency.CphdConsistency(bad_xml, pvps=None, header=None, filename=None,
                                                schema=DEFAULT_SCHEMA,
                                                check_signal_data=False)
    cphd_con.check('check_antenna_ids_in_channel')
    assert cphd_con.failures()
def test_refgeom_bad_root():
    """Corrupting ReferenceGeometry/SRPCODTime must fail the root reference-geometry check."""
    cphd_con = cphd_consistency.CphdConsistency.from_file(GOOD_CPHD,
                                                          schema=DEFAULT_SCHEMA,
                                                          check_signal_data=False)
    bad_node = cphd_con.xml.find('./ReferenceGeometry/SRPCODTime')
    # Prepend digits so the stored value no longer matches the recomputed one
    bad_node.text = '24' + bad_node.text
    cphd_con.check('check_refgeom_root')
    assert cphd_con.failures()
def test_refgeom_bad_monostatic():
    """Shifting the monostatic AzimuthAngle must fail the monostatic geometry check."""
    cphd_con = cphd_consistency.CphdConsistency.from_file(GOOD_CPHD,
                                                          schema=DEFAULT_SCHEMA,
                                                          check_signal_data=False)
    bad_node = cphd_con.xml.find('./ReferenceGeometry/Monostatic/AzimuthAngle')
    # Offset by 3 degrees, wrapped into [0, 360)
    bad_node.text = str((float(bad_node.text) + 3) % 360)
    cphd_con.check('check_refgeom_monostatic')
    assert cphd_con.failures()
def test_refgeom_bad_bistatic():
    """Corrupting the bistatic receive-platform SlantRange must fail the bistatic check."""
    cphd_con = cphd_consistency.CphdConsistency.from_file(TEST_FILE_PATHS['bistatic'],
                                                          schema=DEFAULT_SCHEMA,
                                                          check_signal_data=False)
    bad_node = cphd_con.xml.find('./ReferenceGeometry/Bistatic/RcvPlatform/SlantRange')
    # Prepend a digit so the stored range no longer matches the recomputed one
    bad_node.text = '2' + bad_node.text
    cphd_con.check('check_refgeom_bistatic')
    assert cphd_con.failures()
| 45.26808 | 115 | 0.602231 |
7955fbff776afc6ea7de166e3a2604ed8bf44815 | 139 | py | Python | inapp_survey/apps.py | jpatel3/django-inapp-survey | f11818f7bbafe1cf38af91f2ec83cb3ad050091e | [
"Apache-2.0"
] | 3 | 2017-12-07T20:02:31.000Z | 2020-10-24T18:35:11.000Z | inapp_survey/apps.py | jpatel3/django-inapp-survey | f11818f7bbafe1cf38af91f2ec83cb3ad050091e | [
"Apache-2.0"
] | 6 | 2017-04-12T20:33:16.000Z | 2017-04-25T18:26:21.000Z | inapp_survey/apps.py | jpatel3/django-inapp-survey | f11818f7bbafe1cf38af91f2ec83cb3ad050091e | [
"Apache-2.0"
] | 2 | 2018-08-27T10:45:32.000Z | 2019-07-25T01:38:50.000Z | from __future__ import unicode_literals
from django.apps import AppConfig
class InappSurveyConfig(AppConfig):
name = 'inapp_survey'
| 17.375 | 39 | 0.805755 |
7955fc57576d3f97db0b8bbdcc2cf5cf6aba174e | 98 | py | Python | schema/token_schema.py | dcshoecousa/MimiWork | fdbb41163ac1216a8854c36ac8328fe8cc61a2d3 | [
"MIT"
] | 6 | 2021-11-11T11:01:21.000Z | 2022-01-06T18:22:04.000Z | services/api_service/schemas/token.py | datnguye/dbt-mssql | 8ecdff93e6dbae967de443a4fd122c54835d6c1f | [
"MIT"
] | null | null | null | services/api_service/schemas/token.py | datnguye/dbt-mssql | 8ecdff93e6dbae967de443a4fd122c54835d6c1f | [
"MIT"
] | 2 | 2021-11-13T11:33:19.000Z | 2021-11-24T05:35:58.000Z | from pydantic import BaseModel
class Token(BaseModel):
access_token: str
token_type: str | 16.333333 | 30 | 0.755102 |
7955fcbba4ca306f9ccacf0313f9808ed99999bd | 30,822 | py | Python | theano/compile/ops.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | null | null | null | theano/compile/ops.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | null | null | null | theano/compile/ops.py | brandonwillard/Theano | f375a0e999b950a81824a003f685b0bfd1c4e405 | [
"BSD-3-Clause"
] | 1 | 2020-08-15T17:09:10.000Z | 2020-08-15T17:09:10.000Z | """
This file contains auxiliary Ops, used during the compilation phase and Ops
building class (:class:`FromFunctionOp`) and decorator (:func:`as_op`) that
help make new Ops more rapidly.
"""
import copy
import warnings
from collections import OrderedDict
import numpy as np
import six.moves.cPickle as pickle
import theano
from theano.gof import Apply, Op, ParamsType, Variable
def register_view_op_c_code(type, code, version=()):
    """Register a C implementation of ViewOp for a Theano type.

    Parameters
    ----------
    type : Theano type
        The type class itself (not an instance).
    code : str
        C code template that produces a view of the input; ``%(iname)s``
        and ``%(oname)s`` are substituted with the input/output C
        variable names.
    version
        Cache-version tag for the generated code.

    """
    entry = (code, version)
    ViewOp.c_code_and_version[type] = entry
class ViewOp(Op):
    """
    Returns an inplace view of the input. Used internally by Theano.
    """

    # Output 0 is a view of input 0.
    view_map = {0: [0]}
    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    __props__ = ()
    # float16 inputs are supported by this op.
    _f16_ok = True
    def make_node(self, x):
        # Output has the same type as the input.
        return Apply(self, [x], [x.type()])
    def perform(self, node, inp, out):
        # Python fallback: the "view" is simply the input object itself.
        (x,) = inp
        (z,) = out
        z[0] = x
    def __str__(self):
        return "%s" % self.__class__.__name__
    def c_code(self, node, nodename, inp, out, sub):
        # Use the registered per-type C code when available,
        # otherwise defer to the base class (no C implementation).
        (iname,) = inp
        (oname,) = out
        fail = sub["fail"]
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, version = self.c_code_and_version[itype]
            return code % locals()
        # Else, no C code
        return super().c_code(node, nodename, inp, out, sub)
    def c_code_cache_version(self):
        # Aggregate the per-type versions; an unversioned entry
        # disables caching for the whole op (empty tuple).
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, v) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for ViewOp, but it has no "
                    "version. You should add a 'version' keyword "
                    "arg when calling register_view_op_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        return tuple(version)
    def infer_shape(self, node, input_shapes):
        # A view has exactly the shape of its input.
        return input_shapes
    def grad(self, args, g_outs):
        # Identity: the gradient passes through unchanged.
        return g_outs
# Singleton ViewOp instance used by the rest of the compilation machinery.
view_op = ViewOp()
class OutputGuard(ViewOp):
    """
    This op is used only internally by Theano.
    Only the AddDestroyHandler optimizer tries to insert them in the graph.
    This Op is declared as destructive while it is not destroying anything.
    It returns a view. This is used to prevent destruction of the output
    variables of a Theano function.
    There is a mechanism in Theano that should prevent this, but the use
    of OutputGuard adds a safeguard: it may be possible for some optimization
    run before the add_destroy_handler phase to bypass this mechanism, by
    making in-place optimizations.
    TODO: find a current full explanation.
    """

    # Declared destructive on input 0 (see docstring: intentionally a lie).
    destroy_map = {0: [0]}
    check_input = False
# Singleton used by the AddDestroyHandler optimizer to guard function outputs.
_output_guard = OutputGuard()
def register_deep_copy_op_c_code(typ, code, version=()):
    """Register a C implementation of DeepCopyOp for a Theano type.

    Parameters
    ----------
    typ : Theano type
        The type class itself (not an instance).
    code : str
        C code template that deep-copies a value of type ``typ``;
        ``%(iname)s`` and ``%(oname)s`` are substituted with the
        input/output C variable names.
    version
        Cache-version tag for the generated code.

    """
    entry = (code, version)
    DeepCopyOp.c_code_and_version[typ] = entry
class DeepCopyOp(Op):
    """Op that returns a deep copy of its input (same type as the input)."""

    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    check_input = False
    __props__ = ()
    # float16 inputs are supported by this op.
    _f16_ok = True
    def __init__(self):
        pass
    def make_node(self, x):
        # Output has the same type as the input.
        return Apply(self, [x], [x.type()])
    def perform(self, node, args, outs):
        if hasattr(args[0], "copy"):
            # when args[0] is a an ndarray of 0 dimensions,
            # this return a numpy.dtype and not an ndarray
            # So when the args have a copy attribute we use it
            # as this don't have this problem
            outs[0][0] = args[0].copy()
        else:
            outs[0][0] = copy.deepcopy(args[0])
    def c_code_cache_version(self):
        # Aggregate the per-type versions; an unversioned entry
        # disables caching for the whole op (empty tuple).
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, v) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for DeepCopyOp, but it has "
                    "no version. You should add a 'version' keyword"
                    " arg when calling "
                    "register_deep_copy_op_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        if version:
            version.append(1)
        return tuple(version)
    def c_code(self, node, name, inames, onames, sub):
        # Use the registered per-type C code when available,
        # otherwise defer to the base class (no C implementation).
        (iname,) = inames
        (oname,) = onames
        fail = sub["fail"]
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, version = self.c_code_and_version[itype]
            return code % locals()
        # Else, no C code
        return super().c_code(node, name, inames, onames, sub)
# Singleton DeepCopyOp instance.
deep_copy_op = DeepCopyOp()
def register_shape_c_code(type, code, version=()):
    """Register a C implementation of the Shape op for a Theano type.

    Parameters
    ----------
    type : Theano type
        The type class itself (not an instance).
    code : str
        C code template that builds a vector holding the shape of a
        value of the given type; ``%(iname)s`` and ``%(oname)s`` are
        substituted with the input/output C variable names.
    version
        Cache-version tag for the generated code.

    """
    entry = (code, version)
    Shape.c_code_and_version[type] = entry
class Shape(Op):
    """
    L{Op} to return the shape of a matrix.
    Notes
    -----
    Non-differentiable.
    """

    # float16 inputs are supported by this op.
    _f16_ok = True
    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    check_input = False
    __props__ = ()
    def make_node(self, x):
        # Must work for all type that have a shape attribute.
        # This will fail at execution time.
        if not isinstance(x, theano.Variable):
            x = theano.tensor.as_tensor_variable(x)
        # Output is always an int64 vector of length ndim.
        return Apply(self, [x], [theano.tensor.lvector()])
    def perform(self, node, inp, out_):
        (x,) = inp
        (out,) = out_
        out[0] = theano._asarray(x.shape, dtype="int64")
    def infer_shape(self, node, in_shapes):
        # The shape of the shape-vector is (ndim of input,).
        return [[len(in_shapes[0])]]
    def connection_pattern(self, node):
        # the grad returns the gradient with respect to the
        # elements of a tensor variable
        # the elements of the tensor variable do not participate
        # in the computation of the shape, so they are not really
        # part of the graph
        return [[False]]
    def grad(self, inp, grads):
        # the grad returns the gradient with respect to the
        # elements of a tensor variable
        # the elements of the tensor variable do not participate
        # in the computation of the shape, so they are not really
        # part of the graph
        return [theano.gradient.DisconnectedType()()]
    def R_op(self, inputs, eval_points):
        # No Rop: shape does not depend on the input values.
        return [None]
    def c_code(self, node, name, inames, onames, sub):
        # Use the registered per-type C code when available,
        # otherwise defer to the base class (no C implementation).
        (iname,) = inames
        (oname,) = onames
        fail = sub["fail"]
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, version = self.c_code_and_version[itype]
            return code % locals()
        # Else, no C code
        return super().c_code(node, name, inames, onames, sub)
    def c_code_cache_version(self):
        # Aggregate the per-type versions; an unversioned entry
        # disables caching for the whole op (empty tuple).
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, v) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for Shape, but it has no "
                    "version. You should add a 'version' keyword "
                    "arg when calling register_shape_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        if version:
            version.append(1)
        return tuple(version)
# Module-level singleton: Shape has no __props__, so one instance suffices.
shape = Shape()
_shape = shape  # was used in the past, now use shape directly.
class Shape_i(Op):
    """
    L{Op} to return the shape of a matrix.
    Notes
    -----
    Non-differentiable.
    """
    _f16_ok = True
    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    check_input = False
    # `i` (the dimension index) is the only property: it drives __eq__/__hash__.
    __props__ = ("i",)
    def __init__(self, i):
        # As i will be used in the hash and that ndarray are not hashable,
        # we need to convert it to an int as it is hashable.
        if isinstance(i, np.ndarray):
            assert i.dtype in theano.tensor.integer_dtypes
            assert i == int(i)
            i = int(i)
        self.i = i
    # NB:
    # 1) params_type is defined as a property to avoid
    # loop in Python import caused by importing theano.scalar below
    # when params_type is defined directly in class code.
    # 2) We wrap scalar into ParamsType (instead of directly using scalar as op param)
    # to avoid Theano converting scalar param to constant that would be later
    # hardcoded as litteral in C code, making us loose all the advantages of
    # using params.
    @property
    def params_type(self):
        return ParamsType(i=theano.scalar.basic.int64)
    def __str__(self):
        return "%s{%i}" % (self.__class__.__name__, self.i)
    def make_node(self, x):
        # x could be one of a number of types
        # the only thing we require is that the variable have a .ndim,
        # and that the value have a .shape
        if not isinstance(x, theano.Variable):
            raise TypeError("x must be Variable with ndim attribute", x)
        if x.ndim <= self.i:
            raise TypeError("x has too few dimensions for Shape_i", (x, self.i))
        # Output is a scalar int64: the length of dimension self.i.
        return theano.Apply(self, [x], [theano.tensor.lscalar()])
    def perform(self, node, inp, out_, params):
        (x,) = inp
        (out,) = out_
        if out[0] is None:
            out[0] = theano._asarray(x.shape[self.i], dtype="int64")
        else:
            # Reuse the previously allocated output storage in place.
            out[0][...] = x.shape[self.i]
    def c_code_cache_version(self):
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, ci, v) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for Shape_i, but it has "
                    "no version. You should add a 'version' keyword "
                    "arg when calling register_shape_i_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        if version:
            version.append(2)
        return tuple(version)
    def c_code(self, node, name, inames, onames, sub):
        (iname,) = inames
        (oname,) = onames
        fail = sub["fail"]  # consumed by the registered C template via locals()
        # i is then 'params->i', not just 'params'.
        i = sub["params"] + "->i"
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, check_input, version = self.c_code_and_version[itype]
            # The input-validation snippet runs before the shape-extraction code.
            return (check_input + code) % locals()
        # Else, no C code
        return super().c_code(node, name, inames, onames, sub)
    def infer_shape(self, node, input_shapes):
        # Scalar output: its shape is the empty tuple.
        return [()]
    def connection_pattern(self, node):
        # the grad returns the gradient with respect to the
        # elements of a tensor variable
        # the elements of the tensor variable do not participate
        # in the computation of the shape, so they are not really
        # part of the graph
        return [[False]]
    def grad(self, inp, grads):
        return [
            theano.gradient.grad_not_implemented(
                op=self,
                x_pos=0,
                x=inp[0],
                comment=("No gradient for the shape of a matrix " "is implemented."),
            )
        ]
def shape_i(var, i, fgraph=None):
    """
    Equivalent of var.shape[i], but apply if possible the shape feature
    optimization.
    This is useful in optimization that need to get the shape. This
    remove the need of the following shape_feature optimization that
    convert it. So this speed up optimization and remove Equilibrium
    max iteration problems.
    Parameters
    ----------
    var
        The variable we want to take the shape of.
    i
        The shape dimensions we want
    fgraph : optional
        If var.fgraph do not exist, the fgraph that have the shape_feature to
        introduce var in to get the optimized shape.
    """
    if fgraph is None and hasattr(var, "fgraph"):
        fgraph = var.fgraph
    if fgraph and hasattr(fgraph, "shape_feature"):
        shape_feature = fgraph.shape_feature
        shape_of = shape_feature.shape_of
        def recur(node):
            # Depth-first walk over ancestors so every input's shape is
            # registered in the ShapeFeature before `node` itself is imported.
            if not node.outputs[0] in shape_of:
                for inp in node.inputs:
                    if inp.owner:
                        recur(inp.owner)
                # If the output var isn't marked as being in the graph,
                # we need to add it in the ShapeFeature.
                shape_feature.on_import(fgraph, node, "gof.ops.shape_i")
        if var not in shape_of:
            recur(var.owner)
        return shape_of[var][i]
    # If we are not able to use the shape feature, we should not put
    # Shape_i in the graph. Otherwise, the shape feature optimization
    # won't get applied.
    return var.shape[i]
def shape_i_op(i):
    """Return a memoized ``Shape_i`` op for dimension index ``i``.

    One shared instance per index is kept in ``shape_i_op.cache`` so that
    repeated requests for the same dimension reuse the same op object.
    """
    cache = shape_i_op.cache
    try:
        return cache[i]
    except KeyError:
        op = Shape_i(i)
        cache[i] = op
        return op
shape_i_op.cache = {}
def register_shape_i_c_code(typ, code, check_input, version=()):
    """
    Tell Shape_i how to generate C code for a Theano Type.
    Parameters
    ----------
    typ : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        Gets the shape of dimensions %(i)s for the Theano type 'typ'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively.
    check_input : C code
        Validation snippet that Shape_i.c_code prepends to `code`.
    version
        A number indicating the version of the code, for cache.
    """
    # Stored as (code, check_input, version); consumed by Shape_i.c_code and
    # Shape_i.c_code_cache_version.
    Shape_i.c_code_and_version[typ] = (code, check_input, version)
# List of Theano Types to which one can add an extra dimension and which
# Scan can deal with.
expandable_types = ()
def load_back(mod, name):
    """Import module ``mod`` and return its attribute ``name``.

    Used as the reconstruction callable when unpickling ``FromFunctionOp``
    (see ``FromFunctionOp.__reduce__``).
    """
    import sys
    __import__(mod)
    return getattr(sys.modules[mod], name)
class FromFunctionOp(Op):
    """
    Build a basic Theano Op around a function.
    Since the resulting Op is very basic and is missing most of the
    optional functionalities, some optimizations may not apply. If you
    want to help, you can supply an infer_shape function that computes
    the shapes of the output given the shapes of the inputs.
    Also the gradient is undefined in the resulting op and Theano will
    raise an error if you attempt to get the gradient of a graph
    containing this op.
    """
    def __init__(self, fn, itypes, otypes, infer_shape):
        # Name-mangled (double underscore) attribute, so subclasses cannot
        # accidentally clash with it.
        self.__fn = fn
        self.itypes = itypes
        self.otypes = otypes
        self.__infer_shape = infer_shape
        if self.__infer_shape is not None:
            # Only expose an `infer_shape` attribute when the user supplied
            # one — presumably so attribute-probing optimizations skip this
            # op otherwise; TODO confirm.
            self.infer_shape = self._infer_shape
    def __eq__(self, other):
        # Two wrappers are interchangeable iff they wrap the same function.
        return type(self) == type(other) and self.__fn == other.__fn
    def __hash__(self):
        return hash(type(self)) ^ hash(self.__fn)
    def __str__(self):
        return "FromFunctionOp{%s}" % self.__fn.__name__
    def perform(self, node, inputs, outputs):
        outs = self.__fn(*inputs)
        # Normalize a single return value to a 1-tuple.
        if not isinstance(outs, (list, tuple)):
            outs = (outs,)
        assert len(outs) == len(outputs)
        for i in range(len(outs)):
            outputs[i][0] = outs[i]
    def __reduce__(self):
        # Pickle by reference: record module/name of the wrapped function and
        # re-import it via load_back() on unpickling. Fails loudly if the
        # function is not importable, or resolves to a different object.
        mod = self.__fn.__module__
        name = self.__fn.__name__
        try:
            obj = load_back(mod, name)
        except (ImportError, KeyError, AttributeError):
            raise pickle.PicklingError(
                "Can't pickle as_op(), not found as {}.{}".format(mod, name)
            )
        else:
            if obj is not self:
                raise pickle.PicklingError(
                    "Can't pickle as_op(), not the object " "at %s.%s" % (mod, name)
                )
        return load_back, (mod, name)
    def _infer_shape(self, node, input_shapes):
        # Delegate to the user-supplied callable (set only when not None).
        return self.__infer_shape(node, input_shapes)
def as_op(itypes, otypes, infer_shape=None):
    """
    Decorator that converts a function into a basic Theano op that will call
    the supplied function as its implementation.

    An optional ``infer_shape`` callable with the signature
    ``infer_shape(node, input_shapes) -> output_shapes`` may be supplied so
    shape-dependent optimizations still apply; ``input_shapes`` and
    ``output_shapes`` are lists of tuples describing each input/output shape.

    This should not be used when performance is a concern since the very
    basic nature of the resulting Op may interfere with certain graph
    optimizations.

    Examples
    --------
    @as_op(itypes=[theano.tensor.fmatrix, theano.tensor.fmatrix],
           otypes=[theano.tensor.fmatrix])
    def numpy_dot(a, b):
        return numpy.dot(a, b)
    """
    def _as_type_list(types, label):
        # Accept a single type or a sequence of types; validate and always
        # hand back a fresh list (never a tuple, never the caller's object).
        if not isinstance(types, (list, tuple)):
            types = [types]
        if any(not isinstance(t, theano.Type) for t in types):
            raise TypeError("%s has to be a list of Theano types" % label)
        return list(types)
    itypes = _as_type_list(itypes, "itypes")
    otypes = _as_type_list(otypes, "otypes")
    if infer_shape is not None and not callable(infer_shape):
        raise TypeError("infer_shape needs to be a callable")
    def make_op(fn):
        return FromFunctionOp(fn, itypes, otypes, infer_shape)
    return make_op
def register_rebroadcast_c_code(typ, code, version=()):
    """
    Tell Rebroadcast how to generate C code for a Theano Type.
    Parameters
    ----------
    typ : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        That checks if the dimension %(axis)s is of shape 1 for the Theano type
        'typ'. Use %(iname)s and %(oname)s for the input and output C variable
        names respectively, and %(axis)s for the axis that we need to check.
        This code is put in a loop for all axes.
    version
        A number indicating the version of the code, for cache.
    """
    # Consumed by Rebroadcast.c_code and Rebroadcast.c_code_cache_version.
    Rebroadcast.c_code_and_version[typ] = (code, version)
class Rebroadcast(Op):
    """
    Change the input's broadcastable fields in some predetermined way.
    See Also
    --------
    unbroadcast <theano.tensor.unbroadcast>
    addbroadcast <theano.tensor.addbroadcast>
    patternbroadcast <theano.tensor.patternbroadcast>
    Notes
    -----
    Works inplace and works for CudaNdarrayType.
    Examples
    --------
    `Rebroadcast((0, True), (1, False))(x)` would make `x` broadcastable in
    axis 0 and not broadcastable in axis 1.
    """
    # The output is a view of the input (in-place op).
    view_map = {0: [0]}
    # Fixed: `_f16_ok = True` used to be assigned twice in this class body;
    # the redundant second assignment was removed.
    _f16_ok = True
    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    check_input = False
    __props__ = ("axis",)
    def __init__(self, *axis):
        # Each element of `axis` is an (axis_index, new_broadcastable) pair.
        # Sort them to make sure we merge all possible case.
        items = sorted(axis)
        self.axis = OrderedDict(items)
        for axis, broad in self.axis.items():
            if not isinstance(axis, (np.integer, int)):
                raise TypeError(
                    "Rebroadcast needs integer axes. " "Got {}".format(axis)
                )
            if not isinstance(broad, (np.bool_, bool)):
                raise TypeError(
                    "Rebroadcast needs bool for new broadcast "
                    "pattern. Got {}".format(broad)
                )
    def __hash__(self):
        # Need special __hash__ as dict aren't hashable.
        # no ambiguity because each item key is unique
        items = sorted(self.axis.items())
        return hash((type(self), tuple(items)))
    def __str__(self):
        # Render one character per axis up to the highest rebroadcast axis:
        # '?' for untouched axes, '0'/'1' for the requested pattern.
        if len(self.axis) == 0:
            broadcast_pattern = []
        else:
            broadcast_pattern = ["?" for i in range(1 + max(self.axis.keys()))]
        for k, v in self.axis.items():
            broadcast_pattern[k] = str(int(v))
        return "{}{{{}}}".format(self.__class__.__name__, ",".join(broadcast_pattern))
    def make_node(self, x):
        if self.axis.keys() and (x.ndim <= max(self.axis.keys())):
            raise ValueError("Trying to rebroadcast non-existent dimension")
        # Clone the input type with the requested broadcastable pattern.
        t = x.type.clone(
            broadcastable=[
                self.axis.get(i, b) for i, b in enumerate(x.type.broadcastable)
            ]
        )
        return Apply(self, [x], [t()])
    def perform(self, node, inp, out_):
        (x,) = inp
        (out,) = out_
        # An axis may only be declared broadcastable if its length really is 1.
        for axis, value in self.axis.items():
            if value and x.shape[axis] != 1:
                raise ValueError(
                    "Dimension %s in Rebroadcast's input was"
                    " supposed to be 1 (got %s instead)" % (axis, x.shape[axis])
                )
        out[0] = x
    def grad(self, inp, grads):
        (x,) = inp
        (gz,) = grads
        # restore the broadcasting pattern of the input
        return (
            Rebroadcast(
                *[
                    (axis, x.type.broadcastable[axis])
                    for axis, value in self.axis.items()
                ]
            )(gz),
        )
    def infer_shape(self, node, ishapes):
        assert len(ishapes) == 1
        l = []
        one = theano.tensor.basic.constant(1)
        for ax in range(len(ishapes[0])):
            if self.axis.get(ax, False):
                # Axis forced broadcastable: its length is the constant 1.
                l.append(one)
            else:
                l.append(ishapes[0][ax])
        return [tuple(l)]
    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            return [None]
        return self(*eval_points, **dict(return_list=True))
    def c_code(self, node, nodename, inp, out, sub):
        (iname,) = inp
        (oname,) = out
        fail = sub["fail"]  # consumed by the registered C template via locals()
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, version = self.c_code_and_version[itype]
            final_code = ""
            # Emit one shape check per axis that becomes broadcastable, then
            # alias the output to the input (view semantics).
            for axis, value in self.axis.items():
                if value:
                    final_code += code % locals()
            return (
                final_code
                + """
            Py_XDECREF(%(oname)s);
            %(oname)s = %(iname)s;
            Py_XINCREF(%(oname)s);
            """
                % locals()
            )
        return super().c_code(node, nodename, inp, out, sub)
    def c_code_cache_version(self):
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, v) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for Rebroadcast, but it "
                    "has no version. You should add a 'version' "
                    "keyword arg when calling "
                    "register_rebroadcast_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        if version:
            version.append(1)
        return tuple(version)
def register_specify_shape_c_code(typ, code, version=(), c_support_code_apply=None):
    """
    Tell SpecifyShape how to generate C code for a Theano Type.
    Parameters
    ----------
    typ : Theano type
        It must be the Theano class itself and not an instance of the class.
    code : C code
        Checks the shape and returns a view for the Theano type 'typ'.
        Use %(iname)s and %(oname)s for the input and output C variable names
        respectively. %(shape)s is the vector of shape of %(iname)s.
        Check that its length is good.
    version
        A number indicating the version of the code, for cache.
    c_support_code_apply
        Extra code.
    """
    # Consumed by SpecifyShape.c_code, SpecifyShape.c_support_code_apply and
    # SpecifyShape.c_code_cache_version.
    SpecifyShape.c_code_and_version[typ] = (code, version, c_support_code_apply)
class SpecifyShape(Op):
    """
    L{Op} that puts into the graph the user-provided shape.
    In the case where this op stays in the final graph, we assert the shape.
    For this the output of this op must be used in the graph. This is not
    the case most of the time if we only take the shape of the output.
    Maybe there are other optimizations that will mess with this.
    Notes
    -----
    Maybe in the future we will never do the assert!
    We currently don't support specifying partial shape information.
    TODO : test this op with sparse. Do C code for them too.
    """
    # The output is a view of the input.
    view_map = {0: [0]}
    # Mapping from Type to C code (and version) to use.
    # In the C code, the name of the input variable is %(iname)s,
    # the output variable is %(oname)s.
    c_code_and_version = {}
    __props__ = ()
    _f16_ok = True
    def make_node(self, x, shape):
        if not isinstance(x, Variable):
            x = theano.tensor.as_tensor_variable(x)
        shape = theano.tensor.as_tensor_variable(shape)
        # The shape input must be a 1-d integer vector; when it is constant,
        # its length must match the rank of x.
        assert shape.ndim == 1
        assert shape.dtype in theano.tensor.integer_dtypes
        if isinstance(shape, theano.tensor.TensorConstant):
            assert shape.data.size == x.ndim
        return Apply(self, [x, shape], [x.type()])
    def perform(self, node, inp, out_):
        x, shape = inp
        (out,) = out_
        # Runtime assertion of the user-specified shape, then pass x through.
        assert x.ndim == shape.size
        assert np.all(x.shape == shape), ("got shape", x.shape, "expected", shape)
        out[0] = x
    def infer_shape(self, node, shapes):
        xshape, sshape = shapes
        new_shape = []
        # Prefer constant entries of the shape input when available, so the
        # inferred shape is as concrete as possible.
        for dim in range(node.inputs[0].ndim):
            try:
                s = theano.tensor.get_scalar_constant_value(node.inputs[1][dim])
                s = theano.tensor.as_tensor_variable(s)
                new_shape.append(s)
            except theano.tensor.NotScalarConstantError:
                new_shape.append(node.inputs[1][dim])
        assert len(new_shape) == len(xshape)
        return [new_shape]
    def connection_pattern(self, node):
        # Only the data input (not the shape input) is connected to the output.
        return [[True], [False]]
    def grad(self, inp, grads):
        x, s = inp
        (gz,) = grads
        # Should I set an SpecifyShape on gz? I think so
        # But I don't do it now as we need to make an optimization
        # to remove that op from the graph to don't block other optimization
        # Should I do an optimizer that will remove the SpecifyShape?
        # I think Yes
        # Fixed: an unreachable second `return [specify_shape(gz, s), ...]`
        # that followed this statement was removed as dead code.
        return [gz, theano.gradient.DisconnectedType()()]
    def R_op(self, inputs, eval_points):
        if eval_points[0] is None:
            # It means that the this op sits on top of a non-differentiable
            # path
            return [None]
        return self.make_node(eval_points[0], *inputs[1:]).outputs
    def c_support_code_apply(self, node, name):
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            _, _, support_code = self.c_code_and_version[itype]
            if support_code:
                return support_code
        return super().c_support_code_apply(node, name)
    def c_code(self, node, name, inames, onames, sub):
        iname, shape = inames
        (oname,) = onames
        fail = sub["fail"]  # consumed by the registered C template via locals()
        itype = node.inputs[0].type.__class__
        if itype in self.c_code_and_version:
            code, version, _ = self.c_code_and_version[itype]
            return code % locals()
        # Fixed: `name` (not `node`) must be forwarded as the second argument,
        # matching Shape.c_code and Shape_i.c_code.
        return super().c_code(node, name, inames, onames, sub)
    def c_code_cache_version(self):
        version = []
        # If any of the c code is unversionned, we have to return ()
        # Else, we will return a list of (type name, version) pairs.
        for t, (c, v, _) in sorted(
            self.c_code_and_version.items(), key=lambda pair: str(pair[0])
        ):
            if not v:
                warnings.warn(
                    "Type %s has C code for SpecifyShape, but it "
                    "has no version. You should add a 'version' "
                    "keyword arg when calling "
                    "register_specify_shape_c_code." % t,
                    stacklevel=2,
                )
                return ()
            version.append((str(t), v))
        return tuple(version)
# Module-level singleton: SpecifyShape has no __props__, one instance suffices.
specify_shape = SpecifyShape()
| 31.873837 | 86 | 0.588378 |
7955fdc5b6702592b8ad002c268404d44f4d482c | 526 | py | Python | src/event/migrations/0019_alter_workerimage_image.py | Niel-Richards/festival_accreditation | cfc837c29ead7679a30bf52e10ace75e868d4c4b | [
"MIT"
] | null | null | null | src/event/migrations/0019_alter_workerimage_image.py | Niel-Richards/festival_accreditation | cfc837c29ead7679a30bf52e10ace75e868d4c4b | [
"MIT"
] | null | null | null | src/event/migrations/0019_alter_workerimage_image.py | Niel-Richards/festival_accreditation | cfc837c29ead7679a30bf52e10ace75e868d4c4b | [
"MIT"
] | null | null | null | # Generated by Django 3.2 on 2021-10-20 01:46
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('event', '0018_alter_workerimage_image'),
]
operations = [
migrations.AlterField(
model_name='workerimage',
name='image',
field=models.ImageField(default="C:\\Users\\O'Niel Richards\\Documents\\workspace\\festival2\\media/avatar.png", upload_to='photo/<function upload_location at 0x04A98780>'),
),
]
| 27.684211 | 185 | 0.65019 |
7955fef71a5fe52a59524dd43efb2e04e34da1e3 | 3,391 | py | Python | tests/unit/jaml/test_type_parse.py | pete-sk/jina | e09ec8204e2e5a42e744898fb0cd74251e989146 | [
"Apache-2.0"
] | 1 | 2021-04-11T08:06:05.000Z | 2021-04-11T08:06:05.000Z | tests/unit/jaml/test_type_parse.py | pete-sk/jina | e09ec8204e2e5a42e744898fb0cd74251e989146 | [
"Apache-2.0"
] | 1 | 2021-07-16T17:36:22.000Z | 2021-09-22T13:48:18.000Z | tests/unit/jaml/test_type_parse.py | pete-sk/jina | e09ec8204e2e5a42e744898fb0cd74251e989146 | [
"Apache-2.0"
] | null | null | null | import pytest
from jina.executors import BaseExecutor
from jina.jaml import JAML, JAMLCompatible
from jina import __default_executor__, requests
class MyExecutor(BaseExecutor):
pass
def test_non_empty_reg_tags():
assert JAML.registered_tags()
assert __default_executor__ in JAML.registered_tags()
@pytest.mark.parametrize(
'include_unk, expected',
[
(
True,
'''
jtype: BaseExecutor {}
jtype: Blah {}
''',
),
(
False,
'''
jtype: BaseExecutor {}
!Blah {}
''',
),
],
)
def test_include_unknown(include_unk, expected):
y = '''
!BaseExecutor {}
!Blah {}
'''
assert JAML.escape(y, include_unknown_tags=include_unk).strip() == expected.strip()
# Round-trip property: escape() rewrites registered `!Tag`s to `jtype:` form,
# unescape() restores them; unknown tags (`!Blah`) pass through untouched
# when include_unknown_tags=False.
@pytest.mark.parametrize(
    'original, escaped',
    [
        (
            '''
!BaseExecutor {}
!Blah {}
!MyExecutor {}
            ''',
            '''
jtype: BaseExecutor {}
!Blah {}
jtype: MyExecutor {}
            ''',
        ),
        (
            '''
!BaseExecutor
with:
  a: 123
  b: BaseExecutor
jtype: unknown-blah
            ''',
            '''
jtype: BaseExecutor
with:
  a: 123
  b: BaseExecutor
jtype: unknown-blah
            ''',
        ),
    ],
)
def test_escape(original, escaped):
    """escape/unescape are inverse operations for registered tags."""
    assert JAML.escape(original, include_unknown_tags=False).strip() == escaped.strip()
    assert (
        JAML.unescape(
            JAML.escape(original, include_unknown_tags=False),
            include_unknown_tags=False,
        ).strip()
        == original.strip()
    )
class MyExec(BaseExecutor):
    """Executor with a request handler; resolved by name in test_cls_from_tag."""
    @requests
    def foo(self, **kwargs):
        pass
def test_cls_from_tag():
    """cls_from_tag resolves registered tags (with or without a leading `!`)
    to their classes and returns None for unknown tags."""
    expectations = {
        'MyExec': MyExec,
        '!MyExec': MyExec,
        'BaseExecutor': BaseExecutor,
    }
    for tag, expected_cls in expectations.items():
        assert JAML.cls_from_tag(tag) == expected_cls
    assert JAML.cls_from_tag('Nonexisting') is None
# Each case overrides one top-level section of an Executor YAML config, or —
# with override_field=None — leaves the config untouched.
@pytest.mark.parametrize(
    'field_name, override_field',
    [
        ('with', None),
        ('metas', None),
        ('requests', None),
        ('with', {'a': 456, 'b': 'updated-test'}),
        (
            'metas',
            {'name': 'test-name-updated', 'workspace': 'test-work-space-updated'},
        ),
        ('requests', {'/foo': 'baz'}),
        # assure py_modules only occurs once #3830
        (
            'metas',
            {
                'name': 'test-name-updated',
                'workspace': 'test-work-space-updated',
                'py_modules': 'test_module.py',
            },
        ),
    ],
)
def test_override_yml_params(field_name, override_field):
    """_override_yml_params replaces the requested section in place and must
    not duplicate `py_modules` at the top level (issue #3830)."""
    original_raw_yaml = {
        'jtype': 'SimpleIndexer',
        'with': {'a': 123, 'b': 'test'},
        'metas': {'name': 'test-name', 'workspace': 'test-work-space'},
        'requests': {'/foo': 'bar'},
    }
    # NOTE(review): this is an alias, not a copy, so the equality assertion in
    # the else-branch below compares the dict with itself.
    updated_raw_yaml = original_raw_yaml
    JAMLCompatible()._override_yml_params(updated_raw_yaml, field_name, override_field)
    if override_field:
        assert updated_raw_yaml[field_name] == override_field
    else:
        assert original_raw_yaml == updated_raw_yaml
    # assure we don't create py_modules twice
    # Fixed: the guard previously tested `override_field == 'metas'` — a dict
    # (or None) compared against a string — so these assertions never ran.
    if field_name == 'metas' and override_field and 'py_modules' in override_field:
        assert 'py_modules' in updated_raw_yaml['metas']
        assert 'py_modules' not in updated_raw_yaml
| 24.049645 | 87 | 0.558242 |
7955ff1eaeb8fd7c79aafdcade51c599f5cc4d35 | 856 | py | Python | Servus/plugins/arduino_bmp085/widget.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/plugins/arduino_bmp085/widget.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | Servus/plugins/arduino_bmp085/widget.py | sug4rok/Servus | 9840d0e275085c08b99fc7662eb22f2ab253d8f8 | [
"MIT"
] | null | null | null | # coding=utf-8
from plugins.utils import get_used_plugins_by, get_latest_sensor_value
from climate.models import PressureValue
def get_widget_data(plan_id):
"""
Функция, предоставляющая данные атмосферного давления для каждого добавленого датчика BMP085/BMP180
:param plan_id: int ID планировки.
:returns: list Список кортежей с данными атмосферного давления и координатами расположения.
виджетов.
"""
sensors = get_used_plugins_by(package='plugins.arduino_bmp085')
sensors = [s for s in sensors if s.plan_image_id == plan_id]
values = [get_latest_sensor_value(PressureValue, sensor) for sensor in sensors]
return [(plan_id, v.content_object.name, v.content_object.horiz_position,
v.content_object.vert_position, v.content_object.level,
v.pressure) for v in values if v is not None]
| 37.217391 | 103 | 0.75 |
7955ff72c6e15b2f71f797bb947dc8134e19f3a3 | 2,692 | py | Python | integreat_cms/cms/migrations/0032_region_bounding_box.py | Integreat/cms-django | ab0a89576ae901f4b30aa8e9c65ff43c44654a80 | [
"Apache-2.0"
] | 21 | 2018-10-26T20:10:45.000Z | 2020-10-22T09:41:46.000Z | integreat_cms/cms/migrations/0032_region_bounding_box.py | Integreat/cms-django | ab0a89576ae901f4b30aa8e9c65ff43c44654a80 | [
"Apache-2.0"
] | 392 | 2018-10-25T08:34:07.000Z | 2020-11-19T08:20:30.000Z | integreat_cms/cms/migrations/0032_region_bounding_box.py | digitalfabrik/integreat-cms | ab0a89576ae901f4b30aa8e9c65ff43c44654a80 | [
"Apache-2.0"
] | 23 | 2019-03-06T17:11:35.000Z | 2020-10-16T04:36:41.000Z | # Generated by Django 3.2.13 on 2022-06-15 10:58
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
"""
Add region bounding box fields
"""
dependencies = [
("cms", "0031_unique_version_constraint"),
]
operations = [
migrations.AddField(
model_name="region",
name="latitude_max",
field=models.FloatField(
blank=True,
help_text="The top boundary of the region",
null=True,
validators=[
django.core.validators.MinValueValidator(-90.0),
django.core.validators.MaxValueValidator(90.0),
],
verbose_name="maximum latitude",
),
),
migrations.AddField(
model_name="region",
name="latitude_min",
field=models.FloatField(
blank=True,
help_text="The bottom boundary of the region",
null=True,
validators=[
django.core.validators.MinValueValidator(-90.0),
django.core.validators.MaxValueValidator(90.0),
],
verbose_name="minimum latitude",
),
),
migrations.AddField(
model_name="region",
name="longitude_max",
field=models.FloatField(
blank=True,
help_text="The right boundary of the region",
null=True,
validators=[
django.core.validators.MinValueValidator(-180.0),
django.core.validators.MaxValueValidator(180.0),
],
verbose_name="maximum longitude",
),
),
migrations.AddField(
model_name="region",
name="longitude_min",
field=models.FloatField(
blank=True,
help_text="The left boundary of the region",
null=True,
validators=[
django.core.validators.MinValueValidator(-180.0),
django.core.validators.MaxValueValidator(180.0),
],
verbose_name="minimum longitude",
),
),
migrations.AlterField(
model_name="region",
name="postal_code",
field=models.CharField(
help_text="For districts, enter the postcode of the administrative headquarters.",
max_length=10,
verbose_name="postal code",
),
),
]
| 32.433735 | 98 | 0.502229 |
7955ff9f43aa0c8a8d8895e74de1a18ac73acee7 | 1,383 | py | Python | boolean2/odict.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | [
"MIT"
] | null | null | null | boolean2/odict.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | [
"MIT"
] | null | null | null | boolean2/odict.py | AbrahmAB/booleannet | a07124047d18a5b7265e050a234969ac58970c7a | [
"MIT"
] | null | null | null | #
# a dictionary-like class that maintains the order of insertion
#
# based on a recipe by Igor Ghisi located at
#
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/496761
#
from UserDict import DictMixin
class odict(DictMixin):
"""
>>> o = odict()
>>> o[2]=20 ; o[1]=10
>>> o.keys()
[2, 1]
>>> o.values()
[20, 10]
>>> o.items()
[(2, 20), (1, 10)]
>>> [ x for x in o ]
[2, 1]
>>>
>>> d = dict()
>>> d[2]=20 ; d[1]=10
>>> d.keys()
[1, 2]
>>> d.values()
[10, 20]
>>> d.items()
[(1, 10), (2, 20)]
>>> [ x for x in d ]
[1, 2]
"""
def __init__(self, **kwds):
self._keys = []
self._data = {}
for key, value in kwds.items():
self[key] = value
def __setitem__(self, key, value):
if key not in self._data:
self._keys.append(key)
self._data[key] = value
def __getitem__(self, key):
return self._data[key]
def __delitem__(self, key):
del self._data[key]
self._keys.remove(key)
def keys(self):
return list(self._keys)
def copy(self):
copyDict = odict()
copyDict._data = self._data.copy()
copyDict._keys = self._keys[:]
return copyDict
if __name__ == '__main__':
    # Run the doctests embedded in the odict class docstring above.
    import doctest
    doctest.testmod()
7956006ef76cd0cabd62c3f3889f27d7f942d654 | 19,895 | py | Python | third_party/ros_aarch64/lib/python2.7/dist-packages/tf2_msgs/msg/_LookupTransformActionResult.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 2 | 2018-01-29T03:10:39.000Z | 2020-12-08T09:08:41.000Z | third_party/ros_x86_64/lib/python2.7/dist-packages/tf2_msgs/msg/_LookupTransformActionResult.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | null | null | null | third_party/ros_x86_64/lib/python2.7/dist-packages/tf2_msgs/msg/_LookupTransformActionResult.py | silverland79/apollo1.0 | 6e725e8dd5013b769efa18f43e5ae675f4847fbd | [
"Apache-2.0"
] | 3 | 2018-01-29T12:22:56.000Z | 2020-12-08T09:08:46.000Z | # This Python file uses the following encoding: utf-8
"""autogenerated by genpy from tf2_msgs/LookupTransformActionResult.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import geometry_msgs.msg
import genpy
import actionlib_msgs.msg
import tf2_msgs.msg
import std_msgs.msg
class LookupTransformActionResult(genpy.Message):
_md5sum = "ac26ce75a41384fa8bb4dc10f491ab90"
_type = "tf2_msgs/LookupTransformActionResult"
_has_header = True #flag to mark the presence of a Header object
_full_text = """# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
Header header
actionlib_msgs/GoalStatus status
LookupTransformResult result
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: actionlib_msgs/GoalStatus
GoalID goal_id
uint8 status
uint8 PENDING = 0 # The goal has yet to be processed by the action server
uint8 ACTIVE = 1 # The goal is currently being processed by the action server
uint8 PREEMPTED = 2 # The goal received a cancel request after it started executing
# and has since completed its execution (Terminal State)
uint8 SUCCEEDED = 3 # The goal was achieved successfully by the action server (Terminal State)
uint8 ABORTED = 4 # The goal was aborted during execution by the action server due
# to some failure (Terminal State)
uint8 REJECTED = 5 # The goal was rejected by the action server without being processed,
# because the goal was unattainable or invalid (Terminal State)
uint8 PREEMPTING = 6 # The goal received a cancel request after it started executing
# and has not yet completed execution
uint8 RECALLING = 7 # The goal received a cancel request before it started executing,
# but the action server has not yet confirmed that the goal is canceled
uint8 RECALLED = 8 # The goal received a cancel request before it started executing
# and was successfully cancelled (Terminal State)
uint8 LOST = 9 # An action client can determine that a goal is LOST. This should not be
# sent over the wire by an action server
#Allow for the user to associate a string with GoalStatus for debugging
string text
================================================================================
MSG: actionlib_msgs/GoalID
# The stamp should store the time at which this goal was requested.
# It is used by an action server when it tries to preempt all
# goals that were requested before a certain time
time stamp
# The id provides a way to associate feedback and
# result message with specific goal requests. The id
# specified must be unique.
string id
================================================================================
MSG: tf2_msgs/LookupTransformResult
# ====== DO NOT MODIFY! AUTOGENERATED FROM AN ACTION DEFINITION ======
geometry_msgs/TransformStamped transform
tf2_msgs/TF2Error error
================================================================================
MSG: geometry_msgs/TransformStamped
# This expresses a transform from coordinate frame header.frame_id
# to the coordinate frame child_frame_id
#
# This message is mostly used by the
# <a href="http://www.ros.org/wiki/tf">tf</a> package.
# See its documentation for more information.
Header header
string child_frame_id # the frame id of the child frame
Transform transform
================================================================================
MSG: geometry_msgs/Transform
# This represents the transform between two coordinate frames in free space.
Vector3 translation
Quaternion rotation
================================================================================
MSG: geometry_msgs/Vector3
# This represents a vector in free space.
# It is only meant to represent a direction. Therefore, it does not
# make sense to apply a translation to it (e.g., when applying a
# generic rigid transformation to a Vector3, tf2 will only apply the
# rotation). If you want your data to be translatable too, use the
# geometry_msgs/Point message instead.
float64 x
float64 y
float64 z
================================================================================
MSG: geometry_msgs/Quaternion
# This represents an orientation in free space in quaternion form.
float64 x
float64 y
float64 z
float64 w
================================================================================
MSG: tf2_msgs/TF2Error
uint8 NO_ERROR = 0
uint8 LOOKUP_ERROR = 1
uint8 CONNECTIVITY_ERROR = 2
uint8 EXTRAPOLATION_ERROR = 3
uint8 INVALID_ARGUMENT_ERROR = 4
uint8 TIMEOUT_ERROR = 5
uint8 TRANSFORM_ERROR = 6
uint8 error
string error_string
"""
__slots__ = ['header','status','result']
_slot_types = ['std_msgs/Header','actionlib_msgs/GoalStatus','tf2_msgs/LookupTransformResult']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,status,result
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(LookupTransformActionResult, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.status is None:
self.status = actionlib_msgs.msg.GoalStatus()
if self.result is None:
self.result = tf2_msgs.msg.LookupTransformResult()
else:
self.header = std_msgs.msg.Header()
self.status = actionlib_msgs.msg.GoalStatus()
self.result = tf2_msgs.msg.LookupTransformResult()
  def _get_types(self):
    """
    internal API method

    Returns the ROS type string of each slot, index-aligned with
    __slots__; used by genpy's serialization machinery.
    """
    return self._slot_types
  def serialize(self, buff):
    """
    serialize message into buffer

    Writes the fields in little-endian ROS wire format: fixed-size groups
    via the precompiled module-level Structs, strings as a uint32 length
    prefix followed by the raw UTF-8 bytes.
    :param buff: buffer, ``StringIO``
    """
    try:
      # std_msgs/Header: seq + stamp.secs + stamp.nsecs (3 x uint32).
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      # header.frame_id: length-prefixed string.
      # NOTE(review): `python3` and `unicode` look like compatibility globals
      # from this generated module's prologue (not visible in this chunk) -
      # confirm before relying on them.
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # status.goal_id.stamp: secs + nsecs (2 x uint32).
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      # status.goal_id.id: length-prefixed string.
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # status.status: single uint8 status code.
      buff.write(_struct_B.pack(self.status.status))
      # status.text: length-prefixed string.
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # result.transform.header: seq + stamp (3 x uint32).
      _x = self
      buff.write(_struct_3I.pack(_x.result.transform.header.seq, _x.result.transform.header.stamp.secs, _x.result.transform.header.stamp.nsecs))
      # result.transform.header.frame_id: length-prefixed string.
      _x = self.result.transform.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # result.transform.child_frame_id: length-prefixed string.
      _x = self.result.transform.child_frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      # Transform (translation xyz + rotation xyzw, 7 x float64) and
      # TF2Error.error (uint8) packed as one '<7dB' record.
      _x = self
      buff.write(_struct_7dB.pack(_x.result.transform.transform.translation.x, _x.result.transform.transform.translation.y, _x.result.transform.transform.translation.z, _x.result.transform.transform.rotation.x, _x.result.transform.transform.rotation.y, _x.result.transform.transform.rotation.z, _x.result.transform.transform.rotation.w, _x.result.error.error))
      # error.error_string: length-prefixed string.
      _x = self.result.error.error_string
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize(self, str):
    """
    unpack serialized message in str into this message instance

    Reads fields in the same order serialize() wrote them, advancing a
    start/end byte window over the input. (The parameter is named ``str``
    by the generator and shadows the builtin; kept for API compatibility.)
    :param str: byte array of serialized message, ``str``
    """
    try:
      # Lazily create any fields that are still None before filling them.
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.result is None:
        self.result = tf2_msgs.msg.LookupTransformResult()
      end = 0
      # std_msgs/Header: seq + stamp (3 x uint32 = 12 bytes).
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      # header.frame_id: uint32 length prefix, then that many bytes.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      # status.goal_id.stamp: 2 x uint32 (8 bytes).
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      # status.goal_id.id: length-prefixed string.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      # status.status: single uint8.
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      # status.text: length-prefixed string.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      # result.transform.header: seq + stamp (12 bytes).
      _x = self
      start = end
      end += 12
      (_x.result.transform.header.seq, _x.result.transform.header.stamp.secs, _x.result.transform.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      # result.transform.header.frame_id: length-prefixed string.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.transform.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.result.transform.header.frame_id = str[start:end]
      # result.transform.child_frame_id: length-prefixed string.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.transform.child_frame_id = str[start:end].decode('utf-8')
      else:
        self.result.transform.child_frame_id = str[start:end]
      # Transform (7 x float64) + TF2Error.error (uint8): 7*8 + 1 = 57 bytes.
      _x = self
      start = end
      end += 57
      (_x.result.transform.transform.translation.x, _x.result.transform.transform.translation.y, _x.result.transform.transform.translation.z, _x.result.transform.transform.rotation.x, _x.result.transform.transform.rotation.y, _x.result.transform.transform.rotation.z, _x.result.transform.transform.rotation.w, _x.result.error.error,) = _struct_7dB.unpack(str[start:end])
      # error.error_string: length-prefixed string.
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.error.error_string = str[start:end].decode('utf-8')
      else:
        self.result.error.error_string = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
  def serialize_numpy(self, buff, numpy):
    """
    serialize message with numpy array types into buffer

    This message has no array fields, so the body is identical to
    serialize() and the ``numpy`` module argument is unused; the
    signature exists for genpy API symmetry.
    :param buff: buffer, ``StringIO``
    :param numpy: numpy python module
    """
    try:
      # Field layout is the same as serialize(); see that method's comments.
      _x = self
      buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
      _x = self.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_2I.pack(_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs))
      _x = self.status.goal_id.id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      buff.write(_struct_B.pack(self.status.status))
      _x = self.status.text
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_3I.pack(_x.result.transform.header.seq, _x.result.transform.header.stamp.secs, _x.result.transform.header.stamp.nsecs))
      _x = self.result.transform.header.frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self.result.transform.child_frame_id
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
      _x = self
      buff.write(_struct_7dB.pack(_x.result.transform.transform.translation.x, _x.result.transform.transform.translation.y, _x.result.transform.transform.translation.z, _x.result.transform.transform.rotation.x, _x.result.transform.transform.rotation.y, _x.result.transform.transform.rotation.z, _x.result.transform.transform.rotation.w, _x.result.error.error))
      _x = self.result.error.error_string
      length = len(_x)
      if python3 or type(_x) == unicode:
        _x = _x.encode('utf-8')
        length = len(_x)
      if python3:
        buff.write(struct.pack('<I%sB'%length, length, *_x))
      else:
        buff.write(struct.pack('<I%ss'%length, length, _x))
    except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(locals().get('_x', self)))))
    except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(locals().get('_x', self)))))
  def deserialize_numpy(self, str, numpy):
    """
    unpack serialized message in str into this message instance using numpy for array types

    This message has no array fields, so the body is identical to
    deserialize() and the ``numpy`` module argument is unused; the
    signature exists for genpy API symmetry.
    :param str: byte array of serialized message, ``str``
    :param numpy: numpy python module
    """
    try:
      # Lazily create any fields that are still None before filling them.
      if self.header is None:
        self.header = std_msgs.msg.Header()
      if self.status is None:
        self.status = actionlib_msgs.msg.GoalStatus()
      if self.result is None:
        self.result = tf2_msgs.msg.LookupTransformResult()
      end = 0
      # Byte layout matches deserialize(); see that method's comments.
      _x = self
      start = end
      end += 12
      (_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.header.frame_id = str[start:end]
      _x = self
      start = end
      end += 8
      (_x.status.goal_id.stamp.secs, _x.status.goal_id.stamp.nsecs,) = _struct_2I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.goal_id.id = str[start:end].decode('utf-8')
      else:
        self.status.goal_id.id = str[start:end]
      start = end
      end += 1
      (self.status.status,) = _struct_B.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.status.text = str[start:end].decode('utf-8')
      else:
        self.status.text = str[start:end]
      _x = self
      start = end
      end += 12
      (_x.result.transform.header.seq, _x.result.transform.header.stamp.secs, _x.result.transform.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.transform.header.frame_id = str[start:end].decode('utf-8')
      else:
        self.result.transform.header.frame_id = str[start:end]
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.transform.child_frame_id = str[start:end].decode('utf-8')
      else:
        self.result.transform.child_frame_id = str[start:end]
      # Transform (7 x float64) + TF2Error.error (uint8): 7*8 + 1 = 57 bytes.
      _x = self
      start = end
      end += 57
      (_x.result.transform.transform.translation.x, _x.result.transform.transform.translation.y, _x.result.transform.transform.translation.z, _x.result.transform.transform.rotation.x, _x.result.transform.transform.rotation.y, _x.result.transform.transform.rotation.z, _x.result.transform.transform.rotation.w, _x.result.error.error,) = _struct_7dB.unpack(str[start:end])
      start = end
      end += 4
      (length,) = _struct_I.unpack(str[start:end])
      start = end
      end += length
      if python3:
        self.result.error.error_string = str[start:end].decode('utf-8')
      else:
        self.result.error.error_string = str[start:end]
      return self
    except struct.error as e:
      raise genpy.DeserializationError(e) #most likely buffer underfill
# Precompiled little-endian Structs, built once at import time and shared by
# the (de)serialization methods above.
_struct_I = genpy.struct_I  # uint32, used for string length prefixes
_struct_3I = struct.Struct("<3I")  # Header: seq, stamp.secs, stamp.nsecs
_struct_B = struct.Struct("<B")  # single uint8 (GoalStatus.status)
_struct_2I = struct.Struct("<2I")  # GoalID stamp: secs, nsecs
_struct_7dB = struct.Struct("<7dB")  # translation xyz + rotation xyzw (7 float64) + TF2Error.error (uint8)
| 39.009804 | 370 | 0.628449 |
7956015004a392bd7649f084effc01bafb8aa254 | 2,661 | py | Python | {{cookiecutter.project_slug}}/controller.py | bingweichen/fic_flask_create_app | e55fb9e53122ff98676e0c622c65a952de50a01c | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/controller.py | bingweichen/fic_flask_create_app | e55fb9e53122ff98676e0c622c65a952de50a01c | [
"MIT"
] | null | null | null | {{cookiecutter.project_slug}}/controller.py | bingweichen/fic_flask_create_app | e55fb9e53122ff98676e0c622c65a952de50a01c | [
"MIT"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
from flask import g
from flask_restplus import Resource, Namespace, fields
from app.{{cookiecutter.project_slug}} import service as {{cookiecutter.project_slug}}_service
from common.decorators import arguments_parser, catch_error
from common.responses import created, ok
from app.user.service import token_required
from app.{{cookiecutter.project_slug}}.model import {{cookiecutter.ClassName}}
api = Namespace('{{cookiecutter.project_slug}}', path='/{{cookiecutter.project_slug}}')
{{cookiecutter.project_slug}}_model = api.model('login', {
# 'username': fields.String(required=True, description='user username'),
# 'password': fields.String(required=True, description='user password '),
})
@api.route('')
class {{cookiecutter.ClassName}}sResource(Resource):
"""{{cookiecutter.ClassName}} Resource"""
@api.marshal_with({{cookiecutter.project_slug}}_model)
def get(self):
"""获取所有 {{cookiecutter.project_slug}}"""
return {{cookiecutter.ClassName}}.query.filter_by().all()
@api.doc(description='create {{cookiecutter.project_slug}}')
@api.expect({{cookiecutter.project_slug}}_model, validate=True)
@arguments_parser
@catch_error
@token_required
def post(self):
"""创建 {{cookiecutter.project_slug}}"""
data = g.args
current_user = g.user
result = {{cookiecutter.project_slug}}_service.create_{{cookiecutter.project_slug}}(data, current_user)
return created(result)
@api.route('/<string:{{cookiecutter.project_slug}}_id>')
class {{cookiecutter.ClassName}}Resource(Resource):
@api.marshal_with({{cookiecutter.project_slug}}_model)
def get(self, {{cookiecutter.project_slug}}_id):
"""获取{{cookiecutter.project_slug}}"""
return {{cookiecutter.ClassName}}.query.filter_by(id={{cookiecutter.project_slug}}_id).first()
@token_required
def delete(self, {{cookiecutter.project_slug}}_id):
"""删除{{cookiecutter.project_slug}}"""
result = {{cookiecutter.project_slug}}_service.delete_{{cookiecutter.project_slug}}({{cookiecutter.project_slug}}_id)
return ok(result, "delete success")
@api.expect({{cookiecutter.project_slug}}_model, validate=True)
@arguments_parser
@catch_error
@token_required
@api.marshal_with({{cookiecutter.project_slug}}_model)
def put(self, {{cookiecutter.project_slug}}_id):
"""更新{{cookiecutter.project_slug}}"""
result = {{cookiecutter.project_slug}}_service.update_{{cookiecutter.project_slug}}({{cookiecutter.project_slug}}_id={{cookiecutter.project_slug}}_id, data=g.args)
return result
| 40.318182 | 171 | 0.71289 |
7956030a2563c04ca3761886ef523e69de13a563 | 2,045 | py | Python | twill/extensions/match_parse.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 13 | 2020-04-18T15:17:58.000Z | 2022-02-24T13:25:46.000Z | twill/extensions/match_parse.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 5 | 2020-04-04T21:16:00.000Z | 2022-02-10T00:26:20.000Z | twill/extensions/match_parse.py | rickproza/twill | 7a98e4912a8ff929a94e35d35e7a027472ee4f46 | [
"MIT"
] | 3 | 2020-06-06T17:26:19.000Z | 2022-02-10T00:30:39.000Z | """
Suresh's extension for slicing and dicing variables with regular expressions.
"""
import re
from twill import browser, log
from twill.namespaces import get_twill_glocals
def showvar(which):
    """>> showvar var
    Shows the value of the variable 'var'.
    """
    global_dict, local_dict = get_twill_glocals()
    # Locals shadow globals, so layer the two namespaces with locals last.
    merged = dict(global_dict)
    merged.update(local_dict)
    log.info(merged.get(str(which)))
def split(what):
    """>> split <regex>
    Sets __matchlist__ to re.split(regex, page).
    """
    pieces = re.split(what, browser.html)
    _, local_dict = get_twill_glocals()
    local_dict['__matchlist__'] = pieces
def findall(what):
    """>> findall <regex>
    Sets __matchlist__ to re.findall(regex, page).
    """
    # DOTALL so '.' can span newlines in the page source.
    matches = re.compile(what, re.DOTALL).findall(browser.html)
    _, local_dict = get_twill_glocals()
    local_dict['__matchlist__'] = matches
def getmatch(where, what):
    """>> getmatch into_var expression
    Evaluates an expression against __match__ and puts it into 'into_var'.
    """
    _, local_dict = get_twill_glocals()
    current = local_dict['__match__']
    local_dict[where] = _do_eval(current, what)
def setmatch(what):
    """>> setmatch expression
    Sets each element __matchlist__ to eval(expression); 'm' is set
    to each element of __matchlist__ prior to processing.
    """
    _, local_dict = get_twill_glocals()
    current = local_dict['__matchlist__']
    if isinstance(current, str):
        current = [current]  # promote a bare string to a one-element list
    local_dict['__matchlist__'] = [_do_eval(item, what) for item in current]
def _do_eval(match, exp):
"""Used internally to evaluate an expression."""
return eval(exp, globals(), {'m': match})
def popmatch(which):
    """>> popmatch index
    Pops __matchlist__[i] into __match__.
    """
    _, local_dict = get_twill_glocals()
    popped = local_dict['__matchlist__'].pop(int(which))
    local_dict['__match__'] = popped
| 22.228261 | 77 | 0.667482 |
79560333320518d11d8277843febb06b082dc082 | 12,622 | py | Python | tests/system/libraries/NvdaLib.py | moyanming/nvda | 78b54ecbbb509e9633f6d5e58a06d67706578dec | [
"bzip2-1.0.6"
] | null | null | null | tests/system/libraries/NvdaLib.py | moyanming/nvda | 78b54ecbbb509e9633f6d5e58a06d67706578dec | [
"bzip2-1.0.6"
] | null | null | null | tests/system/libraries/NvdaLib.py | moyanming/nvda | 78b54ecbbb509e9633f6d5e58a06d67706578dec | [
"bzip2-1.0.6"
] | null | null | null | # A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2020 NV Access Limited
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""This file provides robot library functions for NVDA system tests.
It contains helper methods for system tests, most specifically related to NVDA
- setup config,
- starting
- quiting
- config cleanup
This is in contrast with the `SystemTestSpy/speechSpy*.py files,
which provide library functions related to monitoring NVDA and asserting NVDA output.
"""
# imported methods start with underscore (_) so they don't get imported into robot files as keywords
from os.path import join as _pJoin, abspath as _abspath, expandvars as _expandvars
import tempfile as _tempFile
from typing import Optional
from urllib.parse import quote as _quoteStr
from robotremoteserver import (
test_remote_server as _testRemoteServer,
stop_remote_server as _stopRemoteServer,
)
from SystemTestSpy import (
_blockUntilConditionMet,
_getLib,
_nvdaSpyAlias,
configManager
)
# Imported for type information
from robot.libraries.BuiltIn import BuiltIn
from robot.libraries.OperatingSystem import OperatingSystem as _OpSysLib
from robot.libraries.Process import Process as _Process
from robot.libraries.Remote import Remote as _Remote
builtIn: BuiltIn = BuiltIn()
opSys: _OpSysLib = _getLib('OperatingSystem')
process: _Process = _getLib('Process')
class _NvdaLocationData:
def __init__(self):
# robot is expected to be run from the NVDA repo root directory. We want all repo specific
# paths to be relative to this. This would allow us to change where it is run from if we decided to.
self.repoRoot = _abspath("./")
self.stagingDir = _tempFile.gettempdir()
opSys.directory_should_exist(self.stagingDir)
self.whichNVDA = builtIn.get_variable_value("${whichNVDA}", "source")
self._installFilePath = builtIn.get_variable_value("${installDir}", None)
self.NVDAInstallerCommandline = None
if self.whichNVDA == "source":
self._runNVDAFilePath = _pJoin(self.repoRoot, "runnvda.bat")
self.baseNVDACommandline = self._runNVDAFilePath
elif self.whichNVDA == "installed":
self._runNVDAFilePath = self.findInstalledNVDAPath()
self.baseNVDACommandline = f'"{str(self._runNVDAFilePath)}"'
if self._installFilePath is not None:
self.NVDAInstallerCommandline = f'"{str(self._installFilePath)}"'
else:
raise AssertionError("RobotFramework should be run with argument: '-v whichNVDA:[source|installed]'")
self.profileDir = _pJoin(self.stagingDir, "nvdaProfile")
self.logPath = _pJoin(self.profileDir, 'nvda.log')
self.preservedLogsDir = _pJoin(
builtIn.get_variable_value("${OUTPUT DIR}"),
"nvdaTestRunLogs"
)
def findInstalledNVDAPath(self) -> Optional[str]:
NVDAFilePath = _pJoin(_expandvars('%PROGRAMFILES%'), 'nvda', 'nvda.exe')
legacyNVDAFilePath = _pJoin(_expandvars('%PROGRAMFILES%'), 'NVDA', 'nvda.exe')
exeErrorMsg = f"Unable to find installed NVDA exe. Paths tried: {NVDAFilePath}, {legacyNVDAFilePath}"
try:
opSys.file_should_exist(NVDAFilePath)
return NVDAFilePath
except AssertionError:
# Older versions of NVDA (<=2020.4) install the exe in NVDA\nvda.exe
opSys.file_should_exist(legacyNVDAFilePath, exeErrorMsg)
return legacyNVDAFilePath
def ensureInstallerPathsExist(self):
fileWarnMsg = f"Unable to run NVDA installer unless path exists. Path given: {self._installFilePath}"
opSys.file_should_exist(self._installFilePath, fileWarnMsg)
opSys.create_directory(self.profileDir)
opSys.create_directory(self.preservedLogsDir)
def ensurePathsExist(self):
fileWarnMsg = f"Unable to run NVDA installer unless path exists. Path given: {self._runNVDAFilePath}"
opSys.file_should_exist(self._runNVDAFilePath, fileWarnMsg)
opSys.create_directory(self.profileDir)
opSys.create_directory(self.preservedLogsDir)
_locations = _NvdaLocationData()
class NvdaLib:
"""Robot Framework library for interacting with NVDA.
Notable:
- NvdaLib.nvdaSpy is a library instance for getting speech and other information out of NVDA
"""
def __init__(self):
self.nvdaSpy = None #: Optional[SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib]
self.nvdaHandle: Optional[int] = None
@staticmethod
def _createTestIdFileName(name):
suiteName = builtIn.get_variable_value("${SUITE NAME}")
testName = builtIn.get_variable_value("${TEST NAME}")
outputFileName = f"{suiteName}-{testName}-{name}".replace(" ", "_")
outputFileName = _quoteStr(outputFileName)
return outputFileName
@staticmethod
def setup_nvda_profile(configFileName):
configManager.setupProfile(
_locations.repoRoot,
configFileName,
_locations.stagingDir
)
@staticmethod
def teardown_nvda_profile():
configManager.teardownProfile(
_locations.stagingDir
)
nvdaProcessAlias = 'nvdaAlias'
_spyServerPort = 8270 # is `registered by IANA` for remote server usage. Two ASCII values:'RF'
_spyServerURI = f'http://127.0.0.1:{_spyServerPort}'
_spyAlias = _nvdaSpyAlias
def _startNVDAProcess(self):
"""Start NVDA.
Use debug logging, replacing any current instance, using the system test profile directory
"""
_locations.ensurePathsExist()
command = (
f"{_locations.baseNVDACommandline}"
f" --debug-logging"
f" -r"
f" -c \"{_locations.profileDir}\""
f" --log-file \"{_locations.logPath}\""
)
self.nvdaHandle = handle = process.start_process(
command,
shell=True,
alias=self.nvdaProcessAlias,
stdout=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stdout.txt")),
stderr=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stderr.txt")),
)
return handle
def _startNVDAInstallerProcess(self):
"""Start NVDA Installer.
Use debug logging, replacing any current instance, using the system test profile directory
"""
_locations.ensureInstallerPathsExist()
command = (
f"{_locations.NVDAInstallerCommandline}"
f" --debug-logging"
f" -r"
f" -c \"{_locations.profileDir}\""
f" --log-file \"{_locations.logPath}\""
)
self.nvdaHandle = handle = process.start_process(
command,
shell=True,
alias=self.nvdaProcessAlias,
stdout=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stdout.txt")),
stderr=_pJoin(_locations.preservedLogsDir, self._createTestIdFileName("stderr.txt")),
)
return handle
def _connectToRemoteServer(self, connectionTimeoutSecs=10):
"""Connects to the nvdaSpyServer
Because we do not know how far through the startup NVDA is, we have to poll
to check that the server is available. Importing the library immediately seems
to succeed, but then calling a keyword later fails with RuntimeError:
"Connection to remote server broken: [Errno 10061]
No connection could be made because the target machine actively refused it"
Instead we wait until the remote server is available before importing the library and continuing.
"""
builtIn.log(f"Waiting for {self._spyAlias} to be available at: {self._spyServerURI}", level='DEBUG')
# Importing the 'Remote' library always succeeds, even when a connection can not be made.
# If that happens, then some 'Remote' keyword will fail at some later point.
# therefore we use '_testRemoteServer' to ensure that we can in fact connect before proceeding.
_blockUntilConditionMet(
getValue=lambda: _testRemoteServer(self._spyServerURI, log=False),
giveUpAfterSeconds=connectionTimeoutSecs,
errorMessage=f"Unable to connect to {self._spyAlias}",
)
builtIn.log(f"Connecting to {self._spyAlias}", level='DEBUG')
# If any remote call takes longer than this, the connection will be closed!
maxRemoteKeywordDurationSeconds = 30
builtIn.import_library(
"Remote", # name of library to import
# Arguments to construct the library instance:
f"uri={self._spyServerURI}",
f"timeout={maxRemoteKeywordDurationSeconds}",
# Set an alias for the imported library instance
"WITH NAME",
self._spyAlias,
)
builtIn.log(f"Getting {self._spyAlias} library instance", level='DEBUG')
self.nvdaSpy = self._addMethodsToSpy(builtIn.get_library_instance(self._spyAlias))
# Ensure that keywords timeout before `timeout` given to `Remote` library,
# otherwise we lose control over NVDA.
self.nvdaSpy.init_max_keyword_duration(maxSeconds=maxRemoteKeywordDurationSeconds)
@staticmethod
def _addMethodsToSpy(remoteLib: _Remote):
""" Adds a method for each keywords on the remote library.
@param remoteLib: the library to augment with methods.
@rtype: SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib
@return: The library augmented with methods for all keywords.
"""
# Add methods back onto the lib so they can be called directly rather than manually calling run_keyword
def _makeKeywordCaller(lib, keyword):
def runKeyword(*args, **kwargs):
builtIn.log(
f"{keyword}"
f"{f' {args}' if args else ''}"
f"{f' {kwargs}' if kwargs else ''}"
)
return lib.run_keyword(keyword, args, kwargs)
return runKeyword
for name in remoteLib.get_keyword_names():
setattr(
remoteLib,
name,
_makeKeywordCaller(remoteLib, name)
)
return remoteLib
def start_NVDAInstaller(self, settingsFileName):
builtIn.log(f"Starting NVDA with config: {settingsFileName}")
self.setup_nvda_profile(settingsFileName)
nvdaProcessHandle = self._startNVDAInstallerProcess()
process.process_should_be_running(nvdaProcessHandle)
# Timeout is increased due to the installer load time and start up splash sound
self._connectToRemoteServer(connectionTimeoutSecs=30)
self.nvdaSpy.wait_for_NVDA_startup_to_complete()
return nvdaProcessHandle
def start_NVDA(self, settingsFileName):
builtIn.log(f"Starting NVDA with config: {settingsFileName}")
self.setup_nvda_profile(settingsFileName)
nvdaProcessHandle = self._startNVDAProcess()
process.process_should_be_running(nvdaProcessHandle)
self._connectToRemoteServer()
self.nvdaSpy.wait_for_NVDA_startup_to_complete()
return nvdaProcessHandle
def save_NVDA_log(self):
"""NVDA logs are saved to the ${OUTPUT DIR}/nvdaTestRunLogs/${SUITE NAME}-${TEST NAME}-nvda.log"""
builtIn.log("Saving NVDA log")
saveToPath = self.create_preserved_test_output_filename("nvda.log")
opSys.copy_file(
_locations.logPath,
saveToPath
)
builtIn.log(f"Log saved to: {saveToPath}", level='DEBUG')
def create_preserved_test_output_filename(self, fileName):
"""EG for nvda.log path will become:
${OUTPUT DIR}/nvdaTestRunLogs/${SUITE NAME}-${TEST NAME}-nvda.log
"""
return _pJoin(_locations.preservedLogsDir, self._createTestIdFileName(fileName))
def quit_NVDA(self):
builtIn.log("Stopping nvdaSpy server: {}".format(self._spyServerURI))
try:
_stopRemoteServer(self._spyServerURI, log=False)
process.run_process(
f"{_locations.baseNVDACommandline} -q --disable-addons",
shell=True,
)
process.wait_for_process(self.nvdaHandle)
except Exception:
raise
finally:
self.save_NVDA_log()
# remove the spy so that if nvda is run manually against this config it does not interfere.
self.teardown_nvda_profile()
def quit_NVDAInstaller(self):
builtIn.log("Stopping nvdaSpy server: {}".format(self._spyServerURI))
self.nvdaSpy.emulateKeyPress("insert+q")
self.nvdaSpy.wait_for_specific_speech("Exit NVDA")
self.nvdaSpy.emulateKeyPress("enter", blockUntilProcessed=False)
builtIn.sleep(1)
try:
_stopRemoteServer(self._spyServerURI, log=False)
except Exception:
raise
finally:
self.save_NVDA_log()
# remove the spy so that if nvda is run manually against this config it does not interfere.
self.teardown_nvda_profile()
def getSpyLib():
	""" Gets the spy library instance. This has been augmented with methods for all supported keywords.
	Requires NvdaLib and nvdaSpy (remote library - see speechSpyGlobalPlugin) to be initialised.
	On failure check order of keywords in Robot log and NVDA log for failures.
	@rtype: SystemTestSpy.speechSpyGlobalPlugin.NVDASpyLib
	@return: Remote NVDA spy Robot Framework library.
	"""
	spy = _getLib("NvdaLib").nvdaSpy
	if spy is None:
		raise AssertionError("Spy not yet available, check order of keywords and NVDA log for errors.")
	return spy
| 39.198758 | 106 | 0.744256 |
795603665a621e76ede82a6e21de6af1fa7f8a21 | 902 | py | Python | xchainpy/xchainpy_crypto/xchainpy_crypto/models/KdfParams.py | SLjavad/xchainpy-lib | e79b1fd341adaf9267964f3368500dd48de60917 | [
"MIT"
] | 8 | 2021-02-16T23:14:14.000Z | 2022-03-22T09:35:58.000Z | xchainpy/xchainpy_crypto/xchainpy_crypto/models/KdfParams.py | SLjavad/xchainpy-lib | e79b1fd341adaf9267964f3368500dd48de60917 | [
"MIT"
] | 12 | 2021-04-06T19:31:46.000Z | 2022-03-22T14:34:13.000Z | xchainpy/xchainpy_crypto/xchainpy_crypto/models/KdfParams.py | SLjavad/xchainpy-lib | e79b1fd341adaf9267964f3368500dd48de60917 | [
"MIT"
] | 10 | 2021-03-04T05:45:31.000Z | 2022-03-31T14:44:01.000Z | class KdfParams:
def __init__(self, prf:str , dklen:int , salt:str , c:int):
self._prf = prf
self._dklen = dklen
self._salt = salt
self._c = c
@classmethod
def from_dict(cls, kdfparams):
new_kdfparams = cls.__new__(cls)
for key in kdfparams:
setattr(new_kdfparams, key, kdfparams[key])
return new_kdfparams
@property
def prf(self):
return self._prf
@prf.setter
def prf(self, prf):
self._prf = prf
@property
def dklen(self):
return self._dklen
@dklen.setter
def dklen(self, dklen):
self._dklen = dklen
@property
def salt(self):
return self._salt
@salt.setter
def salt(self, salt):
self._salt = salt
@property
def c(self):
return self._c
@c.setter
def c(self, c):
self._c = c | 20.044444 | 63 | 0.553215 |
79560476da801df9be680f37601283e401ddf943 | 1,281 | py | Python | dtaidistance/clustering/visualization.py | hakimakbarmaulana/dtaidistance | ddf4a8111732d4429686d96c9195a81151be1dd8 | [
"Apache-2.0"
] | 711 | 2017-02-07T07:24:58.000Z | 2022-03-31T07:46:47.000Z | dtaidistance/clustering/visualization.py | hakimakbarmaulana/dtaidistance | ddf4a8111732d4429686d96c9195a81151be1dd8 | [
"Apache-2.0"
] | 142 | 2018-04-09T10:36:11.000Z | 2022-03-31T11:30:26.000Z | dtaidistance/clustering/visualization.py | hakimakbarmaulana/dtaidistance | ddf4a8111732d4429686d96c9195a81151be1dd8 | [
"Apache-2.0"
] | 155 | 2017-06-01T08:37:45.000Z | 2022-03-23T08:50:13.000Z |
from ..exceptions import MatplotlibException
def prepare_plot_options(show_ts_label, show_tr_label):
    """Normalise the two labelling options into callables.

    :param show_ts_label: Time-series label option. ``True`` labels each
        series with its integer index, ``False``/``None`` disables labels,
        a callable is used as-is, and a subscriptable maps index -> label.
    :param show_tr_label: Tree/distance label option. ``True`` formats the
        distance with two decimals, ``False``/``None`` disables labels,
        a callable is used as-is, and a subscriptable maps index -> label.
    :return: Tuple ``(show_ts_label, show_tr_label)`` of callables.
    :raises AttributeError: If either option is not a boolean, ``None``,
        callable or subscriptable.
    """
    if show_ts_label is True:
        show_ts_label = lambda idx: str(int(idx))
    elif show_ts_label is False or show_ts_label is None:
        show_ts_label = lambda idx: ""
    elif callable(show_ts_label):
        pass
    elif hasattr(show_ts_label, "__getitem__"):
        # Capture the original object; the lambda indexes into it lazily.
        show_ts_label_prev = show_ts_label
        show_ts_label = lambda idx: show_ts_label_prev[idx]
    else:
        raise AttributeError("Unknown type for show_ts_label, expecting boolean, subscriptable or callable, "
                             "got {}".format(type(show_ts_label)))
    if show_tr_label is True:
        show_tr_label = lambda dist: "{:.2f}".format(dist)
    elif show_tr_label is False or show_tr_label is None:
        show_tr_label = lambda dist: ""
    elif callable(show_tr_label):
        pass
    elif hasattr(show_tr_label, "__getitem__"):
        show_tr_label_prev = show_tr_label
        show_tr_label = lambda idx: show_tr_label_prev[idx]
    else:
        # Bug fix: the original message referenced show_ts_label (and its
        # type) in this branch, which reports the wrong argument.
        raise AttributeError("Unknown type for show_tr_label, expecting boolean, subscriptable or callable, "
                             "got {}".format(type(show_tr_label)))
    return show_ts_label, show_tr_label
| 37.676471 | 109 | 0.678376 |
795604f9aaf0002baa996f411d96c70a1cf03112 | 1,769 | py | Python | robot_view/urls.py | Raoul1996/robot_view_be | b50c261b4a62779b7536670463cf39599c91bc4d | [
"MIT"
] | 1 | 2018-07-04T18:29:43.000Z | 2018-07-04T18:29:43.000Z | robot_view/urls.py | Raoul1996/robot_view_be | b50c261b4a62779b7536670463cf39599c91bc4d | [
"MIT"
] | 8 | 2018-07-22T03:41:24.000Z | 2022-03-11T23:21:22.000Z | robot_view/urls.py | Raoul1996/robot_view_be | b50c261b4a62779b7536670463cf39599c91bc4d | [
"MIT"
] | null | null | null | """robot_view URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from rest_framework.schemas import get_schema_view
from rest_framework.documentation import include_docs_urls
from rest_framework.routers import DefaultRouter
# from rest_framework.authtoken import views
from rest_framework_jwt.views import obtain_jwt_token
from users.views import SMSCodeViewSet, UserViewSets
from info.views import RobotInfoViewSet
from robot.views import RobotDataViewSet
# DRF router that auto-generates list/detail routes for the registered viewsets.
router = DefaultRouter()
# Machine-readable schema view for the whole API.
schema_view = get_schema_view(title="Server Monitoring API")
# NOTE(review): 'base_name' was renamed to 'basename' in DRF 3.9 and removed
# in DRF 3.11 -- confirm the pinned djangorestframework version still accepts it.
router.register(r'users', UserViewSets, base_name='users')
router.register(r'code', SMSCodeViewSet, base_name='code')
router.register(r'info', RobotInfoViewSet, base_name='info')
router.register(r'robot', RobotDataViewSet, base_name='robot')
urlpatterns = [
    path('', include(router.urls)),  # API root: all routes registered above
    path('docs/', include_docs_urls(title='robot_view documents')),  # browsable API docs
    path('admin/', admin.site.urls),  # Django admin site
    path('schema/', schema_view),  # API schema endpoint
    path('login/', obtain_jwt_token),  # JWT token issuance endpoint
    path('api-auth/', include('rest_framework.urls')),  # session login/logout for the browsable API
    # path('api-token-auth/', views.obtain_auth_token)
]
| 40.204545 | 77 | 0.75636 |
795605379ac682eb6a4741e36494e7c81d6f8ad3 | 8,046 | py | Python | scrapy_httpcache/downloadermiddlewares/httpcache.py | vuchau/scrapy-httpcache | abef71aafe43b7449469df94101407184bb9d1a6 | [
"BSD-3-Clause"
] | 9 | 2018-03-06T06:42:42.000Z | 2022-01-16T01:56:25.000Z | scrapy_httpcache/downloadermiddlewares/httpcache.py | vuchau/scrapy-httpcache | abef71aafe43b7449469df94101407184bb9d1a6 | [
"BSD-3-Clause"
] | 3 | 2018-03-12T09:48:42.000Z | 2021-04-09T09:07:17.000Z | scrapy_httpcache/downloadermiddlewares/httpcache.py | vuchau/scrapy-httpcache | abef71aafe43b7449469df94101407184bb9d1a6 | [
"BSD-3-Clause"
] | 10 | 2018-06-13T02:46:17.000Z | 2022-01-23T04:37:49.000Z | import logging
import six
from scrapy import Spider, signals
from scrapy.http import Request, Response
from scrapy.downloadermiddlewares.httpcache import HttpCacheMiddleware, load_object, \
NotConfigured, formatdate, IgnoreRequest
from scrapy.settings import Settings
from scrapy.utils.request import request_fingerprint
from scrapy_httpcache import signals as httpcache_signals
from twisted.internet import defer
from twisted.internet.defer import returnValue
def check_banned(spider, request, response=None, exception=None):
    """Default banned-response detector used by AsyncHttpCacheMiddleware.

    :param spider: The spider that issued the request (unused here)
    :param request: The request that produced the response (unused here)
    :param response: The response to inspect, if any
    :param exception: The exception raised for the request, if any (unused here)
    :return: ``True`` when the response carries an HTTP error status (>= 400),
        ``False`` otherwise (including when no response is available)
    """
    # Bug fix: the original dereferenced response.status without a guard,
    # raising AttributeError whenever the default response=None was used.
    if response is None:
        return False
    return response.status >= 400
# Module-level logger shared by the middleware defined below.
logger = logging.getLogger(__name__)
class AsyncHttpCacheMiddleware(HttpCacheMiddleware):
    """Deferred-aware variant of Scrapy's HttpCacheMiddleware.

    Storage calls are awaited via Twisted's ``inlineCallbacks``/``yield`` so
    asynchronous cache backends can be used. Optionally records banned
    responses and request errors to dedicated storages, and listens for the
    custom ``remove_banned`` signal to evict poisoned cache entries.
    Note: the ``six.PY2`` branches call ``returnValue(...)`` on Python 2 and
    use plain ``return`` (valid in generators) on Python 3.
    """
    @classmethod
    def from_crawler(cls, crawler):
        """Build the middleware and connect spider lifecycle + custom signals."""
        o = cls(crawler.settings, crawler.stats)
        crawler.signals.connect(o.spider_opened, signal=signals.spider_opened)
        crawler.signals.connect(o.spider_closed, signal=signals.spider_closed)
        crawler.signals.connect(o.remove_banned, signal=httpcache_signals.remove_banned)
        return o
    def __init__(self, settings, stats):
        """Load the cache policy, the storages and the banned-check callable.

        The banned and request-error storages are optional; each is only
        instantiated when its setting is present. CHECK_BANNED may point to a
        custom callable, otherwise the module-level check_banned is used.
        """
        if not settings.getbool('HTTPCACHE_ENABLED'):
            raise NotConfigured
        self.policy = load_object(settings['HTTPCACHE_POLICY'])(settings)
        self.storage = load_object(settings['HTTPCACHE_STORAGE'])(settings)
        self.banned_storage = load_object(settings['BANNED_STORAGE'])(settings) \
            if settings.get('BANNED_STORAGE') else None
        self.request_error_storage = load_object(settings['REQUEST_ERROR_STORAGE'])(settings) \
            if settings.get('REQUEST_ERROR_STORAGE') else None
        self.ignore_missing = settings.getbool('HTTPCACHE_IGNORE_MISSING')
        self.check_banned = load_object(settings['CHECK_BANNED']) if settings.get('CHECK_BANNED') else check_banned
        self.stats = stats
    def spider_opened(self, spider):
        """Open all configured storages and log which optional plugins are active."""
        self.storage.open_spider(spider)
        if self.banned_storage:
            self.banned_storage.open_spider(spider)
        if self.request_error_storage:
            self.request_error_storage.open_spider(spider)
        # NOTE(review): 'opend' typo in the log message below is preserved
        # (runtime string); the whole expression only logs when at least one
        # optional storage is configured.
        logger.info('{middleware} opend {plugin}'.format(
            middleware=self.__class__.__name__,
            plugin='with plugin: {} {}'.format(
                self.request_error_storage.__class__.__name__+',' if self.request_error_storage else '',
                self.banned_storage.__class__.__name__ if self.banned_storage else ''
            )
        )) if any((self.request_error_storage, self.banned_storage)) else ''
    def spider_closed(self, spider):
        """Close all configured storages when the spider finishes."""
        self.storage.close_spider(spider)
        if self.banned_storage:
            self.banned_storage.close_spider(spider)
        if self.request_error_storage:
            self.request_error_storage.close_spider(spider)
        logger.info('{middleware} closed'.format(middleware=self.__class__.__name__))
    @defer.inlineCallbacks
    def process_request(self, request, spider):
        """Serve a fresh cached response if one exists; otherwise let the
        request proceed (stashing any stale cached copy for revalidation)."""
        if request.meta.get('dont_cache', False):
            if six.PY2:
                returnValue()
            else:
                return
        # Skip uncacheable requests
        if not self.policy.should_cache_request(request):
            request.meta['_dont_cache'] = True  # flag as uncacheable
            if six.PY2:
                returnValue()
            else:
                return
        # Look for cached response and check if expired
        cachedresponse = yield self.storage.retrieve_response(spider, request)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/miss', spider=spider)
            if self.ignore_missing:
                self.stats.inc_value('httpcache/ignore', spider=spider)
                raise IgnoreRequest("Ignored request not in cache: %s" % request)
            if six.PY2:
                returnValue()
            else:
                return  # first time request
        # Return cached response only if not expired
        cachedresponse.flags.append('cached')
        if self.policy.is_cached_response_fresh(cachedresponse, request):
            self.stats.inc_value('httpcache/hit', spider=spider)
            if six.PY2:
                returnValue(cachedresponse)
            else:
                return cachedresponse
        # Keep a reference to cached response to avoid a second cache lookup on
        # process_response hook
        request.meta['cached_response'] = cachedresponse
    @defer.inlineCallbacks
    def process_response(self, request, response, spider):
        """Record banned responses, then store/validate the response in cache
        following the configured cache policy."""
        # Persist banned-response details before any caching decisions.
        if self.banned_storage and self.check_banned(spider, request, response):
            yield self._save_banned_info(spider, request, response)
        if request.meta.get('dont_cache', False):
            if six.PY2:
                returnValue(response)
            else:
                return response
        # Skip cached responses and uncacheable requests
        if 'cached' in response.flags or '_dont_cache' in request.meta:
            request.meta.pop('_dont_cache', None)
            if six.PY2:
                returnValue(response)
            else:
                return response
        # RFC2616 requires origin server to set Date header,
        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.18
        if 'Date' not in response.headers:
            response.headers['Date'] = formatdate(usegmt=1)
        # Do not validate first-hand responses
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is None:
            self.stats.inc_value('httpcache/firsthand', spider=spider)
            yield self._cache_response(spider, response, request, cachedresponse)
            if six.PY2:
                returnValue(response)
            else:
                return response
        if self.policy.is_cached_response_valid(cachedresponse, response, request):
            self.stats.inc_value('httpcache/revalidate', spider=spider)
            if six.PY2:
                returnValue(cachedresponse)
            else:
                return cachedresponse
        self.stats.inc_value('httpcache/invalidate', spider=spider)
        yield self._cache_response(spider, response, request, cachedresponse)
        if six.PY2:
            returnValue(response)
        else:
            return response
    @defer.inlineCallbacks
    def process_exception(self, request, exception, spider):
        """Log the request error and fall back to a stale cached response for
        recoverable download exceptions (DOWNLOAD_EXCEPTIONS from the parent)."""
        if self.request_error_storage:
            yield self._save_request_error(spider, request, exception)
        cachedresponse = request.meta.pop('cached_response', None)
        if cachedresponse is not None and isinstance(exception, self.DOWNLOAD_EXCEPTIONS):
            self.stats.inc_value('httpcache/errorrecovery', spider=spider)
            if six.PY2:
                returnValue(cachedresponse)
            else:
                return cachedresponse
    @defer.inlineCallbacks
    def _cache_response(self, spider, response, request, cachedresponse):
        """Store the response when the policy allows; track stats either way."""
        if self.policy.should_cache_response(response, request):
            self.stats.inc_value('httpcache/store', spider=spider)
            yield self.storage.store_response(spider, request, response)
        else:
            self.stats.inc_value('httpcache/uncacheable', spider=spider)
    @defer.inlineCallbacks
    def _save_banned_info(self, spider, request, response):
        """Persist details of a banned response, when a banned storage exists."""
        if response and self.banned_storage:
            yield self.banned_storage.save_banned(spider, request, response)
    @defer.inlineCallbacks
    def _save_request_error(self, spider, request, exception):
        """Persist details of a failed request, when an error storage exists."""
        if exception and self.request_error_storage:
            yield self.request_error_storage.save_request_error(spider, request, exception)
    @defer.inlineCallbacks
    def remove_banned(self, spider, response, exception, **kwargs):
        """Signal handler: evict a banned response from the cache and decrement
        the store counter so stats stay consistent."""
        yield self.storage.remove_response(spider, response.request, response)
        self.stats.inc_value('httpcache/store', count=-1, spider=spider)
        logger.warning('Remove banned response cache: {}'.format(response.request.url))
| 41.90625 | 115 | 0.665051 |
79560588afee14347a26ce4b52ba1ab136883763 | 30,577 | py | Python | khorosjx/core.py | jeffshurtliff/khorosjx | 1530fad25eb8ccefcbb5a9ae63c09a6858cb033d | [
"MIT"
] | 2 | 2019-11-18T03:52:51.000Z | 2020-12-30T04:08:06.000Z | khorosjx/core.py | jeffshurtliff/khorosjx | 1530fad25eb8ccefcbb5a9ae63c09a6858cb033d | [
"MIT"
] | 2 | 2019-12-17T17:02:30.000Z | 2021-06-02T13:56:02.000Z | khorosjx/core.py | jeffshurtliff/khorosjx | 1530fad25eb8ccefcbb5a9ae63c09a6858cb033d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
:Module: khorosjx.core
:Synopsis: Collection of core functions and tools to work with the Jive Core API v3
:Usage: ``import khorosjx.core`` (Imported by default in primary package)
:Example: ``user_info = khorosjx.core.get_data('people', 'john.doe@example.com', 'email')``
:Created By: Jeff Shurtliff
:Last Modified: Jeff Shurtliff
:Modified Date: 23 Sep 2021
"""
import re
import json
import requests
from . import errors
from .utils.core_utils import eprint, convert_dict_to_json
from .utils.classes import Platform, Content
# Define global variables:
# base_url holds the fully-qualified Core API base (set by set_base_url);
# api_credentials holds the (username, password) tuple (set by set_credentials).
base_url, api_credentials = '', None
def set_base_url(domain_url, version=3, protocol='https', return_url=True):
    """Construct and store the Core API base URL for subsequent API calls.

    .. versionchanged:: 3.2.0
       Added the ``return_url`` parameter to determine if the base URL should be returned by the function.

    :param domain_url: The environment's domain, with or without the http/https prefix
    :type domain_url: str
    :param version: The Core API version to target (Default: ``3``)
    :type version: int
    :param protocol: Scheme to prepend when the domain has none (Default: ``https``)
    :type protocol: str
    :param return_url: Whether to return the constructed base URL (``True`` by default)
    :type return_url: bool
    :returns: The base URL for API calls (e.g. ``https://community.example.com/api/core/v3``)
    :raises: :py:exc:`TypeError`, :py:exc:`ValueError`
    """
    global base_url
    api_paths = {
        2: '/api/core/v2',
        3: '/api/core/v3'
    }
    scheme_prefixes = {
        80: 'http://',
        443: 'https://',
        'http': 'http://',
        'https': 'https://'
    }
    # Prefix the scheme when the caller supplied a bare domain.
    if not domain_url.startswith('http'):
        domain_url = '{}{}'.format(scheme_prefixes.get(protocol), domain_url)
    # Drop a single trailing slash before appending the API path.
    if domain_url.endswith('/'):
        domain_url = domain_url[:-1]
    base_url = '{}{}'.format(domain_url, api_paths.get(version))
    if return_url:
        return base_url
    return
def set_credentials(credentials):
    """Validate the Core API credentials and store them as a global variable.

    :param credentials: The username and password for the account that will use the Core API
    :type credentials: tuple
    :returns: None
    :raises: :py:exc:`khorosjx.errors.exceptions.IncompleteCredentialsError`,
             :py:exc:`khorosjx.errors.exceptions.CredentialsUnpackingError`,
             :py:exc:`khorosjx.errors.exceptions.WrongCredentialTypeError`
    """
    global api_credentials
    # Guard clauses: exactly two elements, both strings.
    if len(credentials) == 1:
        raise errors.exceptions.IncompleteCredentialsError()
    if len(credentials) != 2:
        raise errors.exceptions.CredentialsUnpackingError()
    if not (isinstance(credentials[0], str) and isinstance(credentials[1], str)):
        raise errors.exceptions.WrongCredentialTypeError()
    api_credentials = credentials
    return
def connect(base_api_url, credentials):
    """Record the connection details (base URL and credentials) for Core API queries.

    :param base_api_url: The environment's base URL (e.g. https://community.example.com)
    :type base_api_url: str
    :param credentials: Username and password of the account performing the API queries
    :type credentials: tuple
    :returns: None
    """
    # Delegate to the two setters; each validates its own input.
    set_base_url(base_api_url)
    set_credentials(credentials)
    return
def verify_connection():
    """Ensure that both the base URL and the API credentials have been defined.

    :returns: None
    :raises: :py:exc:`khorosjx.errors.exceptions.KhorosJXError`,
             :py:exc:`khorosjx.errors.exceptions.NoCredentialsError`
    """
    connection_ready = bool(base_url) and bool(api_credentials)
    if not connection_ready:
        raise errors.exceptions.NoCredentialsError()
    return
def get_connection_info():
    """Supply the stored connection information for use in other modules.

    :returns: Base URL (str) and API credentials (tuple)
    """
    # Fail fast if connect() has not been called yet.
    verify_connection()
    connection_info = (base_url, api_credentials)
    return connection_info
def get_api_info(api_filter="none", verify_ssl=True):
    """Obtain the API version information for a Jive environment.

    .. versionchanged:: 2.6.0
       Added the ``verify_ssl`` argument.

    :param api_filter: A filter to return a subset of API data (e.g. ``v3``, ``platform``, ``sso``, etc.)
    :type api_filter: str
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: API information in JSON, string or list format depending on the filter
    """
    verify_connection()
    # The /api/version endpoint lives above the Core API path.
    version_url = "{}/api/version".format(base_url.split('/api')[0])
    api_data = requests.get(version_url, verify=verify_ssl).json()
    # Map each supported filter keyword onto the corresponding slice of data.
    filtered_views = {
        'none': api_data,
        'platform': api_data['jiveVersion'],
        'v2': api_data['jiveCoreVersions'][0],
        'v3': api_data['jiveCoreVersions'][1],
        'sso': api_data['ssoEnabled'],
        'edition': api_data['jiveEdition'],
        'environment': api_data['jiveEdition']['product'],
        'tier': api_data['jiveEdition']['tier']
    }
    filter_key = api_filter.lower()
    if filter_key in filtered_views:
        api_data = filtered_views[filter_key]
    else:
        print("The invalid filter '{}' was provided for the API information. "
              "Defaulting to returning all data.".format(api_filter))
    return api_data
def get_api_version(api_name="v3", verify_ssl=True):
    """Obtain, parse and return the current version of one of the Jive Core APIs.

    .. versionchanged:: 2.6.0
       Added the ``verify_ssl`` argument.

    :param api_name: The name of the API for which the version should be returned (Default: ``v3``)
    :type api_name: str
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API version in major.minor notation (e.g. 3.15) in string format
    """
    verify_connection()
    # Fall back to v3 for any unrecognized API name.
    if api_name not in Platform.core_api_versions:
        print("The invalid API name '{}' was provided to obtain the API version. "
              "Defaulting to v3.".format(api_name))
        api_name = "v3"
    version_info = get_api_info(api_name, verify_ssl)
    return "{}.{}".format(version_info.get('version'), version_info.get('revision'))
def get_platform_version(verify_ssl=True):
    """Obtain the current Khoros JX (or Jive) version for an environment.

    .. versionchanged:: 2.6.0
       Added the ``verify_ssl`` argument.

    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The full platform version in string format (e.g. ``2018.22.0.0_jx``)
    """
    verify_connection()
    return get_api_info('platform', verify_ssl)
def ensure_absolute_url(query_url):
    """Prepend the base URL to a query URL when it is not already absolute.

    .. versionadded:: 3.2.0

    :param query_url: The query URL that will be utilized in an API request
    :type query_url: str
    :returns: A query URL that includes a top-level domain
    :raises: :py:exc:`khorosjx.errors.exceptions.MissingBaseUrlError`
    """
    if not base_url:
        raise errors.exceptions.MissingBaseUrlError()
    needs_prefix = query_url and not query_url.startswith('http')
    if needs_prefix:
        # Avoid a double slash when the relative URL already starts with one.
        separator = '' if query_url.startswith('/') else '/'
        query_url = '{}{}{}'.format(base_url, separator, query_url)
    return query_url
def get_request_with_retries(query_url, return_json=False, verify_ssl=True):
    """Perform a GET request with up to five retries on timeouts or connection issues.

    .. versionchanged:: 3.2.0
       The query URL is now made into an absolute URL as necessary before performing the API request.

    :param query_url: The URI to be queried
    :type query_url: str
    :param return_json: Determines whether the response should be returned in JSON format (Default: ``False``)
    :type return_json: bool
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API response from the GET request (optionally in JSON format)
    :raises: :py:exc:`ValueError`, :py:exc:`TypeError`, :py:exc:`khorosjx.errors.exceptions.APIConnectionError`
    """
    verify_connection()
    query_url = ensure_absolute_url(query_url)
    response = None
    # Six total attempts: the initial call plus five retries.
    for attempt in range(6):
        try:
            response = requests.get(query_url, auth=api_credentials, verify=verify_ssl)
            break
        except Exception as exc:
            print("The GET request failed with the exception below. (Attempt {} of 5)\n{}\n".format(attempt, exc))
    else:
        # The for-else branch only runs when every attempt raised.
        raise errors.exceptions.APIConnectionError(
            "The API call was unable to complete successfully after five consecutive API timeouts "
            "and/or failures. Please call the function again or contact Khoros Support.")
    if return_json:
        response = response.json()
    return response
def get_base_url(api_base=True):
    """This function returns the base URL of the environment with or without the ``/api/core/v3/`` path appended.

    .. versionchanged:: 3.1.0
       Refactored the function to properly utilize the ``base_url`` global variable.

    :param api_base: Determines if the ``/api/core/v3/`` path should be appended (``True`` by default)
    :type api_base: bool
    :returns: The base URL for the Khoros JX or Jive-n environment
    """
    verify_connection()
    # Bug fix: the original reassigned the *global* base_url when api_base was
    # False, permanently stripping the API path and corrupting every
    # subsequent API call. Work on a local copy instead.
    url = base_url if base_url else ''
    if not api_base:
        url = url.split('/api')[0]
    return url
def get_query_url(pre_endpoint, asset_id="", post_endpoint=""):
    """This function constructs an API query URL excluding any query strings.

    :param pre_endpoint: The endpoint portion of the URL preceding any ID numbers (e.g. ``places``)
    :type pre_endpoint: str
    :param asset_id: The ID for an asset (e.g. User ID, Browse ID for a space/blog, etc.)
    :type asset_id: str, int
    :param post_endpoint: Any remaining endpoints following the ID number (e.g. ``contents``)
    :type post_endpoint: str
    :returns: The fully structured query URL
    """
    # TODO: Include parameter to make the query URL absolute
    # Verify that the connection has been established
    verify_connection()
    # Construct and return the query URL
    if pre_endpoint[-1:] == '/':
        pre_endpoint = pre_endpoint[:-1]
    query_url = f"{base_url}/{pre_endpoint}"
    for section in (asset_id, post_endpoint):
        if not isinstance(section, str):
            section = str(section)
        if section:
            if section[-1:] == '/':
                # Bug fix: strip the trailing slash with [:-1]; the original
                # 'section[:1]' truncated the section to its first character.
                section = section[:-1]
            section = f"/{section}"
            query_url += section
    return query_url
def get_data(endpoint, lookup_value, identifier='id', return_json=False, ignore_exceptions=False, all_fields=False,
             verify_ssl=True):
    """This function returns data for a specific API endpoint.

    .. versionchanged:: 2.6.0
       Added the ``verify_ssl`` argument.

    :param endpoint: The API endpoint against which to request data (e.g. ``people``, ``contents``, etc.)
    :type endpoint: str
    :param lookup_value: The value to use to look up the endpoint data
    :type lookup_value: int, str
    :param identifier: The type of lookup value used to look up the endpoint data (Default: ``id``)
    :type identifier: str
    :param return_json: Determines if the data should be returned in default or JSON format (Default: ``False``)
    :type return_json: bool
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
    :type ignore_exceptions: bool
    :param all_fields: Determines whether or not the ``fields=@all`` query should be included (Default: ``False``)
    :type all_fields: bool
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API response either as a requests response or in JSON format depending on ``return_json``
    :raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`,
             :py:exc:`khorosjx.errors.exceptions.InvalidEndpointError`,
             :py:exc:`khorosjx.errors.exceptions.InvalidLookupTypeError`
    """
    # Verify that the connection has been established
    verify_connection()
    # Define the endpoint if an appropriate one is supplied
    available_endpoints = ['abuseReports', 'acclaim', 'actions', 'activities', 'addOns', 'announcements', 'attachments',
                           'calendar', 'checkpoints', 'collaborations', 'comments', 'contents', 'deletedObjects', 'dms',
                           'events', 'eventTypes', 'executeBatch', 'extprops', 'extstreamDefs', 'extstreams',
                           'ideaVotes', 'images', 'inbox', 'invites', 'members', 'mentions', 'messages', 'moderation',
                           'oembed', 'outcomes', 'pages', 'people', 'places', 'placeTemplateCategories',
                           'placeTemplates', 'placeTopics', 'profileImages', 'publications', 'questions', 'rsvp',
                           'search', 'sections', 'securityGroups', 'shares', 'slides', 'stages', 'statics',
                           'streamEntries', 'streams', 'tags', 'tileDefs', 'tiles', 'urls', 'versions', 'videos',
                           'vitals', 'votes', 'webhooks'
                           ]
    query_url = f"{base_url}/{endpoint}" if endpoint in available_endpoints else None
    if not query_url:
        raise errors.exceptions.InvalidEndpointError()
    # Define the identifier type for the lookup value
    if identifier == "id":
        query_url += f"/{lookup_value}"
    elif identifier == "email" or identifier == "username":
        invalid_endpoint_msg = f"The identifier '{identifier}' is only accepted with the people endpoint."
        if endpoint != "people":
            raise errors.exceptions.InvalidLookupTypeError(invalid_endpoint_msg)
        else:
            if identifier == "email":
                query_url += f"/email/{lookup_value}"
            elif identifier == "username":
                query_url += f"/username/{lookup_value}"
    else:
        unrecognized_endpoint_msg = f"The identifier '{identifier}' is unrecognized."
        if not ignore_exceptions:
            raise errors.exceptions.InvalidLookupTypeError(unrecognized_endpoint_msg)
        unrecognized_endpoint_retry_msg = f"{unrecognized_endpoint_msg} " + \
                                          "The function will attempt to use the default 'id' identifier."
        eprint(unrecognized_endpoint_retry_msg)
        query_url += f"/{lookup_value}"
    # Append the fields=@all query if requested.
    # Bug fix: the original 'query_url += "?fields=@all" if all_fields else query_url'
    # appended query_url to itself (doubling the URL) whenever all_fields was False.
    if all_fields:
        query_url += "?fields=@all"
    # Perform the GET request with retries to account for any timeouts
    response = get_request_with_retries(query_url, verify_ssl=verify_ssl)
    # Error out if the response isn't successful
    if response.status_code != 200:
        error_msg = f"The query failed with a {response.status_code} status code and the following error: " + \
                    f"{response.text}"
        if ignore_exceptions:
            print(error_msg)
            if return_json:
                # NOTE(review): convert_dict_to_json({}) replaces the response
                # here and the .json() call below is then applied to that
                # object -- confirm the helper returns an object with .json().
                empty_json = {}
                response = convert_dict_to_json(empty_json)
        else:
            raise errors.exceptions.GETRequestError(error_msg)
    response = response.json() if return_json else response
    return response
def _api_request_with_payload(_url, _json_payload, _request_type, _verify_ssl=True):
    """This function performs an API request while supplying a JSON payload.

    .. versionchanged:: 2.6.0
       Added the ``_verify_ssl`` argument.

    :param _url: The query URL to be leveraged in the API call
    :type _url: str
    :param _json_payload: The payload for the API call in JSON format
    :type _json_payload: dict
    :param _request_type: Defines if the API call will be a ``put`` or ``post`` request
    :type _request_type: str
    :param _verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type _verify_ssl: bool
    :returns: The API response
    :raises: :py:exc:`khorosjx.errors.exceptions.InvalidRequestTypeError`,
             :py:exc:`khorosjx.errors.exceptions.APIConnectionError`
    """
    # Prepare the query URL
    _url = ensure_absolute_url(_url)
    # Bug fix: validate the request type before the retry loop. Previously the
    # InvalidRequestTypeError was raised inside the try block, swallowed by the
    # broad except handler and retried five times before surfacing as a
    # misleading APIConnectionError.
    _request_methods = {'put': requests.put, 'post': requests.post}
    _method = _request_methods.get(_request_type.lower())
    if _method is None:
        raise errors.exceptions.InvalidRequestTypeError()
    # The headers are loop-invariant, so build them once.
    _headers = {"Content-Type": "application/json", "Accept": "application/json"}
    _retries, _response = 0, None
    while _retries <= 5:
        try:
            _response = _method(_url, data=json.dumps(_json_payload, default=str), auth=api_credentials,
                                headers=_headers, verify=_verify_ssl)
            break
        except Exception as _api_exception:
            _exc_type = type(_api_exception).__name__
            _current_attempt = f"(Attempt {_retries} of 5)"
            _error_msg = f"The {_request_type.upper()} request has failed with the following exception: " + \
                         f"{_exc_type} - {_api_exception} {_current_attempt}"
            print(_error_msg)
            _retries += 1
    if _retries == 6:
        _failure_msg = "The script was unable to complete successfully after five consecutive API timeouts. " + \
                       "Please run the script again or contact Khoros or Aurea Support for further assistance."
        raise errors.exceptions.APIConnectionError(_failure_msg)
    return _response
def post_request_with_retries(url, json_payload, verify_ssl=True):
    """Issue a POST request, retrying up to five times on timeouts or connection issues.

    .. versionchanged:: 3.2.0
       The query URL is now made into an absolute URL as necessary before performing the API request.

    :param url: The URI to be queried
    :type url: str
    :param json_payload: The payload for the POST request in JSON format
    :type json_payload: dict
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API response from the POST request
    :raises: :py:exc:`ValueError`, :py:exc:`khorosjx.errors.exceptions.APIConnectionError`,
             :py:exc:`khorosjx.errors.exceptions.POSTRequestError`
    """
    target_url = ensure_absolute_url(url)
    return _api_request_with_payload(target_url, json_payload, 'post', verify_ssl)
def put_request_with_retries(url, json_payload, verify_ssl=True):
    """Issue a PUT request, retrying up to five times on timeouts or connection issues.

    .. versionchanged:: 3.2.0
       The query URL is now made into an absolute URL as necessary before performing the API request.

    :param url: The URI to be queried
    :type url: str
    :param json_payload: The payload for the PUT request in JSON format
    :type json_payload: dict
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API response from the PUT request
    :raises: :py:exc:`ValueError`, :py:exc:`khorosjx.errors.exceptions.APIConnectionError`,
             :py:exc:`khorosjx.errors.exceptions.PUTRequestError`
    """
    target_url = ensure_absolute_url(url)
    return _api_request_with_payload(target_url, json_payload, 'put', verify_ssl)
def delete(uri, return_json=False, verify_ssl=True):
    """Issue a DELETE request against the Core API.

    .. versionchanged:: 3.2.0
       The query URL is now made into an absolute URL as necessary before performing the API request.

    :param uri: The URI against which the DELETE request will be issued
    :type uri: str
    :param return_json: Determines whether the response should be returned in JSON format (Default: ``False``)
    :type return_json: bool
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The API response from the DELETE request (optionally in JSON format)
    """
    target_uri = ensure_absolute_url(uri)
    api_response = requests.delete(target_uri, auth=api_credentials, verify=verify_ssl)
    return api_response.json() if return_json else api_response
def get_fields_from_api_response(json_data, dataset, return_fields=None, quiet=False):
    """This function parses and retrieves fields from an API response from a specific dataset.

    :param json_data: The JSON data from an API response
    :type json_data: dict
    :param dataset: The nickname of a dataset from which fields should be retrieved (e.g. ``people``, ``group_admins``)
    :type dataset: str
    :param return_fields: The fields that should be returned from the API response (Default: all fields in dataset)
    :type return_fields: list, None
    :param quiet: Silences any errors about being unable to locate API fields (``False`` by default)
    :type quiet: bool
    :returns: A dictionary with the field names and corresponding values
    :raises: :py:exc:`khorosjx.errors.exceptions.InvalidDatasetError`
    """
    # Dotted field names map onto nested key/index paths within the response
    nested_paths = {
        "email.value": ('emails', 0, 'value'),
        "name.formatted": ('name', 'formatted'),
        "jive.lastAuthenticated": ('jive', 'lastAuthenticated'),
        "jive.externalIdentities.identityType": ('jive', 'externalIdentities', 0, 'identityType'),
        "jive.externalIdentities.identity": ('jive', 'externalIdentities', 0, 'identity'),
        "jive.username": ('jive', 'username'),
        "jive.status": ('jive', 'status'),
        "resources.html.ref": ('resources', 'html', 'ref'),
    }
    fields_data = {}
    fields_to_return = return_fields if return_fields else []
    if not fields_to_return:
        # Fall back to the default return fields defined for the dataset
        if dataset not in Content.datasets:
            error_msg = f"The supplied value '{dataset}' is not a valid dataset."
            raise errors.exceptions.InvalidDatasetError(error_msg)
        fields_to_return = Content.datasets.get(dataset)
    for field in fields_to_return:
        located = True
        try:
            if field in json_data:
                fields_data[field] = json_data[field]
            elif field in nested_paths:
                # Walk the nested path one key/index at a time
                value = json_data
                for key in nested_paths[field]:
                    value = value[key]
                fields_data[field] = value
            else:
                located = False
        except (IndexError, KeyError):
            located = False
        if not located and not quiet:
            eprint(f"Unable to locate the '{field}' field in the API response data.")
    return fields_data
def _get_filter_syntax(_filter_info, _prefix=True):
"""This function retrieves the proper filter syntax for an API call."""
if type(_filter_info) != tuple and type(_filter_info) != list:
raise TypeError("Filter information must be provided as a tuple (element, criteria) or a list of tuples.")
elif type(_filter_info) == tuple:
_filter_info = [_filter_info]
_syntax = ""
if len(_filter_info[0]) > 0:
_define_prefix = {True: '&', False: ''}
_syntax_prefix = _define_prefix.get(_prefix)
for _filter_tuple in _filter_info:
_element, _criteria = _filter_tuple
_syntax = f"{_syntax_prefix}filter={_element}({_criteria})&"
_syntax = _syntax[:-1]
return _syntax
def get_paginated_results(query, response_data_type, start_index=0, filter_info=(), query_all=True,
                          return_fields=None, ignore_exceptions=False, quiet=False, verify_ssl=True):
    """This function performs a GET request for a single paginated response up to 100 records.

    :param query: The API query without the query string
    :type query: str
    :param response_data_type: The dataset of fields that will be in the API response (e.g. ``group_members``)
    :type response_data_type: str
    :param start_index: The startIndex value in the API query string (``0`` by default)
    :type start_index: int, str
    :param filter_info: A tuple or list of tuples containing the filter element and criteria (Optional)
    :type filter_info: tuple, list
    :param query_all: Determines if ``fields=@all`` filter should be included in the query string (Default: ``True``)
    :type query_all: bool
    :param return_fields: The fields that should be returned from the API response (Default: all fields in dataset)
    :type return_fields: list, None
    :param ignore_exceptions: Determines whether or not exceptions should be ignored (Default: ``False``)
    :type ignore_exceptions: bool
    :param quiet: Silences any errors about being unable to locate API fields (``False`` by default)
    :type quiet: bool
    :param verify_ssl: Determines if API calls should verify SSL certificates (``True`` by default)
    :type verify_ssl: bool
    :returns: The queried data as a list comprised of dictionaries
    :raises: :py:exc:`khorosjx.errors.exceptions.GETRequestError`
    """
    # Strip any existing query string so it cannot interfere with the one built below
    base_query = query.split("?")[0] if '?' in query else query
    fields_filter = "fields=@all&" if query_all else ""
    other_filters = _get_filter_syntax(filter_info, _prefix=True)
    full_query = f"{base_query}?{fields_filter}count=100&startIndex={start_index}{other_filters}"
    # Perform the API query and validate the response
    response = get_request_with_retries(full_query, verify_ssl=verify_ssl)
    aggregate_data = []
    if errors.handlers.check_api_response(response, ignore_exceptions=ignore_exceptions):
        # Parse each record in the page into the requested field dictionary
        for record in response.json()['list']:
            aggregate_data.append(
                get_fields_from_api_response(record, response_data_type, return_fields, quiet))
    return aggregate_data
| 43.495021 | 120 | 0.673742 |
795607511277fd96f6f2ababcdbcf446e0842ce8 | 13,941 | py | Python | deprecated/python/soappy/ncbiblast_soappy.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | null | null | null | deprecated/python/soappy/ncbiblast_soappy.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | null | null | null | deprecated/python/soappy/ncbiblast_soappy.py | SamFent/webservice-clients | b4c1ab0d4e0535cc8e79a0d5e731aaafef3193f2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# ======================================================================
#
# Copyright 2008-2018 EMBL - European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ======================================================================
# NCBI BLAST SOAP service, Python client using SOAPpy.
#
# Tested with:
# Python 2.5.1 with SOAPpy 0.11.3
# Python 2.5.2 with SOAPpy 0.12.0 (Ubuntu 8.04 LTS)
# Python 2.6.5 with SOAPpy 0.12.0 (Ubuntu 10.04 LTS)
# Python 2.7.3 with SOAPpy 0.12.0 (Ubuntu 12.04 LTS)
#
# See:
# http://www.ebi.ac.uk/Tools/webservices/services/sss/ncbi_blast_soap
# http://www.ebi.ac.uk/Tools/webservices/tutorials/python
# ======================================================================
# Load libraries
import base64, platform, os, SOAPpy, sys, time
import warnings
from SOAPpy import WSDL
from optparse import OptionParser
# WSDL URL for service
wsdlUrl = 'http://www.ebi.ac.uk/Tools/services/soap/ncbiblast?wsdl'
# Suppress all deprecation warnings (not recommended for development)
warnings.simplefilter('ignore', DeprecationWarning)
# Set interval for checking status, in seconds
checkInterval = 3
# Output level (lowered by --quiet, raised by --verbose)
outputLevel = 1
# Debug level (0 disables debug output)
debugLevel = 0
# Number of option arguments.
numOpts = len(sys.argv)
# Usage message
usage = "Usage: %prog [options...] [seqFile]"
description = """Rapid sequence database search programs utilizing the BLAST algorithm. For more information
on NCBI BLAST refer to http://www.ebi.ac.uk/Tools/sss/ncbiblast"""
epilog = """For further information about the NCBI BLAST (SOAP) web service, see http://www.ebi.ac.uk/Tools/webservices/services/sss/ncbi_blast_soap.
"""
version = "$Id$"
# Process command-line options
parser = OptionParser(usage=usage, description=description, epilog=epilog, version=version)
# Tool specific options
parser.add_option('-p', '--program', help='program to run')
parser.add_option('-D', '--database', help='database to search')
parser.add_option('--stype', default='protein', help='query sequence type')
parser.add_option('-m', '--matrix', help='scoring matrix')
parser.add_option('-E', '--exp', type='float', help='E-value threshold')
parser.add_option('-f', '--filter', action="store_true", help='low complexity sequence filter')
parser.add_option('-n', '--alignments', type='int', help='maximum number of alignments')
parser.add_option('-s', '--scores', type='int', help='maximum number of scores')
parser.add_option('-d', '--dropoff', type='int', help='dropoff score')
parser.add_option('--match_score', help='match/missmatch score')
parser.add_option('-o', '--gapopen', type='int', help='open gap penalty')
parser.add_option('-x', '--gapext', type='int', help='extend gap penalty')
parser.add_option('-g', '--gapalign', action="store_true", help='optimise gap alignments')
parser.add_option('--compstats', help='compositional adjustment/statistics mode')
parser.add_option('--seqrange', help='region within input to use as query')
parser.add_option('--sequence', help='input sequence file name')
# General options (job control and client behaviour)
parser.add_option('--email', help='e-mail address')
parser.add_option('--title', help='job title')
parser.add_option('--outfile', help='file name for results')
parser.add_option('--outformat', help='output format for results')
parser.add_option('--async', action='store_true', help='asynchronous mode')
parser.add_option('--jobid', help='job identifier')
parser.add_option('--polljob', action="store_true", help='get job result')
parser.add_option('--status', action="store_true", help='get job status')
parser.add_option('--resultTypes', action='store_true', help='get result types')
parser.add_option('--params', action='store_true', help='list input parameters')
parser.add_option('--paramDetail', help='get details for parameter')
parser.add_option('--quiet', action='store_true', help='decrease output level')
parser.add_option('--verbose', action='store_true', help='increase output level')
parser.add_option('--trace', action="store_true", help='show SOAP messages')
parser.add_option('--WSDL', default=wsdlUrl, help='WSDL URL for service')
parser.add_option('--debugLevel', type='int', default=debugLevel, help='debug output level')
(options, args) = parser.parse_args()
# Increase output level
if options.verbose:
    outputLevel += 1
# Decrease output level
if options.quiet:
    outputLevel -= 1
# Debug level
if options.debugLevel:
    debugLevel = options.debugLevel
# Debug print
def printDebugMessage(functionName, message, level):
    """Print a debug message to stderr when ``level`` is within the configured debugLevel."""
    if(level <= debugLevel):
        print >>sys.stderr, '[' + functionName + '] ' + message
# Get input parameters list
def serviceGetParameters():
    """Return the tool's input parameter list via the service's getParameters operation."""
    printDebugMessage('serviceGetParameters', 'Begin', 1)
    result = server.getParameters()
    printDebugMessage('serviceGetParameters', 'End', 1)
    return result
# Get input parameter information
def serviceGetParameterDetails(paramName):
    """Return the detailed description of a single tool parameter (getParameterDetails)."""
    printDebugMessage('serviceGetParameterDetails', 'Begin', 1)
    result= server.getParameterDetails(parameterId=paramName)
    printDebugMessage('serviceGetParameterDetails', 'End', 1)
    return result
# Submit job
def serviceRun(email, title, params):
    """Submit a job with the given e-mail, title and parameters; return the job identifier."""
    printDebugMessage('serviceRun', 'Begin', 1)
    jobid = server.run(email=email, title=title, parameters=params)
    printDebugMessage('serviceRun', 'End', 1)
    return jobid
# Get job status
def serviceCheckStatus(jobId):
    """Return the current status string for a job (e.g. RUNNING, PENDING, FINISHED)."""
    printDebugMessage('serviceCheckStatus', 'jobId: ' + jobId, 1)
    result = server.getStatus(jobId = jobId)
    return result
# Get available result types for job
def serviceGetResultTypes(jobId):
    """Return the list of result types available for a (finished) job."""
    printDebugMessage('serviceGetResultTypes', 'Begin', 1)
    result = server.getResultTypes(jobId=jobId)
    printDebugMessage('serviceGetResultTypes', 'End', 1)
    return result['type']
# Get result
def serviceGetResult(jobId, type):
    """Fetch one result of the given type for a job and decode it from base64."""
    printDebugMessage('serviceGetResult', 'Begin', 1)
    printDebugMessage('serviceGetResult', 'jobId: ' + jobId, 1)
    printDebugMessage('serviceGetResult', 'type: ' + type, 1)
    resultBase64 = server.getResult(jobId=jobId, type=type)
    # Results are transported base64-encoded by the SOAP service
    result = base64.decodestring(resultBase64)
    printDebugMessage('serviceGetResult', 'End', 1)
    return result
# Client-side poll
def clientPoll(jobId):
    """Block until the job leaves the RUNNING/PENDING states, polling every 15 seconds."""
    printDebugMessage('clientPoll', 'Begin', 1)
    result = 'PENDING'
    while result == 'RUNNING' or result == 'PENDING':
        result = serviceCheckStatus(jobId)
        print >>sys.stderr, result
        if result == 'RUNNING' or result == 'PENDING':
            time.sleep(15)
    printDebugMessage('clientPoll', 'End', 1)
# Get result for a jobid
def getResult(jobId):
    """Wait for a job to finish, then download and write its result files to disk.

    One file is written per available result type unless --outformat restricts
    the selection; each written filename is printed to stdout.
    """
    printDebugMessage('getResult', 'Begin', 1)
    printDebugMessage('getResult', 'jobId: ' + jobId, 1)
    # Check status and wait if necessary
    clientPoll(jobId)
    # Get available result types
    resultTypes = serviceGetResultTypes(jobId)
    for resultType in resultTypes:
        # Get the result
        result = serviceGetResult(jobId, resultType['identifier'])
        # Derive the filename for the result
        if options.outfile:
            filename = options.outfile + '.' + resultType['identifier'] + '.' + resultType['fileSuffix']
        else:
            filename = jobId + '.' + resultType['identifier'] + '.' + resultType['fileSuffix']
        # Write a result file (only when no --outformat filter or it matches)
        if not options.outformat or options.outformat == resultType['identifier']:
            fh = open(filename, 'w');
            fh.write(result)
            fh.close()
            print filename
    printDebugMessage('getResult', 'End', 1)
# Read a file
def readFile(filename):
    """Return the entire contents of the named file as a string."""
    printDebugMessage('readFile', 'Begin', 1)
    with open(filename, 'r') as fh:
        data = fh.read()
    printDebugMessage('readFile', 'End', 1)
    return data
# Output parameter details.
def printGetParameterDetails(paramName):
    """Print the name, type, description and allowed values of a tool parameter."""
    printDebugMessage('printGetParameterDetails', 'Begin', 1)
    paramDetail = serviceGetParameterDetails(paramName)
    print paramDetail['name'], "\t", paramDetail['type']
    print paramDetail['description']
    for value in paramDetail['values']['value']:
        print value['value'],
        if(value['defaultValue'] == 'true'):
            print '(default)',
        print
        print "\t", value['label']
        if(hasattr(value, 'properties')):
            # A value may carry a single property or a list of key/value properties
            if(isinstance(value['properties']['property'], (list, tuple))):
                for wsProperty in value['properties']['property']:
                    print "\t", wsProperty['key'], "\t", wsProperty['value']
            else:
                print "\t", value['properties']['property']['key'], "\t", value['properties']['property']['value']
    printDebugMessage('printGetParameterDetails', 'End', 1)
# Output available result types for job.
def printGetResultTypes(jobId):
    """Print the identifier and optional metadata of each result type for a job."""
    printDebugMessage('printGetResultTypes', 'Begin', 1)
    for resultType in serviceGetResultTypes(jobId):
        print resultType['identifier']
        # The remaining attributes are optional in the service response
        if(hasattr(resultType, 'label')):
            print "\t", resultType['label']
        if(hasattr(resultType, 'description')):
            print "\t", resultType['description']
        if(hasattr(resultType, 'mediaType')):
            print "\t", resultType['mediaType']
        if(hasattr(resultType, 'fileSuffix')):
            print "\t", resultType['fileSuffix']
    printDebugMessage('printGetResultTypes', 'End', 1)
# Set the client user-agent.
clientRevision = '$Revision$'
clientVersion = '0'
# Extract the revision number from the SVN keyword (only expanded on checkout)
if len(clientRevision) > 11:
    clientVersion = clientRevision[11:-2]
userAgent = 'EBI-Sample-Client/%s (%s; Python %s; %s) %s' % (
    clientVersion, os.path.basename( __file__ ),
    platform.python_version(), platform.system(),
    SOAPpy.Client.SOAPUserAgent()
)
# Function to return User-agent.
def SOAPUserAgent():
    return userAgent
# Redefine default User-agent function to return custom User-agent.
SOAPpy.Client.SOAPUserAgent = SOAPUserAgent
printDebugMessage('main', 'User-agent: ' + SOAPpy.Client.SOAPUserAgent(), 1)
# Create the service interface
printDebugMessage('main', 'WSDL: ' + options.WSDL, 1)
server = WSDL.Proxy(options.WSDL)
# Fix message namespace (not set from the WSDL).
for method in server.methods:
    if server.methods[method].namespace == None:
        server.methods[method].namespace = 'http://soap.jdispatcher.ebi.ac.uk'
# Configure HTTP proxy from OS environment (e.g. http_proxy="http://proxy.example.com:8080")
# NOTE: dict.has_key is Python 2 only; this whole client targets Python 2/SOAPpy.
if os.environ.has_key('http_proxy'):
    http_proxy_conf = os.environ['http_proxy'].replace('http://', '')
elif os.environ.has_key('HTTP_PROXY'):
    http_proxy_conf = os.environ['HTTP_PROXY'].replace('http://', '')
else:
    http_proxy_conf = None
server.soapproxy.http_proxy = http_proxy_conf
# If required enable SOAP message trace
if options.trace:
    server.soapproxy.config.dumpSOAPOut = 1
    server.soapproxy.config.dumpSOAPIn = 1
# Main dispatch: pick the requested operation from the parsed options.
# No options... print help.
if numOpts < 2:
    parser.print_help()
# List parameters
elif options.params:
    for paramName in serviceGetParameters()['id']:
        print paramName
# Get parameter details
elif options.paramDetail:
    printGetParameterDetails(options.paramDetail)
# Submit job
elif options.email and not options.jobid:
    params = {}
    if len(args) > 0:
        if os.access(args[0], os.R_OK): # Read file into content
            params['sequence'] = readFile(args[0])
        else: # Argument is a sequence id
            params['sequence'] = args[0]
    elif options.sequence: # Specified via option
        if os.access(options.sequence, os.R_OK): # Read file into content
            params['sequence'] = readFile(options.sequence)
        else: # Argument is a sequence id
            params['sequence'] = options.sequence
    # Booleans need to be represented as 1/0 rather than True/False
    if options.gapalign is not None:
        if options.gapalign:
            params['gapalign'] = 1
        else:
            params['gapalign'] = 0
    # Add the other options (if defined)
    if options.program:
        params['program'] = options.program
    if options.database:
        params['database'] = {'string':options.database}
    if options.stype:
        params['stype'] = options.stype
    if options.matrix:
        params['matrix'] = options.matrix
    if options.exp:
        params['exp'] = options.exp
    if options.filter:
        params['filter'] = options.filter
    if options.alignments:
        params['alignments'] = options.alignments
    if options.scores:
        params['scores'] = options.scores
    if options.dropoff:
        params['dropoff'] = options.dropoff
    if options.match_score:
        params['match_score'] = options.match_score
    if options.gapopen:
        params['gapopen'] = options.gapopen
    if options.gapext:
        params['gapext'] = options.gapext
    if options.compstats:
        params['compstats'] = options.compstats
    # Submit the job
    jobid = serviceRun(options.email, options.title, params)
    if options.async: # Async mode: print the job id and return immediately
        print jobid
    else: # Sync mode: wait for completion and fetch the results
        time.sleep(5)
        getResult(jobid)
# Get job status
elif options.status and options.jobid:
    status = serviceCheckStatus(options.jobid)
    print status
# List result types for job
elif options.resultTypes and options.jobid:
    printGetResultTypes(options.jobid)
# Get results for job
elif options.polljob and options.jobid:
    getResult(options.jobid)
else:
    print 'Error: unrecognised argument combination'
    parser.print_help()
| 39.160112 | 149 | 0.681874 |
79560760a9f941e6ec308ffa08f4124ef34c7c8d | 856 | py | Python | line_bot/urls.py | andy89923/Fuelator-Public | 5386f8cc10a4f03cdacdb95eb2eb9eb21a2a3693 | [
"MIT"
] | null | null | null | line_bot/urls.py | andy89923/Fuelator-Public | 5386f8cc10a4f03cdacdb95eb2eb9eb21a2a3693 | [
"MIT"
] | null | null | null | line_bot/urls.py | andy89923/Fuelator-Public | 5386f8cc10a4f03cdacdb95eb2eb9eb21a2a3693 | [
"MIT"
] | null | null | null | """line_bot URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
import echobot
# URL routing table: the Django admin site plus delegation of /echobot/ paths
# to the echobot application's own URLconf.
urlpatterns = [
    path('admin/', admin.site.urls),
    url(r'^echobot/', include('echobot.urls')),
]
| 32.923077 | 77 | 0.71028 |
795607e5df579f6bfd8589bd8578d8092e56133d | 1,271 | py | Python | parakeet/frontend/normalizer/normalizer.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | 501 | 2020-02-28T12:46:59.000Z | 2022-03-29T19:49:52.000Z | parakeet/frontend/normalizer/normalizer.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | 75 | 2020-03-24T04:40:41.000Z | 2021-11-19T02:18:30.000Z | parakeet/frontend/normalizer/normalizer.py | zh794390558/DeepSpeech | 34178893327ad359cb816e55d7c66a10244fa08a | [
"Apache-2.0"
] | 79 | 2020-03-11T01:50:26.000Z | 2022-03-20T09:37:07.000Z | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import unicodedata
from builtins import str as unicode
from parakeet.frontend.normalizer.numbers import normalize_numbers
def normalize(sentence):
    """Normalize English text for the TTS frontend.

    Expands numbers, strips accents, lowercases, removes characters outside a
    small whitelist and expands the abbreviations "i.e." and "e.g.".
    """
    text = unicode(sentence)
    text = normalize_numbers(text)
    # Strip accents: decompose to NFD, then drop combining marks (category 'Mn')
    decomposed = unicodedata.normalize('NFD', text)
    text = ''.join(char for char in decomposed
                   if unicodedata.category(char) != 'Mn')
    text = text.lower()
    # Keep only spaces, lowercase letters and a few punctuation characters
    text = re.sub(r"[^ a-z'.,?!\-]", "", text)
    text = text.replace("i.e.", "that is")
    text = text.replace("e.g.", "for example")
    return text
| 36.314286 | 74 | 0.715972 |
795609154b33236b70f10efce21a0c7c6e1856ff | 12,094 | py | Python | tests/test_detection.py | Lucas-Prates/ruptures | 9685818d08ca024c0abb6ecf6121f2f86fb26dba | [
"BSD-2-Clause"
] | 942 | 2018-01-20T20:11:43.000Z | 2022-03-29T09:30:29.000Z | tests/test_detection.py | Lucas-Prates/ruptures | 9685818d08ca024c0abb6ecf6121f2f86fb26dba | [
"BSD-2-Clause"
] | 171 | 2018-03-08T18:15:32.000Z | 2022-03-31T18:36:00.000Z | tests/test_detection.py | Lucas-Prates/ruptures | 9685818d08ca024c0abb6ecf6121f2f86fb26dba | [
"BSD-2-Clause"
] | 144 | 2018-03-05T20:54:36.000Z | 2022-03-31T08:59:54.000Z | from copy import deepcopy
from itertools import product
import numpy as np
import pytest
from ruptures.costs import CostAR
from ruptures.datasets import pw_constant
from ruptures.detection import Binseg, BottomUp, Dynp, Pelt, Window, KernelCPD
from ruptures.exceptions import BadSegmentationParameters
@pytest.fixture(scope="module")
def signal_bkps_5D_n10():
signal, bkps = pw_constant(n_samples=10, n_features=5, noise_std=1)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_5D():
signal, bkps = pw_constant(n_features=5, noise_std=1)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D():
signal, bkps = pw_constant(noise_std=1)
return signal.astype(np.float32), bkps
@pytest.fixture(scope="module")
def signal_bkps_5D_no_noise():
signal, bkps = pw_constant(n_features=5, noise_std=0)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D_no_noise():
signal, bkps = pw_constant(noise_std=0)
return signal, bkps
@pytest.fixture(scope="module")
def signal_bkps_1D_constant():
signal, bkps = np.zeros(200), [200]
return signal, bkps
@pytest.mark.parametrize("algo", [Binseg, BottomUp, Dynp, Pelt, Window])
def test_empty(signal_bkps_1D, algo):
signal, _ = signal_bkps_1D
algo().fit(signal).predict(1)
algo().fit_predict(signal, 1)
@pytest.mark.parametrize(
"algo, model",
product(
[Binseg, BottomUp, Window],
["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_1D(signal_bkps_1D, algo, model):
signal, _ = signal_bkps_1D
algo(model=model).fit_predict(signal, pen=1)
ret = algo(model=model).fit_predict(signal, n_bkps=1)
assert len(ret) == 2
assert ret[-1] == signal.shape[0]
algo(model=model).fit_predict(signal, epsilon=10)
@pytest.mark.parametrize(
"algo, model",
product([Dynp, Pelt], ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]),
)
def test_model_1D_bis(signal_bkps_1D, algo, model):
signal, _ = signal_bkps_1D
algo_t = algo(model=model)
ret = algo_t.fit_predict(signal, 1)
if isinstance(algo_t, Dynp):
assert len(ret) == 2
assert ret[-1] == signal.shape[0]
@pytest.mark.parametrize(
    "algo, model",
    product(
        [Dynp, Binseg, BottomUp, Window, Pelt],
        ["l1", "l2", "ar", "normal", "rbf", "rank"],
    ),
)
def test_model_1D_constant(signal_bkps_1D_constant, algo, model):
    """Check each search method's behaviour on a constant (no change-point) signal."""
    signal, _ = signal_bkps_1D_constant
    algo = algo(model=model)
    if isinstance(algo, Dynp) or isinstance(algo, BottomUp) or isinstance(algo, Binseg):
        ret = algo.fit_predict(signal=signal, n_bkps=1)
        # Even with constant signals, return the specified number of
        # change-points.
        assert len(ret) == 2
    if isinstance(algo, Window):
        ret = algo.fit_predict(signal=signal, n_bkps=1)
        # With constant signal, this search method returns 0 change-point.
        assert len(ret) == 1
    if isinstance(algo, Pelt):
        ret = algo.fit_predict(signal=signal, pen=1)
        # With constant signal, this search method returns 0 change-point.
        assert len(ret) == 1
    # Fix: the failure message was previously split across two statements, so
    # " n_samples." was a dead no-op expression and the message was truncated.
    assert ret[-1] == signal.shape[0], "The last change-point is equal to n_samples."


@pytest.mark.parametrize("algo", [Binseg, Window])
def test_costnormal_on_constant_old_behaviour(signal_bkps_1D_constant, algo):
    """Check the legacy ``add_small_diag=False`` behaviour of the "normal" cost."""
    signal, _ = signal_bkps_1D_constant
    algo = algo(model="normal", params={"add_small_diag": False})
    ret = algo.fit_predict(signal=signal, n_bkps=2)
    # With constant signal, this search method returns 0 change-point.
    assert len(ret) == 1
    # Fix: same truncated assert message as above — joined into one string.
    assert ret[-1] == signal.shape[0], "The last change-point is equal to n_samples."
@pytest.mark.parametrize(
    "algo, model",
    product(
        [Binseg, BottomUp, Window],
        ["l1", "l2", "linear", "normal", "rbf", "rank", "mahalanobis"],
    ),
)
def test_model_5D(signal_bkps_5D, algo, model):
    """Penalty, fixed-number and epsilon stopping rules all work on 5D signals."""
    signal, _ = signal_bkps_5D
    algo(model=model).fit_predict(signal, pen=1)
    ret = algo(model=model).fit_predict(signal, n_bkps=1)
    assert len(ret) == 2
    algo(model=model).fit_predict(signal, epsilon=10)


@pytest.mark.parametrize(
    "algo, model",
    product(
        [Dynp, Pelt],
        ["l1", "l2", "linear", "normal", "rbf", "rank", "mahalanobis"],
    ),
)
def test_model_5D_bis(signal_bkps_5D, algo, model):
    """Dynp/Pelt run on 5D signals; Dynp returns exactly n_bkps + 1 indexes."""
    signal, _ = signal_bkps_5D
    algo_t = algo(model=model)
    ret = algo_t.fit_predict(signal, 1)
    if isinstance(algo_t, Dynp):
        assert len(ret) == 2


@pytest.mark.parametrize("algo", [Binseg, BottomUp, Window, Dynp, Pelt])
def test_custom_cost(signal_bkps_1D, algo):
    """Each estimator accepts a user-supplied cost instance via custom_cost."""
    signal, _ = signal_bkps_1D
    c = CostAR(order=10)
    algo_t = algo(custom_cost=c)
    ret = algo_t.fit_predict(signal, 1)
    # Pelt is penalty-driven, so it may return more than n_bkps + 1 indexes
    if isinstance(algo_t, Pelt):
        assert len(ret) >= 2
    else:
        assert len(ret) == 2


@pytest.mark.parametrize("algo", [Binseg, BottomUp, Window, Dynp, Pelt])
def test_pass_param_to_cost(signal_bkps_1D, algo):
    """Cost-specific keyword arguments are forwarded through the params dict."""
    signal, _ = signal_bkps_1D
    algo_t = algo(model="ar", params={"order": 10})
    ret = algo_t.fit_predict(signal, 1)
    if isinstance(algo_t, Pelt):
        assert len(ret) >= 2
    else:
        assert len(ret) == 2
@pytest.mark.parametrize(
    "kernel, min_size",
    product(["linear"], [2, 5]),
)
def test_kernelcpd_1D_linear(signal_bkps_1D, kernel, min_size):
    """KernelCPD (linear kernel) finds the requested number of breakpoints in 1D."""
    signal, bkps = signal_bkps_1D
    ret = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1)
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert len(ret) == len(bkps)


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["linear"], [2, 5]),
)
def test_kernelcpd_5D_linear(signal_bkps_5D, kernel, min_size):
    """KernelCPD (linear kernel) finds the requested number of breakpoints in 5D."""
    signal, bkps = signal_bkps_5D
    ret = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1)
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert len(ret) == len(bkps)


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["rbf"], [2, 5]),
)
def test_kernelcpd_1D_rbf(signal_bkps_1D, kernel, min_size):
    """KernelCPD (rbf kernel, explicit gamma) finds the requested breakpoints in 1D."""
    signal, bkps = signal_bkps_1D
    ret = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert len(ret) == len(bkps)


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["rbf"], [2, 5]),
)
def test_kernelcpd_5D_rbf(signal_bkps_5D, kernel, min_size):
    """KernelCPD (rbf kernel, explicit gamma) finds the requested breakpoints in 5D."""
    signal, bkps = signal_bkps_5D
    ret = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert len(ret) == len(bkps)


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["linear"], [2, 5]),
)
def test_kernelcpd_1D_no_noise_linear(signal_bkps_1D_no_noise, kernel, min_size):
    """Without noise the detected breakpoints must equal the true ones exactly (1D, linear)."""
    signal, bkps = signal_bkps_1D_no_noise
    res = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1)
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert res == bkps


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["linear"], [2, 5]),
)
def test_kernelcpd_5D_no_noise_linear(signal_bkps_5D_no_noise, kernel, min_size):
    """Without noise the detected breakpoints must equal the true ones exactly (5D, linear)."""
    signal, bkps = signal_bkps_5D_no_noise
    res = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1)
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert res == bkps


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["rbf"], [2, 5]),
)
def test_kernelcpd_1D_no_noise_rbf(signal_bkps_1D_no_noise, kernel, min_size):
    """Without noise the detected breakpoints must equal the true ones exactly (1D, rbf)."""
    signal, bkps = signal_bkps_1D_no_noise
    res = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert res == bkps


@pytest.mark.parametrize(
    "kernel, min_size",
    product(["rbf"], [2, 5]),
)
def test_kernelcpd_5D_no_noise_rbf(signal_bkps_5D_no_noise, kernel, min_size):
    """Without noise the detected breakpoints must equal the true ones exactly (5D, rbf)."""
    signal, bkps = signal_bkps_5D_no_noise
    res = (
        KernelCPD(kernel=kernel, min_size=min_size, jump=1, params={"gamma": 1.5})
        .fit(signal)
        .predict(n_bkps=len(bkps) - 1)
    )
    assert res == bkps
# Exhaustive test of KernelCPD
@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd(signal_bkps_5D, kernel):
    """Exercise KernelCPD's caching, penalized prediction and fit_predict paths."""
    signal, bkps = signal_bkps_5D
    # Test we do not compute if intermediary results exist
    algo_temp = KernelCPD(kernel=kernel)
    algo_temp.fit(signal).predict(n_bkps=len(bkps) - 1)
    algo_temp.predict(n_bkps=1)
    # Test penalized version
    KernelCPD(kernel=kernel).fit(signal).predict(pen=0.2)
    # Test fit_predict
    KernelCPD(kernel=kernel).fit_predict(signal, pen=0.2)


@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd_small_signal(signal_bkps_5D_n10, kernel):
    """Too-restrictive min_size/jump on a short signal raises BadSegmentationParameters."""
    signal, _ = signal_bkps_5D_n10
    # NOTE(review): algo_temp is never used below — presumably leftover; verify.
    algo_temp = KernelCPD(kernel=kernel)
    with pytest.raises(BadSegmentationParameters):
        KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, n_bkps=2)
    with pytest.raises(AssertionError):
        KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, n_bkps=0)
    with pytest.raises(BadSegmentationParameters):
        KernelCPD(kernel=kernel, min_size=10, jump=2).fit_predict(signal, pen=0.2)
    assert (
        len(KernelCPD(kernel=kernel, min_size=5, jump=2).fit_predict(signal, pen=0.2))
        > 0
    )


@pytest.mark.parametrize("kernel", ["linear", "rbf", "cosine"])
def test_kernelcpd_small_signal_same_result(signal_bkps_5D_n10, kernel):
    """Repeated fit/predict on the same signal must be deterministic."""
    signal, _ = signal_bkps_5D_n10
    algo = KernelCPD(kernel=kernel)
    list_of_segmentations = list()
    n_iter = 100
    for _ in range(n_iter):
        bkps = algo.fit(signal=signal).predict(pen=1.0)
        list_of_segmentations.append(bkps)
    # test if all segmentations are equal
    first_bkps = list_of_segmentations[0]
    all_elements_are_equal = all(
        first_bkps == other_bkps for other_bkps in list_of_segmentations[1:]
    )
    err_msg = "KernelCPD returns different segmentations on the same signal."
    assert all_elements_are_equal, err_msg
@pytest.mark.parametrize(
"algo, model",
product(
[Binseg, BottomUp, Window],
["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"],
),
)
def test_model_small_signal(signal_bkps_5D_n10, algo, model):
signal, _ = signal_bkps_5D_n10
with pytest.raises(BadSegmentationParameters):
algo(model=model, min_size=5, jump=2).fit_predict(signal, n_bkps=2)
assert (
len(algo(model=model, min_size=5, jump=2).fit_predict(signal, pen=10 ** 6)) > 0
)
assert (
len(algo(model=model, min_size=5, jump=2).fit_predict(signal, epsilon=10)) > 0
)
assert (
len(algo(model=model, min_size=9, jump=2).fit_predict(signal, pen=10 ** 6)) > 0
)
@pytest.mark.parametrize(
"model", ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]
)
def test_model_small_signal_dynp(signal_bkps_5D_n10, model):
signal, _ = signal_bkps_5D_n10
with pytest.raises(BadSegmentationParameters):
Dynp(model=model, min_size=5, jump=2).fit_predict(signal, 2)
with pytest.raises(BadSegmentationParameters):
Dynp(model=model, min_size=9, jump=2).fit_predict(signal, 2)
with pytest.raises(BadSegmentationParameters):
Dynp(model=model, min_size=11, jump=2).fit_predict(signal, 2)
@pytest.mark.parametrize(
"model", ["l1", "l2", "ar", "normal", "rbf", "rank", "mahalanobis"]
)
def test_model_small_signal_pelt(signal_bkps_5D_n10, model):
signal, _ = signal_bkps_5D_n10
with pytest.raises(BadSegmentationParameters):
Pelt(model=model, min_size=11, jump=2).fit_predict(signal, 2)
assert len(Pelt(model=model, min_size=10, jump=2).fit_predict(signal, 1.0)) > 0
def test_binseg_deepcopy():
binseg = Binseg()
binseg_copy = deepcopy(binseg)
assert id(binseg.single_bkp) != id(binseg_copy.single_bkp)
| 31.250646 | 88 | 0.669257 |
795609d95e6ad8215a945b8c4d1432d8c4b39416 | 1,449 | py | Python | models/body/control_body.py | Mirmik/zippo | 50097d9b33c165d8f6a8ec65b22db4b1c4e1f61c | [
"MIT"
] | null | null | null | models/body/control_body.py | Mirmik/zippo | 50097d9b33c165d8f6a8ec65b22db4b1c4e1f61c | [
"MIT"
] | null | null | null | models/body/control_body.py | Mirmik/zippo | 50097d9b33c165d8f6a8ec65b22db4b1c4e1f61c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#coding: utf-8
from zencad import *
import params
x = params.body_x
y = params.body_y
t = params.bottom_wall_thikness
z = 35
platform_offset = 40
filrad = 5 #Радиус скругления
zh = 4
nhz = 3
nut_holder = linear_extrude(ngon(r=5.5,n=6),(0,0,nhz)) - linear_extrude(ngon(r=3.5,n=6),(0,0,nhz))
voltdisp = (26, 45)
base = rectangle(x, y, center = True).extrude(z+filrad*2).fillet(filrad).down(z) - halfspace().mirrorXY()
def control_body():
m = (
#Основное тело
thicksolid(base, -t, [point3(0,0,0)]).up(z)
#Колоны
- sqrtrans()(cylinder(r=5,h=z).translate(x/2-5,y/2-5,0))
+ sqrtrans()(cylinder(r=5,h=z).translate(x/2-5,y/2-5,0))#.up(zh)
#Отверстия в колоннах
- sqrtrans()(cylinder(r=3,h=z).translate(x/2-5,y/2-5,0))
#Отверстия под крепления поворотной платформы
- sqrtrans()(cylinder(r=2,h=t).translate(24,24,0)).forw(platform_offset)
- square(20).fillet2d(3).extrude(t).translate(-10,platform_offset-10,0)
#Держатели гаек
+ sqrtrans()(nut_holder.translate(24,24,0)).forw(platform_offset).up(t/2)
- rectangle(20, 10, center = True).fillet2d(3).extrude(t)
- rectangle(16, 8, center = True).fillet2d(3).extrude(t).right(40)
- rectangle(16, 8, center = True).fillet2d(3).extrude(t).left(40)
- rectangle(voltdisp[1], voltdisp[0], center = True).extrude(t).right(35).back(54)
).mirrorXY()
return m
if __name__ == "__main__":
display(control_body())
#display(b())
show() | 24.559322 | 106 | 0.668047 |
79560b720cb7d07563bc55bbd6d5e9223ad7b6d3 | 10,129 | py | Python | benchmark/old/dbc/tb/d3_cpu_TensorBayes_v4.2.py | jklopf/tensorbayes | 4b0cb3c565e9603a972135ddf7cfbe28a23b3a4a | [
"MIT"
] | 1 | 2018-11-12T16:58:32.000Z | 2018-11-12T16:58:32.000Z | benchmark/old/dbc/tb/d3_cpu_TensorBayes_v4.2.py | jklopf/tensorbayes | 4b0cb3c565e9603a972135ddf7cfbe28a23b3a4a | [
"MIT"
] | null | null | null | benchmark/old/dbc/tb/d3_cpu_TensorBayes_v4.2.py | jklopf/tensorbayes | 4b0cb3c565e9603a972135ddf7cfbe28a23b3a4a | [
"MIT"
] | null | null | null | ## Imports
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np
from timeit import repeat
tfd = tfp.distributions
import psutil
from tqdm import tqdm
'''
This version is able to retrieve the simulated parameters and
store the history of the sampling, and implement the tensorflow dataset API instead of
placeholders to feed data. Compared to other tensorflow-based version, the runtime has reduced of 10s.
It also implements algorithm optimization, where whole dataset matrice multiplication are avoided:
Emu is not sampled anymore.
epsilon is updated only during the dataset full pass, and not also after.
It also implements control dependencies, to minimize the number of sess.run() calls.
'''
# Reset the graph
tf.reset_default_graph()
## Reproducibility
# Seed setting for reproducable research.
# Set numpy seed
np.random.seed(1234)
# Print options (dev)
# np.set_printoptions(precision=5, floatmode='fixed')
# Set graph-level seed
tf.set_random_seed(1234)
## Util functions
def tf_squared_norm(vector):
sum_of_squares = tf.reduce_sum(tf.square(vector))
return sum_of_squares
def np_squared_norm(vector):
sum_of_squares = np.sum(np.square(vector))
return sum_of_squares
## Distributions functions
def rnorm(mean, var):
# rnorm is defined using the variance (i.e sigma^2)
sd = tf.sqrt(var)
dist = tfd.Normal(loc= mean, scale= sd)
sample = dist.sample()
return sample
def rbeta(a, b):
dist = tfd.Beta(a, b)
sample = dist.sample()
return sample
def rinvchisq(df, scale):
# scale factor = tau^2
dist = tfd.Chi2(df)
sample = (df * scale)/dist.sample()
return sample
def rbernoulli(p):
dist = tfd.Bernoulli(probs=p)
sample = dist.sample()
return sample
## Sampling functions
def sample_mu(N, Sigma2_e, Y, X, betas):
# sample mean
mean = tf.reduce_sum(tf.subtract(Y, tf.matmul(X, betas)))/N
var = Sigma2_e/N
sample = rnorm(mean,var)
return sample
def sample_sigma2_b(betas, NZ, v0B, s0B):
# sample variance of beta
df = v0B+NZ
scale = (tf_squared_norm(betas)+v0B*s0B) / df
sample = rinvchisq(df, scale)
return sample
def sample_sigma2_e(N, epsilon, v0E, s0E):
# sample error variance of Y
df = v0E + N
scale = (tf_squared_norm(epsilon) + v0E*s0E) / df
sample = rinvchisq(df, scale)
return sample
def sample_w(M, NZ):
# sample mixture weight
sample = rbeta(NZ+1, M-NZ+1)
return sample
def sample_beta(x_j, eps, s2e, s2b, w, beta_old):
# sample a beta
eps = eps + (x_j*beta_old)
Cj = tf_squared_norm(x_j) + s2e/s2b
rj = tf.tensordot(tf.transpose(x_j), eps, 1)[0,0]
ratio = tf.exp( - ( tf.square(rj) / ( 2*Cj*s2e ))) * tf.sqrt((s2b*Cj)/s2e)
pij = w / (w + ratio*(1-w))
toss = rbernoulli(pij)
def case_zero():
return 0., 0.
def case_one():
return rnorm(rj/Cj, s2e/Cj), 1.
beta_new, ny_new = tf.cond(tf.equal(toss,1),case_one, case_zero)
eps = eps - (x_j*beta_new)
return beta_new, ny_new, eps
## Simulate data
def build_toy_dataset(N, M, var_g):
sigma_b = np.sqrt(var_g/M)
sigma_e = np.sqrt(1 - var_g)
beta_true = np.random.normal(0, sigma_b , M)
x = sigma_b * np.random.randn(N, M)
y = np.dot(x, beta_true) + np.random.normal(0, sigma_e, N)
return x, y, beta_true
# Simulated data parameters
'''
Var(b) = Var(g) / M
Var(e) = 1 - Var(g)
'''
# D3
N = 10000 # number of data points
M = 20000 # number of features
var_g = 0.7 # genetic variance parameter
# Benchmark parameters and logs
# oa: overall
n_time = 20
oa_mean_s2b = []
oa_mean_s2e = []
oa_cor = []
oa_pip = []
def gibb():
global N, M
global oa_mean_s2b
global oa_mean_s2e
global oa_cor, oa_pip
x, y, beta_true = build_toy_dataset(N, M, var_g)
x = np.transpose(x)
X = tf.constant(x, shape=[M,N], dtype=tf.float32) # /!\ shape is now [M,N] /!\
Y = tf.constant(y, shape=[N,1], dtype=tf.float32)
## Dataset API implementation
data_index = tf.data.Dataset.range(M) # reflect which column was selected at random
data_x = tf.data.Dataset.from_tensor_slices(X) # reflects the randomly selected column
data = tf.data.Dataset.zip((data_index, data_x)).shuffle(M) # zip together and shuffle them
iterator = data.make_initializable_iterator() # reinitializable iterator: initialize at each gibbs iteration
ind, col = iterator.get_next() # dataset element
colx = tf.reshape(col, [N,1]) # reshape the array element as a column vector
# Could be implemented:
# building datasets using TF API without numpy
'''
TODO: Actually implement all the algorithm optimizations of the reference article
which are not implemented here. Depends on later implementations of input pipeline.
'''
# Parameters setup
#
# Distinction between constant and variables
# Variables: values might change between evaluation of the graph
# (if something changes within the graph, it should be a variable)
# Variables:
Ebeta = tf.Variable(tf.zeros([M,1], dtype=tf.float32), dtype=tf.float32)
Ny = tf.Variable(tf.zeros(M, dtype=tf.float32), dtype=tf.float32)
NZ = tf.Variable(0., dtype=tf.float32)
Ew = tf.Variable(0., dtype=tf.float32)
epsilon = tf.Variable(Y, dtype=tf.float32)
Sigma2_e = tf.Variable(tf_squared_norm(Y) / (N*0.5), dtype=tf.float32)
Sigma2_b = tf.Variable(rbeta(1., 1.), dtype=tf.float32)
# Constants:
v0E = tf.constant(0.001, dtype=tf.float32)
v0B = tf.constant(0.001, dtype=tf.float32)
s0B = Sigma2_b.initialized_value() / 2
s0E = Sigma2_e.initialized_value() / 2
# Tensorboard graph
# TODO: look up what TensorBoard can do, this can be used in the end to have a graph representation of the algorithm.
# Also, for graph clarity, operations should be named.
#writer = tf.summary.FileWriter('.')
#writer.add_graph(tf.get_default_graph())
# Computations: computation for each column
# ta: to assign
ta_beta, ta_ny, ta_eps = sample_beta(colx, epsilon, Sigma2_e, Sigma2_b, Ew, Ebeta[ind,0])
# Assignment ops:
# As we don't chain assignment operations, assignment does not require to return the evaluation of the new value
# therefore, all read_value are set to False. This changes runtime for about 1 sec (see above).
# Run with `read_value = True`: 63.4s
# Run with `read_value = False`: 62.2s
# maybe there is a trick here for storing the log using read_value
beta_item_assign_op = Ebeta[ind,0].assign(ta_beta) # when doing item assignment, read_value becomes an unexpected parameter,
ny_item_assign_op = Ny[ind].assign(ta_ny) # as tensorflow doesn't know what to return the single item or the whole variable
eps_up_fl = epsilon.assign(ta_eps, read_value=False)
fullpass = tf.group(beta_item_assign_op, ny_item_assign_op, eps_up_fl)
s2e_up = Sigma2_e.assign(sample_sigma2_e(N,epsilon,v0E,s0E), read_value=False)
nz_up = NZ.assign(tf.reduce_sum(Ny), read_value=False)
first_round = tf.group(nz_up,s2e_up)
# Control dependencies:
with tf.control_dependencies([first_round]):
ew_up = Ew.assign(sample_w(M,NZ), read_value=False)
s2b_up = Sigma2_b.assign(sample_sigma2_b(Ebeta,NZ,v0B,s0B), read_value=False)
param_up = tf.group(ew_up, s2b_up)
# Logs definition:
param_log = [] # order: Sigma2_e, Sigma2_b
beta_log = [] # as rank 1 vector
ny_log = []
# Number of Gibbs sampling iterations
num_iter = 5000
burned_samples_threshold = 2000
# Launch of session
with tf.Session() as sess:
# Initialize variable
sess.run(tf.global_variables_initializer())
# Gibbs sampler iterations
for i in tqdm(range(num_iter)): # TODO: replace with tf.while ?
# While loop: dataset full pass
sess.run(iterator.initializer)
while True: # Loop on 'col_next', the queue of column iterator
try: # Run Ebeta item assign op
sess.run(fullpass)
except tf.errors.OutOfRangeError:
# End of full pass, update parameters
sess.run(param_up)
# Exit while loop to enter next Gibbs iteration
break
# Store sampling logs
if(i >= burned_samples_threshold):
param_log.append(sess.run([Sigma2_e, Sigma2_b]))
beta_log.append(np.array(sess.run(Ebeta)).reshape(M))
ny_log.append(sess.run(Ny))
# Store local results
param_log = np.array(param_log) # [s2e, s2b]
mean_s2e = np.mean(param_log[:,0])
mean_s2b = np.mean(param_log[:,1])
mean_ebeta = np.mean(beta_log, axis=0)
pip = np.mean(ny_log, axis = 0)
corr_ebeta_betatrue = np.corrcoef(mean_ebeta, beta_true)[0][1]
# Store overall results
oa_mean_s2e.append(mean_s2e)
oa_mean_s2b.append(mean_s2b)
oa_pip.append(len([num for num in pip if num >= 0.95]))
oa_cor.append(corr_ebeta_betatrue)
# Measure running times and execute the code n_time
oa_time = repeat('gibb()',repeat=n_time, number=1, setup='from __main__ import gibb')
# Measure memory usage
mem = psutil.Process().memory_info()
rss = np.round(mem.rss / (1024**2), 5)
vms = np.round(mem.vms / (1024**2), 5)
# Output benchmark logs
print('\nD3 Logs: TensorBayes v4.2 on CPU')
print('N = {}, M = {}, var(g) = {}'.format(N,M,var_g))
print('\nrss memory (physical): {} MiB'.format(rss))
print('vms memory (virtual): {} MiB'.format(vms))
print('\nMin time of execution: ', np.round(np.min(oa_time)),4)
print('Mean time of execution memory: ', np.round(np.mean(oa_time),4))
# Write results to a .csv
# Order: s2e | s2b | cor | pip | time
results = np.stack((
oa_mean_s2e,
oa_mean_s2b,
oa_cor,
oa_pip,
oa_time), axis=-1)
np.savetxt(
'd3_tb_cpu_results.csv',
results,
delimiter=',',
header='sigma2_e, sigma2_b, cor(eb,bt), PiP, time',
fmt='%.8f')
| 30.601208 | 141 | 0.663145 |
79560ca4df259e8a172e3afa2803dc3a6a104d32 | 27 | py | Python | Hello.py | kscime/MB215LAB1 | bd046ba2948d1a122d0d9e3d57a7247b7488d85e | [
"MIT"
] | null | null | null | Hello.py | kscime/MB215LAB1 | bd046ba2948d1a122d0d9e3d57a7247b7488d85e | [
"MIT"
] | null | null | null | Hello.py | kscime/MB215LAB1 | bd046ba2948d1a122d0d9e3d57a7247b7488d85e | [
"MIT"
] | null | null | null | print("Hello Katelyn!")
| 9 | 24 | 0.62963 |
79560ccae3db95790252e73d9081f4acbe76e4bb | 6,804 | py | Python | tests/test_client.py | xubiuit/jina | 4ab91693c2d51a35eca3cf6c187034e0568b0ac9 | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | xubiuit/jina | 4ab91693c2d51a35eca3cf6c187034e0568b0ac9 | [
"Apache-2.0"
] | null | null | null | tests/test_client.py | xubiuit/jina | 4ab91693c2d51a35eca3cf6c187034e0568b0ac9 | [
"Apache-2.0"
] | null | null | null | import time
import numpy as np
import requests
from jina.clients import py_client
from jina.clients.python import PyClient
from jina.clients.python.io import input_files, input_numpy
from jina.drivers.helper import array2pb
from jina.enums import ClientMode
from jina.flow import Flow
from jina.main.parser import set_gateway_parser
from jina.peapods.gateway import RESTGatewayPea
from jina.proto.jina_pb2 import Document
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_client(self):
f = Flow().add(yaml_path='_forward')
with f:
print(py_client(port_grpc=f.port_grpc).call_unary(b'a1234', mode=ClientMode.INDEX))
def tearDown(self) -> None:
super().tearDown()
time.sleep(3)
def test_check_input(self):
input_fn = iter([b'1234', b'45467'])
PyClient.check_input(input_fn)
input_fn = iter([Document(), Document()])
PyClient.check_input(input_fn)
bad_input_fn = iter([b'1234', '45467', [12, 2, 3]])
self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
bad_input_fn = iter([Document(), None])
self.assertRaises(TypeError, PyClient.check_input, bad_input_fn)
def test_gateway_ready(self):
p = set_gateway_parser().parse_args([])
with RESTGatewayPea(p):
a = requests.get(f'http://0.0.0.0:{p.port_grpc}/ready')
self.assertEqual(a.status_code, 200)
with RESTGatewayPea(p):
a = requests.post(f'http://0.0.0.0:{p.port_grpc}/api/ass')
self.assertEqual(a.status_code, 405)
def test_gateway_index(self):
f = Flow(rest_api=True).add(yaml_path='_forward')
with f:
a = requests.post(f'http://0.0.0.0:{f.port_grpc}/api/index',
json={'data': [
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC']})
j = a.json()
self.assertTrue('index' in j)
self.assertEqual(len(j['index']['docs']), 2)
self.assertEqual(j['index']['docs'][0]['uri'],
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC')
self.assertEqual(a.status_code, 200)
def test_gateway_index_with_args(self):
f = Flow(rest_api=True).add(yaml_path='_forward')
with f:
a = requests.post(f'http://0.0.0.0:{f.port_grpc}/api/index',
json={'data': [
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC',
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AvdGjTZeOlQq07xSYPgJjlWRwfWEBx2+CgAVrPrP+O5ghhOa+a0cocoWnaMJFAsBuCQCgiJOKDBcIQTiLieOrPD/cp/6iZ/Iu4HqAh5dGzggIQVJI3WqTxwVTDjs5XJOy38AlgHoaKgY+xJEXeFTyR7FOfF7JNWjs3b8evQE6B2dTDvQZx3n3Rz6rgOtVlaZRLvR9geCAxuY3G+0mepEAhrTISES3bwPWYYi48OUrQOc//IaJeij9xZGGmDIG9kc73fNI7eA8VMBAAD//0SxXMMT90UdAAAAAElFTkSuQmCC'],
'first_doc_id': 5,
})
j = a.json()
self.assertTrue('index' in j)
self.assertEqual(len(j['index']['docs']), 2)
self.assertEqual(j['index']['docs'][0]['docId'], 5)
self.assertEqual(j['index']['docs'][1]['docId'], 6)
self.assertEqual(j['index']['docs'][0]['uri'],
'data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAgAAAAICAIAAABLbSncAAAA2ElEQVR4nADIADf/AxWcWRUeCEeBO68T3u1qLWarHqMaxDnxhAEaLh0Ssu6ZGfnKcjP4CeDLoJok3o4aOPYAJocsjktZfo4Z7Q/WR1UTgppAAdguAhR+AUm9AnqRH2jgdBZ0R+kKxAFoAME32BL7fwQbcLzhw+dXMmY9BS9K8EarXyWLH8VYK1MACkxlLTY4Eh69XfjpROqjE7P0AeBx6DGmA8/lRRlTCmPkL196pC0aWBkVs2wyjqb/LABVYL8Xgeomjl3VtEMxAeaUrGvnIawVh/oBAAD///GwU6v3yCoVAAAAAElFTkSuQmCC')
self.assertEqual(a.status_code, 200)
def test_io_files(self):
PyClient.check_input(input_files('*.*'))
PyClient.check_input(input_files('*.*', recursive=True))
PyClient.check_input(input_files('*.*', size=2))
PyClient.check_input(input_files('*.*', size=2, read_mode='rb'))
PyClient.check_input(input_files('*.*', sampling_rate=.5))
f = Flow().add(yaml_path='- !URI2Buffer {}')
def validate_mime_type(req):
for d in req.index.docs:
self.assertEqual(d.mime_type, 'text/x-python')
with f:
f.index(input_files('*.py'), validate_mime_type)
def test_io_np(self):
print(type(np.random.random([100, 4])))
PyClient.check_input(input_numpy(np.random.random([100, 4, 2])))
PyClient.check_input(['asda', 'dsadas asdasd'])
print(type(array2pb(np.random.random([100, 4, 2]))))
def test_unary_driver(self):
f = Flow().add(yaml_path='yaml/unarycrafter.yml')
def check_non_empty(req, field):
for d in req.index.docs:
self.assertEqual(len(d.chunks), 1)
self.assertEqual(d.chunks[0].WhichOneof('content'), field)
with f:
f.index_numpy(np.random.random([10, 4, 2]), output_fn=lambda x: check_non_empty(x, 'blob'))
with f:
f.index(np.random.random([10, 4, 2]), output_fn=lambda x: check_non_empty(x, 'blob'))
with f:
f.index(['asda', 'dsadas asdasd'], output_fn=lambda x: check_non_empty(x, 'text'))
| 56.7 | 425 | 0.701058 |
79560d28d3666623d2bc2451083c52743b142a9d | 14,718 | py | Python | gibson/core/physics/robot_bases.py | micheleantonazzi/GibsonEnv | 4b9c57712efdf519015f9ba40c85a6b168ed3660 | [
"MIT"
] | null | null | null | gibson/core/physics/robot_bases.py | micheleantonazzi/GibsonEnv | 4b9c57712efdf519015f9ba40c85a6b168ed3660 | [
"MIT"
] | null | null | null | gibson/core/physics/robot_bases.py | micheleantonazzi/GibsonEnv | 4b9c57712efdf519015f9ba40c85a6b168ed3660 | [
"MIT"
] | null | null | null | ## Author: pybullet, Zhiyang He
import pybullet as p
import gym, gym.spaces, gym.utils
import numpy as np
import os, inspect
from gibson.assets.assets_manager import AssetsManager
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
os.sys.path.insert(0,parentdir)
import pybullet_data
from gibson import assets
from transforms3d.euler import euler2quat
from transforms3d import quaternions
def quatFromXYZW(xyzw, seq='xyzw'):
"""Convert quaternion from arbitrary sequence to XYZW (pybullet convention)
"""
assert len(seq) == 4 and 'x' in seq and 'y' in seq and 'z' in seq and 'w' in seq, \
"Quaternion sequence {} is not valid, please double check.".format(seq)
inds = [seq.index('x'), seq.index('y'), seq.index('z'), seq.index('w')]
return xyzw[inds]
def quatToXYZW(orn, seq='xyzw'):
"""Convert quaternion from XYZW (pybullet convention) to arbitrary sequence
"""
assert len(seq) == 4 and 'x' in seq and 'y' in seq and 'z' in seq and 'w' in seq, \
"Quaternion sequence {} is not valid, please double check.".format(seq)
inds = [seq.index('x'), seq.index('y'), seq.index('z'), seq.index('w')]
return orn[inds]
class BaseRobot:
"""
Base class for mujoco .xml/ROS urdf based agents.
Handles object loading
"""
def __init__(self, model_file, robot_name, scale = 1, env = None):
self.parts = None
self.jdict = None
self.ordered_joints = None
self.robot_body = None
self.robot_ids = None
self.model_file = model_file
self.robot_name = robot_name
self.physics_model_dir = os.path.join(AssetsManager().get_assets_path(), 'models')
self.scale = scale
self._load_model()
self.eyes = self.parts["eyes"]
self.env = env
def addToScene(self, bodies):
if self.parts is not None:
parts = self.parts
else:
parts = {}
if self.jdict is not None:
joints = self.jdict
else:
joints = {}
if self.ordered_joints is not None:
ordered_joints = self.ordered_joints
else:
ordered_joints = []
dump = 0
for i in range(len(bodies)):
if p.getNumJoints(bodies[i]) == 0:
part_name, robot_name = p.getBodyInfo(bodies[i], 0)
robot_name = robot_name.decode("utf8")
part_name = part_name.decode("utf8")
parts[part_name] = BodyPart(part_name, bodies, i, -1, self.scale, model_type=self.model_type)
for j in range(p.getNumJoints(bodies[i])):
p.setJointMotorControl2(bodies[i],j,p.POSITION_CONTROL,positionGain=0.1,velocityGain=0.1,force=0)
## TODO (hzyjerry): the following is diabled due to pybullet update
#_,joint_name,joint_type, _,_,_, _,_,_,_, _,_, part_name = p.getJointInfo(bodies[i], j)
_,joint_name,joint_type, _,_,_, _,_,_,_, _,_, part_name, _,_,_,_ = p.getJointInfo(bodies[i], j)
joint_name = joint_name.decode("utf8")
part_name = part_name.decode("utf8")
if dump: print("ROBOT PART '%s'" % part_name)
if dump: print("ROBOT JOINT '%s'" % joint_name) # limits = %+0.2f..%+0.2f effort=%0.3f speed=%0.3f" % ((joint_name,) + j.limits()) )
parts[part_name] = BodyPart(part_name, bodies, i, j, self.scale, model_type=self.model_type)
if part_name == self.robot_name:
self.robot_body = parts[part_name]
if i == 0 and j == 0 and self.robot_body is None: # if nothing else works, we take this as robot_body
parts[self.robot_name] = BodyPart(self.robot_name, bodies, 0, -1, self.scale, model_type=self.model_type)
self.robot_body = parts[self.robot_name]
if joint_name[:6] == "ignore":
Joint(joint_name, bodies, i, j, self.scale).disable_motor()
continue
if joint_name[:8] != "jointfix" and joint_type != p.JOINT_FIXED:
joints[joint_name] = Joint(joint_name, bodies, i, j, self.scale, model_type=self.model_type)
ordered_joints.append(joints[joint_name])
joints[joint_name].power_coef = 100.0
debugmode = 0
if debugmode:
for j in ordered_joints:
print(j, j.power_coef)
return parts, joints, ordered_joints, self.robot_body
def _load_model(self):
if self.model_type == "MJCF":
self.robot_ids = p.loadMJCF(os.path.join(self.physics_model_dir, self.model_file), flags=p.URDF_USE_SELF_COLLISION+p.URDF_USE_SELF_COLLISION_EXCLUDE_ALL_PARENTS)
if self.model_type == "URDF":
self.robot_ids = (p.loadURDF(os.path.join(self.physics_model_dir, self.model_file), globalScaling = self.scale), )
self.parts, self.jdict, self.ordered_joints, self.robot_body = self.addToScene(self.robot_ids)
def reset(self):
if self.robot_ids is None:
self._load_model()
self.robot_body.reset_orientation(quatToXYZW(euler2quat(*self.config["initial_orn"]), 'wxyz'))
self.robot_body.reset_position(self.config["initial_pos"])
self.reset_random_pos()
self.robot_specific_reset()
state = self.calc_state()
return state
def reset_random_pos(self):
'''Add randomness to resetted initial position
'''
if not self.config["random"]["random_initial_pose"]:
return
pos = self.robot_body.get_position()
orn = self.robot_body.get_orientation()
x_range = self.config["random"]["random_init_x_range"]
y_range = self.config["random"]["random_init_y_range"]
z_range = self.config["random"]["random_init_z_range"]
r_range = self.config["random"]["random_init_rot_range"]
new_pos = [ pos[0] + self.np_random.uniform(low=x_range[0], high=x_range[1]),
pos[1] + self.np_random.uniform(low=y_range[0], high=y_range[1]),
pos[2] + self.np_random.uniform(low=z_range[0], high=z_range[1])]
new_orn = quaternions.qmult(quaternions.axangle2quat([1, 0, 0], self.np_random.uniform(low=r_range[0], high=r_range[1])), orn)
self.robot_body.reset_orientation(new_orn)
self.robot_body.reset_position(new_pos)
def reset_new_pose(self, pos, orn):
self.robot_body.reset_orientation(orn)
self.robot_body.reset_position(pos)
def calc_potential(self):
return 0
class Pose_Helper:
def __init__(self, body_part):
self.body_part = body_part
def xyz(self):
return self.body_part.get_position()
def rpy(self):
return p.getEulerFromQuaternion(self.body_part.get_orientation())
def orientation(self):
return self.body_part.get_orientation()
class BodyPart:
def __init__(self, body_name, bodies, bodyIndex, bodyPartIndex, scale, model_type):
self.bodies = bodies
self.body_name = body_name
self.bodyIndex = bodyIndex
self.bodyPartIndex = bodyPartIndex
if model_type=="MJCF":
self.scale = scale
else:
self.scale = 1
self.initialPosition = self.get_position() / self.scale
self.initialOrientation = self.get_orientation()
self.bp_pose = Pose_Helper(self)
def get_name(self):
return self.body_name
def _state_fields_of_pose_of(self, body_id, link_id=-1):
"""Calls native pybullet method for getting real (scaled) robot body pose
Note that there is difference between xyz in real world scale and xyz
in simulation. Thus you should never call pybullet methods directly
"""
if link_id == -1:
(x, y, z), (a, b, c, d) = p.getBasePositionAndOrientation(body_id)
else:
(x, y, z), (a, b, c, d), _, _, _, _ = p.getLinkState(body_id, link_id)
x, y, z = x * self.scale, y * self.scale, z * self.scale
return np.array([x, y, z, a, b, c, d])
def _set_fields_of_pose_of(self, pos, orn):
"""Calls native pybullet method for setting real (scaled) robot body pose"""
p.resetBasePositionAndOrientation(self.bodies[self.bodyIndex], np.array(pos) / self.scale, orn)
def get_pose(self):
return self._state_fields_of_pose_of(self.bodies[self.bodyIndex], self.bodyPartIndex)
def get_position(self):
"""Get position of body part
Position is defined in real world scale """
return self.get_pose()[:3]
def get_orientation(self):
"""Get orientation of body part
Orientation is by default defined in [x,y,z,w]"""
return self.get_pose()[3:]
def set_position(self, position):
"""Get position of body part
Position is defined in real world scale """
self._set_fields_of_pose_of(position, self.get_orientation())
def set_orientation(self, orientation):
"""Get position of body part
Orientation is defined in [x,y,z,w]"""
self._set_fields_of_pose_of(self.current_position(), orientation)
def set_pose(self, position, orientation):
self._set_fields_of_pose_of(position, orientation)
def pose(self):
return self.bp_pose
def current_position(self): # Synonym method
return self.get_position()
def current_orientation(self): # Synonym method
return self.get_orientation()
def reset_position(self, position): # Backward compatibility
self.set_position(position)
def reset_orientation(self, orientation): # Backward compatibility
self.set_orientation(orientation)
def reset_pose(self, position, orientation): # Backward compatibility
self.set_pose(position, orientation)
def speed(self):
if self.bodyPartIndex == -1:
(vx, vy, vz), _ = p.getBaseVelocity(self.bodies[self.bodyIndex])
else:
(x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
return np.array([vx, vy, vz])
def angular_speed(self):
if self.bodyPartIndex == -1:
_, (vr,vp,vyaw) = p.getBaseVelocity(self.bodies[self.bodyIndex])
else:
(x,y,z), (a,b,c,d), _,_,_,_, (vx, vy, vz), (vr,vp,vyaw) = p.getLinkState(self.bodies[self.bodyIndex], self.bodyPartIndex, computeLinkVelocity=1)
return np.array([vr, vp, vyaw])
def contact_list(self):
return p.getContactPoints(self.bodies[self.bodyIndex], -1, self.bodyPartIndex, -1)
class Joint:
def __init__(self, joint_name, bodies, bodyIndex, jointIndex, scale, model_type):
self.bodies = bodies
self.bodyIndex = bodyIndex
self.jointIndex = jointIndex
self.joint_name = joint_name
_,_,self.jointType,_,_,_,_,_,self.lowerLimit, self.upperLimit,_,_,_, _,_,_,_ = p.getJointInfo(self.bodies[self.bodyIndex], self.jointIndex)
self.power_coeff = 0
if model_type=="MJCF":
self.scale = scale
else:
self.scale = 1
if self.jointType == p.JOINT_PRISMATIC:
self.upperLimit *= self.scale
self.lowerLimit *= self.scale
def __str__(self):
return "idx: {}, name: {}".format(self.jointIndex, self.joint_name)
def get_state(self):
"""Get state of joint
Position is defined in real world scale """
x, vx,_,_ = p.getJointState(self.bodies[self.bodyIndex],self.jointIndex)
if self.jointType == p.JOINT_PRISMATIC:
x *= self.scale
vx *= self.scale
return x, vx
def set_state(self, x, vx):
"""Set state of joint
x is defined in real world scale """
if self.jointType == p.JOINT_PRISMATIC:
x /= self.scale
vx /= self.scale
p.resetJointState(self.bodies[self.bodyIndex], self.jointIndex, x, vx)
def get_relative_state(self):
pos, vel = self.get_state()
pos_mid = 0.5 * (self.lowerLimit + self.upperLimit);
return (
2 * (pos - pos_mid) / (self.upperLimit - self.lowerLimit),
0.1 * vel
)
def set_position(self, position):
    """Drive the joint towards ``position`` (real-world scale) via position control."""
    if self.jointType == p.JOINT_PRISMATIC:
        position = np.array(position) / self.scale
    p.setJointMotorControl2(self.bodies[self.bodyIndex], self.jointIndex,
                            p.POSITION_CONTROL, targetPosition=position)

def set_velocity(self, velocity):
    """Drive the joint at ``velocity`` (real-world scale) via velocity control."""
    if self.jointType == p.JOINT_PRISMATIC:
        velocity = np.array(velocity) / self.scale
    p.setJointMotorControl2(self.bodies[self.bodyIndex], self.jointIndex,
                            p.VELOCITY_CONTROL, targetVelocity=velocity)

def set_torque(self, torque):
    """Apply a raw torque to the joint (torque control mode)."""
    p.setJointMotorControl2(bodyIndex=self.bodies[self.bodyIndex],
                            jointIndex=self.jointIndex,
                            controlMode=p.TORQUE_CONTROL,
                            force=torque)

def reset_state(self, pos, vel):
    """Hard-reset the joint state (delegates to :meth:`set_state`)."""
    self.set_state(pos, vel)

def disable_motor(self):
    """Switch off the joint motor by commanding zero position/velocity with zero force."""
    p.setJointMotorControl2(self.bodies[self.bodyIndex], self.jointIndex,
                            controlMode=p.POSITION_CONTROL,
                            targetPosition=0, targetVelocity=0,
                            positionGain=0.1, velocityGain=0.1, force=0)
def get_joint_relative_state(self):
    """Synonym for :meth:`get_relative_state`."""
    return self.get_relative_state()

def set_motor_position(self, pos):
    """Synonym for :meth:`set_position`."""
    return self.set_position(pos)

def set_motor_torque(self, torque):
    """Synonym for :meth:`set_torque`."""
    return self.set_torque(torque)

def set_motor_velocity(self, vel):
    """Synonym for :meth:`set_velocity`."""
    return self.set_velocity(vel)

def reset_joint_state(self, position, velocity):
    """Synonym for :meth:`reset_state`."""
    return self.reset_state(position, velocity)

def current_position(self):
    """Backward-compatible alias for :meth:`get_state`."""
    return self.get_state()

def current_relative_position(self):
    """Backward-compatible alias for :meth:`get_relative_state`."""
    return self.get_relative_state()

def reset_current_position(self, position, velocity):
    """Backward-compatible alias for :meth:`reset_state`."""
    self.reset_state(position, velocity)

def reset_position(self, position, velocity):
    """Backward-compatible alias for :meth:`reset_state`."""
    self.reset_state(position, velocity)
79560d77721bf3983d8c8c52f9dc5b99f0c14c18 | 25,662 | py | Python | controllers/hrm.py | gnarula/eden | 13b930b6cfa1724a1d7e298aadd51dfb1feb53d6 | [
"MIT"
] | null | null | null | controllers/hrm.py | gnarula/eden | 13b930b6cfa1724a1d7e298aadd51dfb1feb53d6 | [
"MIT"
] | null | null | null | controllers/hrm.py | gnarula/eden | 13b930b6cfa1724a1d7e298aadd51dfb1feb53d6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Human Resource Management
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
s3db.hrm_vars()
# =============================================================================
def index():
    """ Module Home Page """

    if session.s3.hrm.mode is not None:
        # Personal mode: go straight to the user's own profile
        redirect(URL(f="person"))
    # Otherwise bypass the home page & go direct to the searchable Staff list
    redirect(URL(f="staff", args="summary"))
# =============================================================================
# People
# =============================================================================
def human_resource():
    """
    HR Controller
    - combined Staff/Volunteers
    Used for Summary view, Imports and S3AddPersonWidget2
    """

    # All behaviour lives in the shared model-level controller
    return s3db.hrm_human_resource_controller()
# -----------------------------------------------------------------------------
def staff():
    """ Staff Controller """

    # Restrict this controller to Staff records (HR type 1)
    s3.filter = FS("type") == 1

    def prep(r):
        table = r.table
        tablename = r.tablename
        get_vars = r.get_vars

        # Use CRUD strings for staff
        crud_strings = s3.crud_strings
        crud_strings[tablename] = crud_strings["hrm_staff"]

        resource = r.resource
        if "expiring" in get_vars:
            # Filter for staff with contracts expiring in the next 4 weeks
            query = FS("end_date") < \
                    (request.utcnow + datetime.timedelta(weeks=4))
            resource.add_filter(query)
            # Adapt CRUD strings
            crud_strings[tablename].title_list = \
                T("Staff with Contracts Expiring in the next Month")
            # Reconfigure: sort by expiry & remove the Add button
            resource.configure(sortby = table.end_date,
                               insertable = False,
                               )
            # Adapt list_fields
            list_fields = [(T("Contract End Date"), "end_date"),
                           "person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           ]
        else:
            # Adapt list_fields
            list_fields = ["person_id",
                           "job_title_id",
                           "organisation_id",
                           "department_id",
                           "site_id",
                           #"site_contact",
                           (T("Email"), "email.value"),
                           (settings.get_ui_label_mobile_phone(), "phone.value"),
                           ]
            if settings.get_hrm_use_trainings():
                list_fields.append("person_id$training.course_id")
            if settings.get_hrm_use_certificates():
                list_fields.append("person_id$certification.certificate_id")
            list_fields.append((T("Contract End Date"), "end_date"))
            list_fields.append("status")
        resource.configure(list_fields = list_fields)

        if r.interactive:
            if r.id:
                if r.method not in ("profile", "delete"):
                    # Redirect to the person controller for record views
                    redirect_vars = {"human_resource.id": r.id,
                                     "group": "staff",
                                     }
                    redirect_args = []
                    if r.representation == "iframe":
                        redirect_vars["format"] = "iframe"
                        redirect_args = [r.method]
                    redirect(URL(f="person",
                                 vars=redirect_vars,
                                 args=redirect_args))
            else:
                if r.method == "import":
                    # Redirect to the person controller for imports
                    redirect(URL(f="person",
                                 args="import",
                                 vars={"group": "staff"}))
                elif not r.component and r.method != "delete":
                    # Configure site_id
                    field = table.site_id
                    site_id = get_vars.get("site_id", None)
                    if site_id:
                        field.default = site_id
                        field.writable = False
                    field.comment = DIV(DIV(_class="tooltip",
                                            _title="%s|%s" % (
                                                settings.get_org_site_label(),
                                                T("The facility where this position is based."),
                                                )))
                    # Hide status field
                    table.status.writable = table.status.readable = False
                    # Assume staff only between 16-81
                    s3db.pr_person.date_of_birth.widget = \
                        S3DateWidget(past=972, future=-192)
        elif r.representation == "xls":
            # Make the export match the Import sheets
            list_fields = s3db.get_config(tablename, "list_fields")
            # XLS exporter doesn't like "id" not being first, so drop it
            try:
                list_fields.remove("id")
            except ValueError:
                pass
            # Separate Facility Type from Facility Name
            table.site_id.represent = s3db.org_SiteRepresent(show_type = False)
            # Insert the facility type column right after site_id
            # (or append if site_id is absent)
            insert_at = len(list_fields)
            for pos, fname in enumerate(list_fields, start=1):
                if fname == "site_id":
                    insert_at = pos
                    break
            list_fields.insert(insert_at,
                               (T("Facility Type"),
                                "person_id$human_resource.site_id$instance_type"))
            # Split person_id into first/middle/last
            try:
                list_fields.remove("person_id")
            except ValueError:
                pass
            list_fields = ["person_id$first_name",
                           "person_id$middle_name",
                           "person_id$last_name",
                           ] + list_fields
            s3db.configure(tablename,
                           list_fields = list_fields)
        return True
    s3.prep = prep

    def postp(r, output):
        if r.interactive:
            if not r.component:
                # Set the minimum end_date to the same as the start_date
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
                s3_action_buttons(r, deletable=settings.get_hrm_deletable())
                if "msg" in settings.modules and \
                   settings.get_hrm_compose_button() and \
                   auth.permission.has_permission("update", c="hrm", f="compose"):
                    # @ToDo: Remove this now that we have it in Events?
                    s3.actions.append(
                        {"url": URL(f="compose",
                                    vars = {"human_resource.id": "[id]"}),
                         "_class": "action-btn send",
                         "label": str(T("Send Message"))
                         })
        elif r.representation == "plain":
            # Map Popups
            output = s3db.hrm_map_popup(r)
        return output
    s3.postp = postp

    return s3_rest_controller("hrm", "human_resource")
# -----------------------------------------------------------------------------
def person():
    """
    Person Controller
    - used for access to component Tabs, Personal Profile & Imports
    - includes components relevant to HRM
    """

    # All behaviour lives in the shared model-level controller
    return s3db.hrm_person_controller()
# -----------------------------------------------------------------------------
def profile():
    """
    Profile Controller
    - lets the logged-in user manage their own person record,
      including the components relevant to HRM
    """

    # Operate on the logged-in user's own person record
    request.args = [str(s3_logged_in_person())]

    # Custom Method for Contacts
    s3db.set_method("pr", resourcename,
                    method = "contacts",
                    action = s3db.pr_Contacts)

    if settings.has_module("asset"):
        # Assets as component of people
        s3db.add_components("pr_person",
                            asset_asset = "assigned_to_id",
                            )

    group = get_vars.get("group", "staff")

    # Configure human resource table
    tablename = "hrm_human_resource"
    table = s3db[tablename]
    table.type.default = 1

    # Configure person table
    tablename = "pr_person"
    table = s3db[tablename]
    s3db.configure(tablename,
                   deletable = False,
                   )

    # Configure for personal mode
    s3.crud_strings[tablename].update(
        title_display = T("Personal Profile"),
        title_update = T("Personal Profile"))

    def prep(r):
        if r.interactive and r.method != "import":
            if r.component:
                if r.component_name == "physical_description":
                    # Lock every field, then re-enable only the details we want
                    ctable = r.component.table
                    for fname in ctable.fields:
                        ctable[fname].writable = ctable[fname].readable = False
                    ctable.ethnicity.writable = ctable.ethnicity.readable = True
                    ctable.blood_type.writable = ctable.blood_type.readable = True
                    ctable.medical_conditions.writable = ctable.medical_conditions.readable = True
                    ctable.other_details.writable = ctable.other_details.readable = True
            else:
                table = r.table
                table.pe_label.readable = table.pe_label.writable = False
                table.missing.readable = table.missing.writable = False
                table.age_group.readable = table.age_group.writable = False
                # Assume volunteers only between 12-81
                table.date_of_birth.widget = S3DateWidget(past=972, future=-144)
            return True
        else:
            # Disable non-interactive & import
            return False
    s3.prep = prep

    def postp(r, output):
        if r.interactive and r.component:
            # Set the minimum end_date to the same as the start_date
            if r.component_name == "human_resource":
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_human_resource_start_date','hrm_human_resource_end_date')''')
            if r.component_name == "experience":
                s3.jquery_ready.append(
'''S3.start_end_date('hrm_experience_start_date','hrm_experience_end_date')''')
        return output
    s3.postp = postp

    return s3_rest_controller("pr", "person",
                              rheader = s3db.hrm_rheader,
                              )
# -----------------------------------------------------------------------------
def hr_search():
    """
    Human Resource REST controller
    - limited to just search_ac for use in Autocompletes
    - allows differential access permissions
    """

    # Optional staff/volunteer restriction via ?group=
    hr_type = {"staff": 1, "volunteer": 2}.get(get_vars.get("group"))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type

    # Only the autocomplete method is permitted
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("hrm", "human_resource")

# -----------------------------------------------------------------------------
def person_search():
    """
    Person REST controller
    - limited to just search_ac for use in Autocompletes
    - allows differential access permissions
    """

    # Optional staff/volunteer restriction via ?group=
    hr_type = {"staff": 1, "volunteer": 2}.get(get_vars.get("group"))
    if hr_type is not None:
        s3.filter = FS("human_resource.type") == hr_type

    # Only the autocomplete method is permitted
    s3.prep = lambda r: r.method == "search_ac"
    return s3_rest_controller("pr", "person")
# =============================================================================
# Teams
# =============================================================================
def group():
    """
    Team controller
    - uses the group table from PR
    """

    return s3db.hrm_group_controller()

# -----------------------------------------------------------------------------
def group_membership():
    """
    Membership controller
    - uses the group_membership table from PR
    """

    # Change Labels & list_fields
    s3db.hrm_configure_pr_group_membership()

    # Only show Relief Teams; hide system groups; Staff members only
    table = db.pr_group_membership
    gtable = db.pr_group
    htable = s3db.hrm_human_resource
    s3.filter = ((gtable.system == False) &
                 (gtable.group_type == 3) &
                 (htable.type == 1) &
                 (htable.person_id == table.person_id))

    def prep(r):
        if r.method in ("create", "create.popup", "update", "update.popup"):
            # Coming from Profile page? Then fix the person
            person_id = get_vars.get("~.person_id", None)
            if person_id:
                field = table.person_id
                field.default = person_id
                field.readable = field.writable = False
        return True
    s3.prep = prep

    return s3_rest_controller("pr", "group_membership",
                              csv_template="group_membership",
                              csv_stylesheet=("hrm", "group_membership.xsl"),
                              )
# =============================================================================
# Jobs
# =============================================================================
def department():
    """ Departments Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    # Non-admins see only their own root organisation's departments
    if not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_department)

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def job_title():
    """ Job Titles Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    # Staff (1) and shared Staff/Volunteer (3) job titles only
    s3.filter = FS("type").belongs((1, 3))
    if not auth.s3_has_role(ADMIN):
        s3.filter &= auth.filter_by_root_org(s3db.hrm_job_title)

    return s3_rest_controller()
# =============================================================================
# Skills
# =============================================================================
def skill():
    """ Skills Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def skill_type():
    """ Skill Types Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def competency_rating():
    """ Competency Rating for Skill Types Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def skill_provision():
    """ Skill Provisions Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def course():
    """ Courses Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    # Non-admins see only their own root organisation's courses
    if not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_course)

    return s3_rest_controller(rheader=s3db.hrm_rheader)

# -----------------------------------------------------------------------------
def course_certificate():
    """ Courses to Certificates Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

# -----------------------------------------------------------------------------
def certificate():
    """ Certificates Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    # Optionally restrict non-admins to their own root organisation
    if settings.get_hrm_filter_certificates() and \
       not auth.s3_has_role(ADMIN):
        s3.filter = auth.filter_by_root_org(s3db.hrm_certificate)

    return s3_rest_controller(rheader=s3db.hrm_rheader)

# -----------------------------------------------------------------------------
def certificate_skill():
    """ Certificates to Skills Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()
# -----------------------------------------------------------------------------
def training():
    """ Training Controller - used for Searching for Participants """

    # Staff only
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_training_controller()

# -----------------------------------------------------------------------------
def training_event():
    """ Training Events Controller """

    return s3db.hrm_training_event_controller()

# -----------------------------------------------------------------------------
def credential():
    """ Credentials Controller """

    # Staff only
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_credential_controller()

# -----------------------------------------------------------------------------
def experience():
    """ Experience Controller """

    # Staff only
    s3.filter = FS("person_id$human_resource.type") == 1
    return s3db.hrm_experience_controller()

# -----------------------------------------------------------------------------
def competency():
    """
    RESTful CRUD controller used to allow searching for people by Skill
    """

    # Staff only, both for the list and for the person autocomplete
    s3.filter = FS("person_id$human_resource.type") == 1
    field = s3db.hrm_competency.person_id
    field.widget = S3PersonAutocompleteWidget(ajax_filter = "~.human_resource.type=1")
    return s3db.hrm_competency_controller()
# =============================================================================
def skill_competencies():
    """
    Called by S3OptionsFilter to provide the competency options for a
    particular Skill Type
    """

    stable = s3db.hrm_skill
    ttable = s3db.hrm_skill_type
    rtable = s3db.hrm_competency_rating
    # Ratings belonging to the skill type of the requested skill
    query = ((stable.id == request.args[0]) &
             (stable.skill_type_id == ttable.id) &
             (rtable.skill_type_id == stable.skill_type_id))
    records = db(query).select(rtable.id,
                               rtable.name,
                               orderby=~rtable.priority)

    response.headers["Content-Type"] = "application/json"
    return records.json()
# =============================================================================
def staff_org_site_json():
    """
    Used by the Asset - Assign to Person page
    """

    htable = s3db.hrm_human_resource
    otable = s3db.org_organisation
    # Sites & organisations of the requested person's HR records
    query = ((htable.person_id == request.args[0]) &
             (htable.organisation_id == otable.id))
    records = db(query).select(htable.site_id,
                               otable.id,
                               otable.name)

    response.headers["Content-Type"] = "application/json"
    return records.json()
# =============================================================================
def staff_for_site():
    """
    Used by the Req/Req/Create page
    - note that this returns Person IDs

    Expects the site ID as the first request argument and returns a JSON
    list of {"id": person_id, "name": full_name} for all active staff
    (status 1, not deleted, contract not yet ended) at that site.
    """

    try:
        site_id = request.args[0]
    except IndexError:
        # Narrowed from a bare except: only a missing URL argument is expected
        result = current.xml.json_message(False, 400, "No Site provided!")
    else:
        table = s3db.hrm_human_resource
        ptable = db.pr_person
        query = (table.site_id == site_id) & \
                (table.deleted == False) & \
                (table.status == 1) & \
                ((table.end_date == None) | \
                 (table.end_date > request.utcnow)) & \
                (ptable.id == table.person_id)
        rows = db(query).select(ptable.id,
                                ptable.first_name,
                                ptable.middle_name,
                                ptable.last_name,
                                orderby=ptable.first_name)
        result = json.dumps([{"id": row.id,
                              "name": s3_fullname(row),
                              } for row in rows])

    response.headers["Content-Type"] = "application/json"
    return result
# =============================================================================
# Salaries
# =============================================================================
def staff_level():
    """ Staff Levels Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

def salary_grade():
    """ Salary Grade Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()
# =============================================================================
# Insurance Information
# =============================================================================
def insurance():
    """ Insurance Information Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()
# =============================================================================
# Awards
# =============================================================================
def award_type():
    """ Award Type Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

def award():
    """ Awards Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()
# =============================================================================
# Disciplinary Record
# =============================================================================
def disciplinary_type():
    """ Disciplinary Type Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        # Not accessible in personal-profile mode
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()

def disciplinary_action():
    """ Disciplinary Action Controller """

    mode = session.s3.hrm.mode

    def deny_personal_mode(r):
        if mode is not None:
            auth.permission.fail()
        return True
    s3.prep = deny_personal_mode

    return s3_rest_controller()
# =============================================================================
# Messaging
# =============================================================================
def compose():
    """ Send message to people/teams """

    # Delegates to the shared HRM compose handler
    return s3db.hrm_compose()
# END =========================================================================
| 33.240933 | 111 | 0.4698 |
79560d7e6af36cc28d450cfa27cc418c04856c31 | 1,983 | py | Python | aioskybell/helpers/models.py | tkdrob/aioskybell | d07a091ecc5cf3122da9ea1f0bbdbfa3bedaac05 | [
"MIT"
] | null | null | null | aioskybell/helpers/models.py | tkdrob/aioskybell | d07a091ecc5cf3122da9ea1f0bbdbfa3bedaac05 | [
"MIT"
] | 3 | 2022-03-01T06:32:55.000Z | 2022-03-08T17:08:16.000Z | aioskybell/helpers/models.py | tkdrob/aioskybell | d07a091ecc5cf3122da9ea1f0bbdbfa3bedaac05 | [
"MIT"
] | null | null | null | """Models for Skybell."""
from __future__ import annotations
from datetime import datetime
class InfoDict(dict):
    """Device info payload.

    A plain ``dict`` subclass; the class-level annotations document the
    keys the Skybell API is expected to return.
    """

    address: str
    checkedInAt: str
    clientId: str
    deviceId: str
    essid: str
    firmwareVersion: str
    hardwareRevision: str
    localHostname: str
    mac: str
    port: str
    proxy_address: str
    proxy_port: str
    region: str
    serialNo: str
    status: dict[str, str]
    timestamp: str
    wifiBitrate: str
    wifiLinkQuality: str
    wifiNoise: str
    wifiSignalLevel: str
    wifiTxPwrEeprom: str


class DeviceDict(dict):
    """Device record.

    A plain ``dict`` subclass; the annotations document the expected keys.
    """

    acl: str
    createdAt: str
    deviceInviteToken: str
    id: str
    location: dict[str, str]
    name: str
    resourceId: str
    status: str
    type: str
    updatedAt: str
    user: str
    uuid: str


class AvatarDict(dict):
    """Avatar record.

    A plain ``dict`` subclass; the annotations document the expected keys.
    """

    createdAt: str
    url: str
class SettingsDict(dict):
    """Device settings.

    A plain ``dict`` subclass; the class-level annotations document the
    expected keys, all of which may be absent (hence ``| None``).
    """

    chime_level: str | None
    digital_doorbell: str | None
    do_not_disturb: str | None
    do_not_ring: str | None
    green_b: str | None
    green_g: str | None
    green_r: str | None
    high_front_led_dac: str | None
    high_lux_threshold: str | None
    led_intensity: str | None
    low_front_led_dac: str | None
    low_lux_threshold: str | None
    med_front_led_dac: str | None
    med_lux_threshold: str | None
    mic_volume: str | None
    motion_policy: str | None
    motion_threshold: str | None
    ring_tone: str | None
    speaker_volume: str | None
    video_profile: str | None


class EventDict(dict):
    """A single device event.

    A plain ``dict`` subclass; the annotations document the expected keys.
    """

    _id: str
    callId: str
    createdAt: datetime
    device: str
    event: str
    id: str
    media: str
    mediaSmall: str
    state: str
    ttlStartDate: str
    updatedAt: str
    videoState: str


# Aliases for the nested event structures keyed by id / type / device.
EventTypeDict = dict[str, EventDict]
DeviceTypeDict = dict[str, dict[str, EventTypeDict]]
DevicesDict = dict[str, DeviceTypeDict]
| 19.441176 | 52 | 0.648512 |
79560d8edc5b4902fea99693bb953e9f1af7749f | 1,210 | py | Python | note5/std_test.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null | note5/std_test.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null | note5/std_test.py | icexmoon/python-learning-notes | 838c91d896404290b89992b6517be1b6a79df41f | [
"MIT"
] | null | null | null | import random
print(random.random())
#生成一个随机整数
print(random.randint(1,10))
#在一个list中随机选定多个
print(random.sample([1,2,3,4,5,6],3))
# 0.9078117032490299
# 9
# [3, 2, 1]
import sys
print(sys.argv)
print(sys.path)
sys.path.append("D:\\worksapce\\python\\time_tools")
print(sys.path)
# ['D:\\workspace\\python\\python-learning-notes\\note5\\test.py']
# ['D:\\workspace\\python\\python-learning-notes\\note5', 'D:\\software\\Coding\\Python\\python39.zip', 'D:\\software\\Coding\\Python\\DLLs', 'D:\\software\\Coding\\Python\\lib', 'D:\\software\\Coding\\Python', 'C:\\Users\\70748\\AppData\\Roaming\\Python\\Python39\\site-packages', 'D:\\software\\Coding\\Python\\lib\\site-packages', 'D:\\software\\Coding\\Python\\lib\\site-packages\\you_get-0.4.1500-py3.9.egg']
# ['D:\\workspace\\python\\python-learning-notes\\note5', 'D:\\software\\Coding\\Python\\python39.zip', 'D:\\software\\Coding\\Python\\DLLs', 'D:\\software\\Coding\\Python\\lib', 'D:\\software\\Coding\\Python', 'C:\\Users\\70748\\AppData\\Roaming\\Python\\Python39\\site-packages', 'D:\\software\\Coding\\Python\\lib\\site-packages', 'D:\\software\\Coding\\Python\\lib\\site-packages\\you_get-0.4.1500-py3.9.egg', 'D:\\worksapce\\python\\time_tools']
| 67.222222 | 450 | 0.696694 |
79560de5ffe8743a70af8f6eabae540ba8ebf82d | 5,835 | py | Python | dash_bio/FornaContainer.py | shday/dash-bio-1 | 81bb6fa257febb59d7841f8c5573e7231f5a9095 | [
"MIT"
] | 2 | 2020-06-30T12:19:53.000Z | 2020-11-18T08:47:29.000Z | dash_bio/FornaContainer.py | shday/dash-bio-1 | 81bb6fa257febb59d7841f8c5573e7231f5a9095 | [
"MIT"
] | 5 | 2020-09-07T22:33:52.000Z | 2022-02-13T05:18:30.000Z | dash_bio/FornaContainer.py | shday/dash-bio-1 | 81bb6fa257febb59d7841f8c5573e7231f5a9095 | [
"MIT"
] | 1 | 2020-06-09T15:29:19.000Z | 2020-06-09T15:29:19.000Z | # AUTO GENERATED FILE - DO NOT EDIT
from dash.development.base_component import Component, _explicitize_args
class FornaContainer(Component):
    """A FornaContainer component.

    FornaContainer is a force-directed graph used to visualize the
    secondary structure of biomolecules. It is based on the fornac
    library (https://github.com/ViennaRNA/fornac).

    Keyword arguments:
    - id (string; optional): Dash component ID, unique across the app.
    - height (number; default 500): Height (in px) of the display container.
    - width (number; default 300): Width (in px) of the display container.
    - sequences (list of dicts; optional): The molecules to display. Each dict
      has a 'sequence' (RNA nucleotide string), a 'structure' (dot-bracket
      string describing the secondary structure) and optional 'options'
      ('applyForce', 'circularizeExternal', 'labelInterval', 'name',
      'avoidOthers').
    - nodeFillColor (string; optional): Fill color for all nodes; overrides
      any scheme set in colorScheme.
    - colorScheme (a value equal to: 'sequence', 'structure', 'positions',
      'custom'; default 'sequence'): The color scheme used to color nodes.
    - customColors (dict; optional): Colors for the 'custom' scheme, with keys
      'domain' (scale limits), 'range' (colors) and 'colorValues'
      (per-molecule, per-position values or colors).
    - allowPanningAndZooming (boolean; default True): Allow zooming/panning;
      pressing 'c' centers the view."""

    @_explicitize_args
    def __init__(self, id=Component.UNDEFINED, height=Component.UNDEFINED, width=Component.UNDEFINED, sequences=Component.UNDEFINED, nodeFillColor=Component.UNDEFINED, colorScheme=Component.UNDEFINED, customColors=Component.UNDEFINED, allowPanningAndZooming=Component.UNDEFINED, **kwargs):
        props = ['id', 'height', 'width', 'sequences', 'nodeFillColor',
                 'colorScheme', 'customColors', 'allowPanningAndZooming']
        self._prop_names = list(props)
        self._type = 'FornaContainer'
        self._namespace = 'dash_bio'
        self._valid_wildcard_attributes = []
        self.available_properties = list(props)
        self.available_wildcard_properties = []

        _explicit_args = kwargs.pop('_explicit_args')
        _locals = locals()
        _locals.update(kwargs)  # For wildcard attrs
        args = {k: _locals[k] for k in _explicit_args if k != 'children'}

        # This component has no required props; the loop is kept from the
        # generator template and is a no-op here.
        for k in []:
            if k not in args:
                raise TypeError(
                    'Required argument `' + k + '` was not specified.')
        super(FornaContainer, self).__init__(**args)
79560f15741b069935a4d3c9c7a5bb3ae369da83 | 2,309 | py | Python | sitemessage/messengers/twitter.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | null | null | null | sitemessage/messengers/twitter.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | null | null | null | sitemessage/messengers/twitter.py | furins/django-sitemessage | 4cdfa0e78eb122dea835c9c4ef845f44e3a5eb90 | [
"BSD-3-Clause"
] | null | null | null | from django.utils.translation import gettext as _
from .base import MessengerBase
from ..exceptions import MessengerWarmupException
class TwitterMessenger(MessengerBase):
"""Implements to Twitter message delivery using `twitter` module.
https://github.com/sixohsix/twitter
"""
alias = 'twitter'
title = _('Tweet')
address_attr = 'twitter'
_session_started = False
def __init__(self, api_key, api_secret, access_token, access_token_secret):
"""Configures messenger.
Register Twitter application here - https://apps.twitter.com/
:param api_key: API key for Twitter client
:param api_secret: API secret for Twitter client
:param access_token: Access token for an account to tweet from
:param access_token_secret: Access token secret for an account to tweet from
"""
import twitter
self.lib = twitter
self.api_key = api_key
self.api_secret = api_secret
self.access_token = access_token
self.access_token_secret = access_token_secret
def _test_message(self, to, text):
return self._send_message(self._build_message(to, text))
def before_send(self):
try:
self.api = self.lib.Twitter(auth=self.lib.OAuth(self.access_token, self.access_token_secret, self.api_key, self.api_secret))
self._session_started = True
except self.lib.api.TwitterError as e:
raise MessengerWarmupException(f'Twitter Error: {e}')
@classmethod
def _build_message(cls, to, text):
if to:
if not to.startswith('@'):
to = f'@{to}'
to = f'{to} '
else:
to = ''
return f'{to}{text}'
def _send_message(self, msg):
return self.api.statuses.update(status=msg)
def send(self, message_cls, message_model, dispatch_models):
if self._session_started:
for dispatch_model in dispatch_models:
msg = self._build_message(dispatch_model.address, dispatch_model.message_cache)
try:
self._send_message(msg)
self.mark_sent(dispatch_model)
except Exception as e:
self.mark_error(dispatch_model, e, message_cls)
| 30.786667 | 136 | 0.637505 |
79560fc75693c1e5db85c6f9d05dc5410ac1851c | 59 | py | Python | multacdkrecipies/recipies/utils/__init__.py | u93/multa-cdkrecipies | f9ea04f2c85aa8848d21a3c93f40eaa68c3065a1 | [
"MIT"
] | 2 | 2020-08-02T05:44:32.000Z | 2020-10-02T01:38:18.000Z | multacdkrecipies/recipies/utils/__init__.py | u93/multa-cdkrecipies | f9ea04f2c85aa8848d21a3c93f40eaa68c3065a1 | [
"MIT"
] | 6 | 2021-02-06T21:05:35.000Z | 2021-04-25T18:45:48.000Z | multacdkrecipies/recipies/utils/__init__.py | u93/multa-cdkrecipies | f9ea04f2c85aa8848d21a3c93f40eaa68c3065a1 | [
"MIT"
] | 1 | 2020-10-01T00:42:09.000Z | 2020-10-01T00:42:09.000Z | from .common_functions import *
from .validations import *
| 19.666667 | 31 | 0.79661 |
79561033fdd8596e6ef8548c7992a8510657a8b4 | 9,094 | py | Python | tests/test_lease_server.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 8 | 2015-04-27T00:41:03.000Z | 2021-11-14T17:15:26.000Z | tests/test_lease_server.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 84 | 2015-04-27T12:15:00.000Z | 2021-11-28T19:06:23.000Z | tests/test_lease_server.py | agdsn/hades | 78782831b840188b14e37c0673a6d6e9712f64ce | [
"MIT"
] | 4 | 2015-11-14T16:20:40.000Z | 2017-09-05T00:27:49.000Z | import array
import contextlib
import logging
import mmap
import os
import socket
import struct
from io import FileIO
from typing import Callable, Dict, Generator, List, Optional, Tuple, TypeVar
import pytest
from _pytest.logging import LogCaptureFixture
from hades.leases.server import (
BufferTooSmallError, BaseParseError, ParseError, Parser, Server, UnexpectedEOFError, zip_left,
)
T = TypeVar('T')
ParserFactory = Callable[[mmap.mmap, int], Parser[T]]
Driver = Callable[[mmap.mmap, int, ParserFactory], T]
MODE_MAP = {
'rb': os.O_RDONLY,
'wb': os.O_WRONLY,
'rb+': os.O_RDWR,
}
@pytest.fixture(
params=[["rb"], ["wb"], ["rb+"], ["rb", "wb", "rb+"]],
)
def files(request) -> List[FileIO]:
with contextlib.ExitStack() as stack:
# Must use closefd=False, because parse_ancillary_data will return
# streams with closefd=True.
# noinspection PyTypeChecker
yield [
stack.enter_context(os.fdopen(
os.open(os.devnull, flags=MODE_MAP[mode]),
mode=mode,
buffering=0,
closefd=False,
))
for mode in request.param
]
def test_parse_ancillary_data(files: List[FileIO]):
with contextlib.ExitStack() as stack:
for file in files:
stack.callback(os.close, file.fileno())
data = [(
socket.SOL_SOCKET,
socket.SCM_RIGHTS,
array.array("i", map(FileIO.fileno, files)).tobytes(),
)]
streams = Server.parse_ancillary_data(data)
stack.pop_all()
assert [
(stream.fileno(), stream.mode) for stream in streams
] == [
(file.fileno(), file.mode) for file in files
]
def test_parse_ancillary_data_unknown(caplog: LogCaptureFixture):
data = [(
socket.SOL_SOCKET,
socket.SCM_CREDENTIALS,
struct.pack("=iII", 1, 0, 0),
)]
streams = Server.parse_ancillary_data(data)
assert len(streams) == 0
assert caplog.record_tuples == [(
Server.__module__,
logging.WARNING,
"Received unexpected control message: level=%d type=%d" % (
socket.SOL_SOCKET, socket.SCM_CREDENTIALS,
),
)]
def create_buffer(size: int = mmap.PAGESIZE):
return mmap.mmap(
-1,
size,
mmap.MAP_PRIVATE,
mmap.PROT_READ | mmap.PROT_WRITE,
)
@pytest.fixture(
params=[0, 255],
ids=lambda offset: 'offset={:d}'.format(offset),
)
def buffer(request) -> Generator[mmap.mmap, None, None]:
with create_buffer() as b:
b.seek(request.param, os.SEEK_SET)
yield b
def drive_at_once(buffer: mmap.mmap, size: int, parser: ParserFactory[T]) -> T:
"""Pass all data to a parser"""
generator = parser(buffer, size)
with contextlib.closing(generator):
try:
needed = next(generator)
except StopIteration as e:
assert id(e.value[0]) == id(buffer)
assert e.value[1] == size
return e.value[2]
except BaseParseError as e:
raise e.with_offset(buffer.tell())
else:
offset = buffer.tell()
if needed > len(buffer):
generator.throw(BufferTooSmallError(needed, len(buffer), offset=offset))
else:
generator.throw(UnexpectedEOFError(needed, size - offset, offset=offset))
def drive_minimal(buffer: mmap.mmap, size: int, parser: ParserFactory[T]) -> T:
"""Pass only the minimum number of requested bytes to a parser"""
end = size
size = buffer.tell()
generator = parser(buffer, size)
needed = next(generator)
with contextlib.closing(generator):
while buffer.tell() + needed <= end:
try:
size = buffer.tell() + needed
needed = generator.send((buffer, size))
except StopIteration as e:
assert id(e.value[0]) == id(buffer)
assert e.value[1] == size
return e.value[2]
except BaseParseError as e:
raise e.with_offset(buffer.tell())
offset = buffer.tell()
if needed > len(buffer):
generator.throw(BufferTooSmallError(needed, len(buffer), offset=offset))
else:
generator.throw(UnexpectedEOFError(needed, end - offset, offset=offset))
@pytest.fixture(scope='session', params=[drive_at_once, drive_minimal])
def driver(request) -> Driver[T]:
return request.param
def fill_buffer(buffer: mmap.mmap, value: bytes) -> int:
start = buffer.tell()
buffer.write(value)
size = buffer.tell()
# Trailing zero byte
buffer.write_byte(0)
buffer.seek(start, os.SEEK_SET)
return size
@pytest.mark.parametrize(
"value", [0, 1, 2, 3, 4, -1],
)
def test_parse_valid_int(driver: Driver[int], buffer: mmap.mmap, value: int):
size = fill_buffer(buffer, struct.pack("=i", value))
parsed_value = driver(buffer, size, Server.parse_int)
assert parsed_value == value
assert buffer.tell() == size
def test_parse_int_eof(driver: Driver[int], buffer: mmap.mmap):
offset = buffer.tell()
serialized = struct.pack("=i", -1)
end = len(serialized) // 2
size = fill_buffer(buffer, serialized[:end])
with pytest.raises(UnexpectedEOFError) as e:
driver(buffer, size, Server.parse_int)
assert e.value.element == "int"
assert e.value.offset == offset
def test_parse_int_buffer_too_small(driver: Driver[int]):
value = struct.pack("=i", -1)
size = len(value) // 2
with create_buffer(size) as buffer:
buffer[:] = value[:size]
with pytest.raises(BufferTooSmallError) as e:
driver(buffer, size, Server.parse_int)
assert e.value.element == "int"
assert e.value.offset == 0
@pytest.mark.parametrize(
"value",
[b"test", b"", bytes(range(0x01, 0x100))],
ids=("test", "empty string", "all bytes")
)
def test_parse_valid_string(
driver: Driver[bytes],
buffer: mmap.mmap,
value: bytes,
):
size = fill_buffer(buffer, value + b"\x00")
parsed_value = driver(buffer, size, Server.parse_string)
assert parsed_value == value
assert buffer.tell() == size
def test_parse_string_eof(driver: Driver[bytes], buffer: mmap.mmap):
offset = buffer.tell()
size = fill_buffer(buffer, b"test")
with pytest.raises(UnexpectedEOFError) as e:
driver(buffer, size, Server.parse_string)
assert e.value.element == "string"
assert e.value.offset == offset
def test_parse_string_buffer_too_small(driver: Driver[bytes]):
value = b"test"
size = len(value)
with create_buffer(size) as buffer:
buffer[:] = value
with pytest.raises(BufferTooSmallError) as e:
driver(buffer, size, Server.parse_string)
assert e.value.element == "string"
assert e.value.offset == 0
def serialize_request(
argv: List[bytes],
environ: Dict[bytes, bytes],
argc: Optional[int] = None,
envc: Optional[int] = None,
) -> bytes:
return b"".join([
struct.pack("=i", len(argv) if argc is None else argc),
] + [
arg + b"\x00" for arg in argv
] + [
struct.pack("=i", len(environ) if envc is None else envc),
] + [
k + b"=" + v + b"\x00" for k, v in environ.items()
])
@pytest.mark.parametrize(
"argv,environ",
[
([], {}),
([b"arg0", b"add"], {b"DNSMASQ_ENV": b"1"}),
],
)
def test_parse_valid_request(
driver: Driver[Tuple[List[bytes], Dict[bytes, bytes]]],
buffer: mmap.mmap,
argv: List[bytes],
environ: Dict[bytes, bytes],
):
size = fill_buffer(buffer, serialize_request(argv, environ))
got_argv, got_environ = driver(buffer, size, Server.parse_request)
assert (argv, environ) == (got_argv, got_environ)
def test_parse_negative_argc(
driver: Driver[Tuple[List[bytes], Dict[bytes, bytes]]],
buffer: mmap.mmap,
):
size = fill_buffer(buffer, serialize_request([], {}, -1))
with pytest.raises(ParseError):
driver(buffer, size, Server.parse_request)
def test_parse_overflow_argc(
driver: Driver[Tuple[List[bytes], Dict[bytes, bytes]]],
buffer: mmap.mmap,
):
size = fill_buffer(buffer, serialize_request([], {}, 1, -1))
with pytest.raises(UnexpectedEOFError) as e:
driver(buffer, size, Server.parse_request)
assert e.value.element == "argv[0]"
def test_parse_overflow_envc(
driver: Driver[Tuple[List[bytes], Dict[bytes, bytes]]],
buffer: mmap.mmap,
):
size = fill_buffer(buffer, serialize_request([], {}, None, 1))
with pytest.raises(UnexpectedEOFError) as e:
driver(buffer, size, Server.parse_request)
assert e.value.element == "environ[0]"
def test_zip_left():
assert list(zip_left("abc", "a", rfill="X")) == [
("a", "a"),
("b", "X"),
("c", "X"),
]
assert list(zip_left("abc", "abcde", rfill="X")) == [
("a", "a"),
("b", "b"),
("c", "c"),
]
| 29.241158 | 98 | 0.609083 |
795611b2643730f5064581ee727f908774de1454 | 841 | py | Python | code-everyday-challenge/n07_day.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n07_day.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null | code-everyday-challenge/n07_day.py | ved93/deliberate-practice-challenges | 2fccdbb9d2baaa16f888055c081a8d04804c0045 | [
"MIT"
] | null | null | null |
#fibonacci pisano period
import sys
def get_fibonacci_huge(n,m):
if n <= 1:
return n
a=pisanoperiod(get_fibonacci(m))
previous=0
current =1
g = n%a
if g <= 1:
return g
for i in range(g-1):
previous,current = current,previous+current
if i%10 ==0:
previous,current =previous,current
return current% m
def pisanoperiod(a):
index =0
for i in range(1,len(a)):
if a[i]==0 & (a[i+1]==1):
break
index = i
return index
def get_fibonacci(m):
x = m*m+1
a = []
a0 = 0
a1 = 1
a.append(a0)
a.append(a1)
for i in range(x):
a0,a1 = a1,(a0+a1)%m
a.append(a1)
return a
if __name__ == "__main__":
n,m = map(int, input().split())
print(get_fibonacci_huge(n,m)) | 15.017857 | 51 | 0.51962 |
795611b379557f9804e80ce00bfdbd777f2f116f | 770 | py | Python | resources/discovery.py | fragaria/gap-resources | fdf0084f6cbd014b788209d288ea3793493a8208 | [
"MIT"
] | null | null | null | resources/discovery.py | fragaria/gap-resources | fdf0084f6cbd014b788209d288ea3793493a8208 | [
"MIT"
] | null | null | null | resources/discovery.py | fragaria/gap-resources | fdf0084f6cbd014b788209d288ea3793493a8208 | [
"MIT"
] | null | null | null | from google.appengine.ext import ndb
from gap.utils.imports import import_class
from register import register
from resources.resource import Resource
def discover_models(modules):
for module in modules:
module = import_class(module)
if module.__class__ == 'module':
for model in [m for m in module.__dict__.values() if isinstance(m, ndb.model.MetaModel)]:
register(model)
elif isinstance(module, ndb.model.MetaModel): # the module is actualy a model
register(module)
elif issubclass(module, Resource):
register(module.model, module)
else:
raise TypeError("Expected modul, resource or model but got %s.%s" % (module.__module__, module.__class__.__name__))
| 33.478261 | 127 | 0.675325 |
795611e9f93debce6fea4485ed430f870d89d994 | 1,436 | py | Python | pyramid_debugtoolbar/panels/renderings.py | rollbar/pyramid_debugtoolbar | dab4278eb68b801b1d3e9679cf1308096c3f849f | [
"Apache-2.0"
] | null | null | null | pyramid_debugtoolbar/panels/renderings.py | rollbar/pyramid_debugtoolbar | dab4278eb68b801b1d3e9679cf1308096c3f849f | [
"Apache-2.0"
] | null | null | null | pyramid_debugtoolbar/panels/renderings.py | rollbar/pyramid_debugtoolbar | dab4278eb68b801b1d3e9679cf1308096c3f849f | [
"Apache-2.0"
] | 1 | 2021-02-21T12:18:04.000Z | 2021-02-21T12:18:04.000Z | from pyramid_debugtoolbar.panels import DebugPanel
from pyramid_debugtoolbar.utils import dictrepr
from pyramid_debugtoolbar.compat import text_
_ = lambda x: x
class RenderingsDebugPanel(DebugPanel):
"""
Panel that displays the renderers (templates and 'static' renderers such
as JSON) used during a request.
"""
name = 'Template'
renderings = ()
template = 'pyramid_debugtoolbar.panels:templates/renderings.dbtmako'
@property
def has_content(self):
return bool(self.renderings)
def process_beforerender(self, event):
if not self.renderings:
self.renderings = []
name = event['renderer_info'].name
if name and name.startswith('pyramid_debugtoolbar'):
return
val = getattr(event, 'rendering_val', '<unknown>')
try:
val = repr(val)
except:
# crazyass code raises an exception during __repr__ (formish)
val = '<unknown>'
self.renderings.append(
dict(name=name, system=dictrepr(event), val=text_(val, 'utf-8'))
)
def nav_title(self):
return _('Renderers')
def nav_subtitle(self):
num = len(self.renderings)
return '%d' % (num)
def title(self):
return _('Renderers')
def url(self):
return ''
def process_response(self, response):
self.data = {'renderings': self.renderings}
| 27.615385 | 76 | 0.625348 |
7956120bad327dc93b38b9ba47682f0bac200a70 | 1,437 | py | Python | lldb/packages/Python/lldbsuite/test/functionalities/history/TestHistoryRecall.py | tkf/opencilk-project | 48265098754b785d1b06cb08d8e22477a003efcd | [
"MIT"
] | 2 | 2019-05-24T14:10:24.000Z | 2019-05-24T14:27:38.000Z | packages/Python/lldbsuite/test/functionalities/history/TestHistoryRecall.py | DalavanCloud/lldb | e913eaf2468290fb94c767d474d611b41a84dd69 | [
"Apache-2.0"
] | 10 | 2018-05-27T23:16:42.000Z | 2019-09-30T13:28:45.000Z | packages/Python/lldbsuite/test/functionalities/history/TestHistoryRecall.py | DalavanCloud/lldb | e913eaf2468290fb94c767d474d611b41a84dd69 | [
"Apache-2.0"
] | 3 | 2019-12-21T06:35:35.000Z | 2020-06-07T23:18:58.000Z | """
Make sure the !N and !-N commands work properly.
"""
from __future__ import print_function
import os
import time
import re
import lldb
import lldbsuite.test.lldbutil as lldbutil
from lldbsuite.test.lldbtest import *
class TestHistoryRecall(TestBase):
mydir = TestBase.compute_mydir(__file__)
# If your test case doesn't stress debug info, the
# set this to true. That way it won't be run once for
# each debug info format.
NO_DEBUG_INFO_TESTCASE = True
def test_history_recall(self):
"""Test the !N and !-N functionality of the command interpreter."""
self.sample_test()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
def sample_test(self):
interp = self.dbg.GetCommandInterpreter()
result = lldb.SBCommandReturnObject()
interp.HandleCommand("command history", result, True)
interp.HandleCommand("platform list", result, True)
interp.HandleCommand("!0", result, False)
self.assertTrue(result.Succeeded(), "!0 command did not work: %s"%(result.GetError()))
self.assertTrue("command history" in result.GetOutput(), "!0 didn't rerun command history")
interp.HandleCommand("!-1", result, False)
self.assertTrue(result.Succeeded(), "!-1 command did not work: %s"%(result.GetError()))
self.assertTrue("host:" in result.GetOutput(), "!-1 didn't rerun platform list.")
| 31.23913 | 99 | 0.677105 |
795612284446b131d597c5b9d3a1eb79c32b705f | 5,865 | py | Python | experiments/image_experiments.py | kckishan/Depth_and_Dropout | 64bbff9169d588486d92946485e108342daa29b0 | [
"MIT"
] | 1 | 2021-11-19T06:43:12.000Z | 2021-11-19T06:43:12.000Z | experiments/image_experiments.py | kckishan/Depth_and_Dropout | 64bbff9169d588486d92946485e108342daa29b0 | [
"MIT"
] | null | null | null | experiments/image_experiments.py | kckishan/Depth_and_Dropout | 64bbff9169d588486d92946485e108342daa29b0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
from tqdm import tqdm
import os
import torch
import torchvision
import torchvision.transforms as transforms
from torch import nn
from torch.nn import functional as F
from torch.utils.data import DataLoader
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use("default")
import seaborn as sns
sns.set_style("ticks")
import sys
sys.path.append("../")
from src.models.CNN import AdaptiveConvNet
from src.utils import get_device, plot_network_mask
import argparse
def argument_parser():
parser = argparse.ArgumentParser(description="Run Nonparametric Bayesian Architecture Learning")
parser.add_argument('--use-cuda', action='store_false',
help="Use CPU or GPU")
parser.add_argument("--prior_temp", type=float, default=1.,
help="Temperature for Concrete Bernoulli from prior")
parser.add_argument("--temp", type=float, default=.5,
help="Temperature for Concrete Bernoulli from posterior")
parser.add_argument("--epsilon", type=float, default=0.01,
help="Epsilon to select the activated layers")
parser.add_argument("--truncation_level", type=int, default=10,
help="K+: Truncation for Z matrix")
parser.add_argument("--a_prior", type=float, default=1.1,
help="a parameter for Beta distribution")
parser.add_argument("--b_prior", type=float, default=10.,
help="b parameter for Beta distribution")
parser.add_argument("--kernel", type=int, default=5,
help="Kernel size. Default is 3.")
parser.add_argument("--num_samples", type=int, default=5,
help="Number of samples of Z matrix")
parser.add_argument("--epochs", type=int, default=50,
help="Number of training epochs.")
parser.add_argument("--lr", type=float, default=0.003,
help="Learning rate.")
parser.add_argument("--l2", type=float, default=1e-6,
help="Coefficient of weight decay.")
parser.add_argument("--batch_size", type=float, default=64,
help="Batch size.")
parser.add_argument("--max_width", type=int, default=64,
help="Dimension of hidden representation.")
return parser.parse_known_args()[0]
args = argument_parser()
transform_train = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
# Normalize the test set same as training set without augmentation
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])
train_dataset = torchvision.datasets.MNIST(root='./data', train=True, transform=transform_train, download=True)
test_dataset = torchvision.datasets.MNIST(root='./data', train=False, download=True, transform=transform_test)
# Data Loader (Input Pipeline)
train_loader = torch.utils.data.DataLoader(dataset=train_dataset, batch_size=args.batch_size, num_workers=4, shuffle=True)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=args.batch_size, num_workers=4, shuffle=False)
device = get_device(args)
model = AdaptiveConvNet(input_channels=1,
num_classes=10,
num_channels=args.max_width,
kernel_size=args.kernel,
args=args,
device=device).to(device)
model = model.to(device)
print(model)
loss_fn = nn.CrossEntropyLoss(reduction="none")
optimizer = torch.optim.AdamW(model.parameters(), args.lr, weight_decay=args.l2)
if not os.path.exists("results"):
os.mkdir("results")
def evaluate(test_loader):
loglike = 0
error_sum = 0
with torch.no_grad():
model.eval()
for i, (data, labels) in enumerate(test_loader):
data = data.float().to(device)
labels = labels.long().to(device)
output = model(data, args.num_samples)
pred = output.mean(0)
logits = F.softmax(pred, dim=1)
ll = -F.nll_loss(logits, labels, reduction="sum").item()
loglike += ll
predicted = torch.argmax(logits, 1)
error = predicted.ne(labels).sum().item()
error_sum += error
test_loglikes = loglike / len(test_dataset)
test_err = error_sum / len(test_dataset)
test_metrics = {'test_err': round(test_err * 100, 3),
'test_like': round(test_loglikes, 3)}
return test_metrics
train_losses = []
with tqdm(range(args.epochs)) as tq:
for epoch in tq:
train_loss = 0.0
model.train()
for i, (data, labels) in enumerate(train_loader):
data = data.float().to(device)
labels = labels.long().to(device)
# making grad zero
optimizer.zero_grad()
# sample an architecture
act_vec = model(data, args.num_samples)
loss = model.estimate_ELBO(loss_fn, act_vec, labels, N_train=len(train_dataset), kl_weight=1)
loss.backward()
optimizer.step()
# adding losses
train_loss += loss.item()
train_loss = train_loss / len(train_loader)
train_losses.append(train_loss)
test_results = evaluate(test_loader)
print("Test error: {} Test Log likelihood: {}".format(test_results['test_err'], test_results['test_like']))
kl_beta = model.structure_sampler.get_kl()
tq.set_postfix({'Tr. loss': '%.6f' % train_loss, 'KL Beta': '%.6f' % kl_beta})
torch.save(model, "results/model_MNIST.pt")
| 37.596154 | 122 | 0.619437 |
7956144083ffb2ec2ad5576682871465cb431994 | 10,623 | py | Python | Controller/pesquisa_servico.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | Controller/pesquisa_servico.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | Controller/pesquisa_servico.py | felipezago/ControleEstoque | 229659c4f9888fd01df34375ec92af7a1f734d10 | [
"MIT"
] | null | null | null | from PyQt5.QtWidgets import QMessageBox, QTableWidgetItem, QMainWindow
from PyQt5.QtCore import Qt
from Model.Servicos import Servicos
from PyQt5 import QtGui
from PyQt5 import QtCore
class EventFilter(QtCore.QObject):
def __init__(self, parent=None):
QtCore.QObject.__init__(self, parent)
def eventFilter(self, obj, event):
if event.type() == QtCore.QEvent.ActivationChange:
if self.parent().isActiveWindow():
if obj.adicionando:
obj.dados_tabela()
return QtCore.QObject.eventFilter(self, obj, event)
class PesquisaServico(QMainWindow):
def __init__(self, parent=None):
super(PesquisaServico, self).__init__(parent)
from View.pesquisa_servicos import Ui_Frame
self.ui = Ui_Frame()
self.ui.setupUi(self)
self.dialogs = list()
self.tamanho_tela = self.size()
self.setFixedSize(self.tamanho_tela)
self.setWindowIcon(QtGui.QIcon("Imagens/logo_fzr.png"))
self.servico_selecionado = Servicos()
self.linha_selecionada = None
self.filtrado = False
self.adicionando = False
self.tela_venda = parent
self.setWindowModality(QtCore.Qt.ApplicationModal)
self.installEventFilter(EventFilter(self))
# ação dos botoes
self.ui.bt_refresh.clicked.connect(self.dados_tabela)
self.ui.bt_selecionar.clicked.connect(self.sair)
self.ui.bt_inserir.clicked.connect(self.add)
# ação da busca
self.ui.bt_busca_servicos.clicked.connect(self.buscar)
self.ui.tx_busca_servicos.returnPressed.connect(self.buscar)
# signals
self.ui.tb_servicos.cellClicked.connect(self.linha_clicada)
self.ui.tb_servicos.cellDoubleClicked.connect(self.linha_clicada)
self.ui.tx_busca_servicos.textChanged.connect(self.formatar_texto)
self.ui.cb_servicos.currentIndexChanged.connect(self.limpa_campo_busca)
for i in range(0, 3):
self.ui.tb_servicos.horizontalHeaderItem(i).setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.ui.tb_servicos.setColumnWidth(0, 30)
self.ui.tb_servicos.setColumnWidth(1, 250)
self.ui.tb_servicos.setColumnWidth(2, 100)
self.preenche_combo()
self.dados_tabela()
def add(self):
from Controller.cadastro_servicos import CadastroServicos
from Funcoes.utils import exec_app
self.adicionando = True
c_serv = CadastroServicos()
exec_app(c_serv)
self.dialogs.append(c_serv)
def limpa_campo_busca(self):
self.ui.tx_busca_servicos.setText("")
def resizeEvent(self, a0):
self.setFixedSize(self.tamanho_tela)
def formatar_texto(self):
texto = self.ui.tx_busca_servicos.text()
tamanho = len(texto)
if self.ui.cb_servicos.currentIndex() == 0:
if not texto[tamanho - 1:tamanho].isnumeric():
self.ui.tx_busca_servicos.setText(texto[:tamanho - 1])
if self.ui.cb_servicos.currentIndex() in (2, 3, 4):
if not texto[tamanho - 1:tamanho].isnumeric():
if texto[tamanho - 1:tamanho] != '.':
self.ui.tx_busca_servicos.setText(texto[:tamanho - 1])
if texto.count(".") > 1 and texto[tamanho - 1:tamanho] == '.':
self.ui.tx_busca_servicos.setText(texto[:tamanho - 1])
def preenche_combo(self):
self.ui.cb_servicos.clear()
self.ui.cb_servicos.addItem("ID")
self.ui.cb_servicos.addItem("DESCRIÇÃO")
self.ui.cb_servicos.addItem("PREÇO >")
self.ui.cb_servicos.addItem("PREÇO <")
self.ui.cb_servicos.addItem("PREÇO =")
def sair(self):
self.close()
def closeEvent(self, event: QtGui.QCloseEvent):
if self.servico_selecionado.id is not None:
self.tela_venda.codigo_item = self.servico_selecionado.id
self.tela_venda.ui.tx_busca_item.setText(f"{self.tela_venda.codigo_item}")
self.tela_venda.ui.tx_busca_item.setFocus()
self.tela_venda.recebeu_codigo_item = True
else:
box = QMessageBox()
box.setIcon(QMessageBox.Question)
box.setWindowTitle('Sair?')
box.setText('Tem certeza que deseja sair sem informar o código?')
box.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
button_sim = box.button(QMessageBox.Yes)
button_sim.setText('Sim')
button_nao = box.button(QMessageBox.No)
button_nao.setText('Não')
box.exec_()
if box.clickedButton() == button_sim:
event.accept()
else:
event.ignore()
def buscar(self):
self.ui.tb_servicos.clearContents()
self.ui.tb_servicos.setRowCount(0)
serv = Servicos()
dados = ""
if self.ui.cb_servicos.currentIndex() == 1:
serv.descricao = self.ui.tx_busca_servicos.text()
if serv.descricao:
dados = Servicos.get_servicos_by_desc(serv.descricao.upper())
else:
QMessageBox.warning(self, "Atenção!", "Favor informar algum valor!")
self.dados_tabela()
return
elif self.ui.cb_servicos.currentIndex() == 0:
if self.ui.tx_busca_servicos.text():
serv.id = int(self.ui.tx_busca_servicos.text())
dados = serv.get_servico_by_id()
else:
QMessageBox.warning(self, "Atenção!", "Favor informar algum valor!")
self.dados_tabela()
return
else:
texto = self.ui.tx_busca_servicos.text()
if texto:
if texto.count('.') >= 1:
indice_ponto = texto.find('.')
if texto[indice_ponto + 1:indice_ponto + 2] == '0':
self.ui.tx_busca_servicos.setText(texto[:indice_ponto])
serv.preco = float(self.ui.tx_busca_servicos.text())
if self.ui.cb_servicos.currentIndex() == 2:
dados = serv.get_servico_by_preco(">=")
elif self.ui.cb_servicos.currentIndex() == 3:
dados = serv.get_servico_by_preco("<=")
elif self.ui.cb_servicos.currentIndex() == 4:
dados = serv.get_servico_by_preco("=")
else:
QMessageBox.warning(self, "Atenção!", "Favor informar algum valor!")
self.dados_tabela()
return
if dados:
self.filtrado = True
self.ui.bt_refresh.setEnabled(True)
if type(dados) == list:
for i, linha in enumerate(dados):
item_id = QTableWidgetItem(str(linha[0]))
item_id.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_desc = QTableWidgetItem(linha[1])
item_desc.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_preco = QTableWidgetItem(str(linha[2]))
item_preco.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.ui.tb_servicos.insertRow(i)
for c in range(0, 3):
item = str(linha[c])
self.ui.tb_servicos.setItem(i, c, QTableWidgetItem(item))
else:
self.ui.tb_servicos.insertRow(0)
for c in range(0, 3):
item = str(dados[c])
self.ui.tb_servicos.setItem(0, c, QTableWidgetItem(item))
else:
QMessageBox.warning(self, "Erro", "Não foi encontrado nenhum registro!")
self.ui.tx_busca_servicos.setText("")
self.dados_tabela()
self.ui.tb_servicos.selectRow(0)
def linha_clicada(self):
tb = self.ui.tb_servicos
self.linha_selecionada = tb.currentRow()
self.servico_selecionado.id = int(tb.item(tb.currentRow(), 0).text())
c = self.servico_selecionado.get_servico_by_id()
self.servico_selecionado.descricao = c[1]
self.servico_selecionado.preco = c[2]
def dados_tabela(self):
self.servico_selecionado.id = None
self.ui.tx_busca_servicos.setText("")
if self.adicionando and not self.filtrado:
novo_serv = Servicos()
novo_id = novo_serv.ultimo_servico()
novo_serv.id = novo_id[0][0]
novo_serv.get_servico_by_id()
novo_serv.descricao = novo_serv.get_servico_by_id()[1]
novo_serv.preco = novo_serv.get_servico_by_id()[2]
item_id = QTableWidgetItem(str(novo_serv.id))
item_id.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_desc = QTableWidgetItem(novo_serv.descricao)
item_desc.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_preco = QTableWidgetItem(str(novo_serv.preco))
item_preco.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.ui.tb_servicos.insertRow(self.ui.tb_servicos.rowCount())
self.ui.tb_servicos.setItem(self.ui.tb_servicos.rowCount() - 1, 0, item_id)
self.ui.tb_servicos.setItem(self.ui.tb_servicos.rowCount() - 1, 1, item_desc)
self.ui.tb_servicos.setItem(self.ui.tb_servicos.rowCount() - 1, 2, item_preco)
self.ui.tb_servicos.selectRow(self.ui.tb_servicos.rowCount() - 1)
self.adicionando = False
else:
self.filtrado = False
self.ui.bt_refresh.setEnabled(False)
self.ui.tb_servicos.clearContents()
self.ui.tb_servicos.setRowCount(0)
dados = Servicos.get_todos_servicos()
for i, linha in enumerate(dados):
item_id = QTableWidgetItem(str(linha[0]))
item_id.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_desc = QTableWidgetItem(str(linha[1]))
item_desc.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
item_preco = QTableWidgetItem(str(linha[2]))
item_preco.setTextAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.ui.tb_servicos.insertRow(i)
for c in range(0, 3):
item = str(linha[c])
self.ui.tb_servicos.setItem(i, c, QTableWidgetItem(item))
if self.adicionando:
self.ui.tb_servicos.selectRow(self.ui.tb_servicos.rowCount() - 1)
self.adicionando = False
| 38.770073 | 104 | 0.603973 |
7956144c2313bfc249ff2ca11e8932687e81a7a0 | 236 | py | Python | tests/test_pylogging.py | twocucao/YaPyLib | 8fe56f35b9f45d3c7f688ab5842c3a1e50688e01 | [
"MIT"
] | 2 | 2017-05-21T01:58:37.000Z | 2018-02-23T15:35:14.000Z | tests/test_pylogging.py | twocucao/YaPyLib | 8fe56f35b9f45d3c7f688ab5842c3a1e50688e01 | [
"MIT"
] | 1 | 2021-06-08T19:12:08.000Z | 2021-06-08T19:12:08.000Z | tests/test_pylogging.py | twocucao/YaPyLib | 8fe56f35b9f45d3c7f688ab5842c3a1e50688e01 | [
"MIT"
] | null | null | null | from yapylib.logging import get_logger
def test_get_logger():
get_logger().debug("测试 DEBUG ")
get_logger().info("测试 INFO ")
get_logger().critical("测试 CRITICAL ")
get_logger().error("测试 ERROR ")
assert True == True
| 23.6 | 41 | 0.669492 |
795614cd7764f4e566c0653a9fcc2de725de22fe | 121,357 | py | Python | PILasOPENCV.py | bunkahle/PILasOPENCV | 5acfe7279a88ddfb09f431ffec369a527885072e | [
"MIT"
] | 19 | 2019-05-05T07:37:10.000Z | 2021-12-09T15:52:51.000Z | PILasOPENCV.py | bunkahle/PILasOPENCV | 5acfe7279a88ddfb09f431ffec369a527885072e | [
"MIT"
] | 5 | 2019-04-09T23:54:17.000Z | 2021-05-16T08:31:22.000Z | PILasOPENCV.py | bunkahle/PILasOPENCV | 5acfe7279a88ddfb09f431ffec369a527885072e | [
"MIT"
] | 6 | 2019-04-08T12:33:34.000Z | 2021-05-15T14:57:50.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import cv2
try:
import gif2numpy
gif2numpy_installed = True
except:
gif2numpy_installed = False
try:
import numpy2gif
numpy2gif_installed = True
except:
numpy2gif_installed = False
import re, os, sys, tempfile
import numbers
try:
import mss
import mss.tools
mss_installed = True
except:
mss_installed = False
from io import StringIO
try:
import ctypes
from ctypes.wintypes import WORD, DWORD, LONG
bitmap_classes_ok = True
except:
bitmap_classes_ok = False
try:
import freetype
freetype_installed = True
except:
freetype_installed = False
__author__ = 'imressed, bunkus'
VERSION = "2.9"
"""
Version history:
2.9: New functions of ImageEnhance Brightness and Contrast implemented
2.8: In case an image file does not exist which shall be opened there will be an exception raised
2.7: Bugfix when drawing text and lines or other draw objects the lines were not drawn, fixed
2.6: Bugfix for method show: Old windows were not deleted so it came to display errors, fixed
2.5: Bugfixes for coordinates which were given as float instead of integers when drawing polygons, texts, lines, points, rectangles
bugfix for composite when alphamask and images had not the same amount of channels
bugfix in floodfill when value was given as single integer
2.4: Caught several exceptions in case dependencies modules are not installed you can still work with the basic functions,
ImageDraw method bitmap implemented, ImageChops method screen implemented, saves now single or multiple frames in gif files
2.3: Updated the module for gif2numpy Version 1.2
2.2: Bugfix for Python3 on file objects, multiple frames from gifs can be loaded now and can be retrieved with seek(frame)
2.1: though OpenCV does not support gif images, PILasOPENCV now can load gif images by courtesy of the library gif2numpy
2.0: disabled ImageGrab.grabclipboard() in case it throws exceptions which happens e.g. on Ubuntu/Linux
1.9: disabled ImageGrab.grabclipboard() which throws exceptions on some platforms
1.8: ImageGrab.grab() and ImageGrab.grabclipboard() implemented with dependency on mss
1.7: fixed fromarray
1.6: fixed frombytes, getdata, putdata and caught exception in case freetype-py is not installed or dll is missing
"""
if sys.version[0] == "2":
py3 = False
basstring = basestring
fil_object = file
import cStringIO
from operator import isNumberType as isNumberTyp
from operator import isSequenceType as isSequenceTyp
else:
py3 = True
basstring = str
from io import IOBase
import collections
fil_object = IOBase
def isNumberTyp(obj):
return isinstance(obj, numbers.Number)
def isSequenceTyp(obj):
return isinstance(obj, collections.abc.Sequence)
NONE = 0
MAX_IMAGE_PIXELS = int(1024 * 1024 * 1024 // 4 // 3)
# transpose
FLIP_LEFT_RIGHT = 0
FLIP_TOP_BOTTOM = 1
ROTATE_90 = 2
ROTATE_180 = 3
ROTATE_270 = 4
TRANSPOSE = 5
TRANSVERSE = 6
# transforms
AFFINE = 0
EXTENT = 1
PERSPECTIVE = 2
QUAD = 3
MESH = 4
# resampling filters
NEAREST = NONE = 0
BOX = 4
BILINEAR = LINEAR = 2
HAMMING = 5
BICUBIC = CUBIC = 3
LANCZOS = ANTIALIAS = 1
# dithers
NEAREST = NONE = 0
ORDERED = 1 # Not yet implemented
RASTERIZE = 2 # Not yet implemented
FLOYDSTEINBERG = 3 # default
# palettes/quantizers
WEB = 0
ADAPTIVE = 1
MEDIANCUT = 0
MAXCOVERAGE = 1
FASTOCTREE = 2
LIBIMAGEQUANT = 3
# categories
NORMAL = 0
SEQUENCE = 1
CONTAINER = 2
NEAREST = cv2.INTER_NEAREST # 0
BILINEAR = INTER_LINEAR = cv2.INTER_LINEAR # 1
BICUBIC = cv2.INTER_CUBIC # 2
LANCZOS = cv2.INTER_LANCZOS4 # 4
INTERAREA = cv2.INTER_AREA # 3
# --------------------------------------------------------------------
# Registries
ID = []
OPEN = {}
MIME = {}
SAVE = {}
SAVE_ALL = {}
EXTENSION = {".bmp": "BMP", ".dib": "DIB", ".jpeg": "JPEG", ".jpg": "JPEG", ".jpe": "JPEG", ".jp2": "JPEG2000", ".png": "PNG",
".webp": "WEBP", ".pbm": "PBM", ".pgm": "PGM", ".ppm": "PPM", ".sr": "SR", ".ras": "RAS", ".tif": "TIFF", ".tiff": "TIFF", ".gif": "GIF"}
CV2_FONTS = [cv2.FONT_HERSHEY_SIMPLEX, cv2.FONT_HERSHEY_PLAIN, cv2.FONT_HERSHEY_DUPLEX,
cv2.FONT_HERSHEY_COMPLEX, cv2.FONT_HERSHEY_TRIPLEX, cv2.FONT_HERSHEY_COMPLEX_SMALL,
cv2.FONT_HERSHEY_SCRIPT_SIMPLEX, cv2.FONT_HERSHEY_SCRIPT_COMPLEX]
DECODERS = {}
ENCODERS = {}
# --------------------------------------------------------------------
# Modes supported by this version
_MODEINFO = {
# NOTE: this table will be removed in future versions. use
# getmode* functions or ImageMode descriptors instead.
# official modes
"1": ("L", "L", ("1",)),
"L": ("L", "L", ("L",)),
"I": ("L", "I", ("I",)),
"F": ("L", "F", ("F",)),
"P": ("RGB", "L", ("P",)),
"RGB": ("RGB", "L", ("R", "G", "B")),
"RGBX": ("RGB", "L", ("R", "G", "B", "X")),
"RGBA": ("RGB", "L", ("R", "G", "B", "A")),
"CMYK": ("RGB", "L", ("C", "M", "Y", "K")),
"YCbCr": ("RGB", "L", ("Y", "Cb", "Cr")),
"LAB": ("RGB", "L", ("L", "A", "B")),
"HSV": ("RGB", "L", ("H", "S", "V")),
# Experimental modes include I;16, I;16L, I;16B, RGBa, BGR;15, and
# BGR;24. Use these modes only if you know exactly what you're
# doing...
}
if sys.byteorder == 'little':
_ENDIAN = '<'
else:
_ENDIAN = '>'
_MODE_CONV = {
# official modes
"1": ('|b1', None), # Bits need to be extended to bytes
"L": ('|u1', None),
"LA": ('|u1', 2),
"I": (_ENDIAN + 'i4', None),
"F": (_ENDIAN + 'f4', None),
"P": ('|u1', None),
"RGB": ('|u1', 3),
"RGBX": ('|u1', 4),
"RGBA": ('|u1', 4),
"CMYK": ('|u1', 4),
"YCbCr": ('|u1', 3),
"LAB": ('|u1', 3), # UNDONE - unsigned |u1i1i1
"HSV": ('|u1', 3),
# I;16 == I;16L, and I;32 == I;32L
"I;16": ('<u2', None),
"I;16B": ('>u2', None),
"I;16L": ('<u2', None),
"I;16S": ('<i2', None),
"I;16BS": ('>i2', None),
"I;16LS": ('<i2', None),
"I;32": ('<u4', None),
"I;32B": ('>u4', None),
"I;32L": ('<u4', None),
"I;32S": ('<i4', None),
"I;32BS": ('>i4', None),
"I;32LS": ('<i4', None),
}
def _conv_type_shape(im):
    """Return the numpy (shape, dtype-string) pair describing *im*.

    Looks the image's mode up in the module-level _MODE_CONV table; the
    shape is (height, width) for single-band modes and
    (height, width, bands) for multi-band ones.
    """
    dtype_str, band_count = _MODE_CONV[im.mode]
    height, width = im.size[1], im.size[0]
    if band_count is None:
        return (height, width), dtype_str
    return (height, width, band_count), dtype_str
MODES = sorted(_MODEINFO)
# raw modes that may be memory mapped. NOTE: if you change this, you
# may have to modify the stride calculation in map.c too!
_MAPMODES = ("L", "P", "RGBX", "RGBA", "CMYK", "I;16", "I;16L", "I;16B")
if bitmap_classes_ok:
try:
class BITMAPFILEHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('bfType', WORD), # file type ("BM")
('bfSize', DWORD), # file size in bytes
('bfReserved1', WORD), # must be zero
('bfReserved2', WORD), # must be zero
('bfOffBits', DWORD), # byte offset to the pixel array
]
SIZEOF_BITMAPFILEHEADER = ctypes.sizeof(BITMAPFILEHEADER)
class BITMAPINFOHEADER(ctypes.Structure):
_pack_ = 1 # structure field byte alignment
_fields_ = [
('biSize', DWORD),
('biWidth', LONG),
('biHeight', LONG),
('biPLanes', WORD),
('biBitCount', WORD),
('biCompression', DWORD),
('biSizeImage', DWORD),
('biXPelsPerMeter', LONG),
('biYPelsPerMeter', LONG),
('biClrUsed', DWORD),
('biClrImportant', DWORD)
]
SIZEOF_BITMAPINFOHEADER = ctypes.sizeof(BITMAPINFOHEADER)
bitmap_classes_ok = True
except:
bitmap_classes_ok = False
def getmodebase(mode):
    """
    Gets the "base" mode for given mode.  This function returns "L" for
    images that contain grayscale data, and "RGB" for images that
    contain color data.

    :param mode: Input mode.
    :returns: "L" or "RGB".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode().getmode(mode)
    return descriptor.basemode
def getmodetype(mode):
    """
    Gets the storage type mode.  Given a mode, this function returns a
    single-layer mode suitable for storing individual bands.

    :param mode: Input mode.
    :returns: "L", "I", or "F".
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode().getmode(mode)
    return descriptor.basetype
def getmodebandnames(mode):
    """
    Gets a list of individual band names.  Given a mode, this function returns
    a tuple containing the names of individual bands (use
    :py:method:`~PIL.Image.getmodetype` to get the mode used to store each
    individual band.

    :param mode: Input mode.
    :returns: A tuple containing band names.  The length of the tuple
        gives the number of bands in an image of the given mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    descriptor = ImageMode().getmode(mode)
    return descriptor.bands
def getmodebands(mode):
    """
    Gets the number of individual bands for this mode.

    :param mode: Input mode.
    :returns: The number of bands in this mode.
    :exception KeyError: If the input mode was not a standard mode.
    """
    band_names = ImageMode().getmode(mode).bands
    return len(band_names)
colormap = {
# X11 colour table from https://drafts.csswg.org/css-color-4/, with
# gray/grey spelling issues fixed. This is a superset of HTML 4.0
# colour names used in CSS 1.
"aliceblue": "#f0f8ff",
"antiquewhite": "#faebd7",
"aqua": "#00ffff",
"aquamarine": "#7fffd4",
"azure": "#f0ffff",
"beige": "#f5f5dc",
"bisque": "#ffe4c4",
"black": "#000000",
"blanchedalmond": "#ffebcd",
"blue": "#0000ff",
"blueviolet": "#8a2be2",
"brown": "#a52a2a",
"burlywood": "#deb887",
"cadetblue": "#5f9ea0",
"chartreuse": "#7fff00",
"chocolate": "#d2691e",
"coral": "#ff7f50",
"cornflowerblue": "#6495ed",
"cornsilk": "#fff8dc",
"crimson": "#dc143c",
"cyan": "#00ffff",
"darkblue": "#00008b",
"darkcyan": "#008b8b",
"darkgoldenrod": "#b8860b",
"darkgray": "#a9a9a9",
"darkgrey": "#a9a9a9",
"darkgreen": "#006400",
"darkkhaki": "#bdb76b",
"darkmagenta": "#8b008b",
"darkolivegreen": "#556b2f",
"darkorange": "#ff8c00",
"darkorchid": "#9932cc",
"darkred": "#8b0000",
"darksalmon": "#e9967a",
"darkseagreen": "#8fbc8f",
"darkslateblue": "#483d8b",
"darkslategray": "#2f4f4f",
"darkslategrey": "#2f4f4f",
"darkturquoise": "#00ced1",
"darkviolet": "#9400d3",
"deeppink": "#ff1493",
"deepskyblue": "#00bfff",
"dimgray": "#696969",
"dimgrey": "#696969",
"dodgerblue": "#1e90ff",
"firebrick": "#b22222",
"floralwhite": "#fffaf0",
"forestgreen": "#228b22",
"fuchsia": "#ff00ff",
"gainsboro": "#dcdcdc",
"ghostwhite": "#f8f8ff",
"gold": "#ffd700",
"goldenrod": "#daa520",
"gray": "#808080",
"grey": "#808080",
"green": "#008000",
"greenyellow": "#adff2f",
"honeydew": "#f0fff0",
"hotpink": "#ff69b4",
"indianred": "#cd5c5c",
"indigo": "#4b0082",
"ivory": "#fffff0",
"khaki": "#f0e68c",
"lavender": "#e6e6fa",
"lavenderblush": "#fff0f5",
"lawngreen": "#7cfc00",
"lemonchiffon": "#fffacd",
"lightblue": "#add8e6",
"lightcoral": "#f08080",
"lightcyan": "#e0ffff",
"lightgoldenrodyellow": "#fafad2",
"lightgreen": "#90ee90",
"lightgray": "#d3d3d3",
"lightgrey": "#d3d3d3",
"lightpink": "#ffb6c1",
"lightsalmon": "#ffa07a",
"lightseagreen": "#20b2aa",
"lightskyblue": "#87cefa",
"lightslategray": "#778899",
"lightslategrey": "#778899",
"lightsteelblue": "#b0c4de",
"lightyellow": "#ffffe0",
"lime": "#00ff00",
"limegreen": "#32cd32",
"linen": "#faf0e6",
"magenta": "#ff00ff",
"maroon": "#800000",
"mediumaquamarine": "#66cdaa",
"mediumblue": "#0000cd",
"mediumorchid": "#ba55d3",
"mediumpurple": "#9370db",
"mediumseagreen": "#3cb371",
"mediumslateblue": "#7b68ee",
"mediumspringgreen": "#00fa9a",
"mediumturquoise": "#48d1cc",
"mediumvioletred": "#c71585",
"midnightblue": "#191970",
"mintcream": "#f5fffa",
"mistyrose": "#ffe4e1",
"moccasin": "#ffe4b5",
"navajowhite": "#ffdead",
"navy": "#000080",
"oldlace": "#fdf5e6",
"olive": "#808000",
"olivedrab": "#6b8e23",
"orange": "#ffa500",
"orangered": "#ff4500",
"orchid": "#da70d6",
"palegoldenrod": "#eee8aa",
"palegreen": "#98fb98",
"paleturquoise": "#afeeee",
"palevioletred": "#db7093",
"papayawhip": "#ffefd5",
"peachpuff": "#ffdab9",
"peru": "#cd853f",
"pink": "#ffc0cb",
"plum": "#dda0dd",
"powderblue": "#b0e0e6",
"purple": "#800080",
"rebeccapurple": "#663399",
"red": "#ff0000",
"rosybrown": "#bc8f8f",
"royalblue": "#4169e1",
"saddlebrown": "#8b4513",
"salmon": "#fa8072",
"sandybrown": "#f4a460",
"seagreen": "#2e8b57",
"seashell": "#fff5ee",
"sienna": "#a0522d",
"silver": "#c0c0c0",
"skyblue": "#87ceeb",
"slateblue": "#6a5acd",
"slategray": "#708090",
"slategrey": "#708090",
"snow": "#fffafa",
"springgreen": "#00ff7f",
"steelblue": "#4682b4",
"tan": "#d2b48c",
"teal": "#008080",
"thistle": "#d8bfd8",
"tomato": "#ff6347",
"turquoise": "#40e0d0",
"violet": "#ee82ee",
"wheat": "#f5deb3",
"white": "#ffffff",
"whitesmoke": "#f5f5f5",
"yellow": "#ffff00",
"yellowgreen": "#9acd32",
}
class ImagePointHandler:
    """Marker mixin for point-transform objects (used with im.point)."""
    # used as a mixin by point transforms (for use with im.point)
    pass
class ImageTransformHandler:
    """Marker mixin for geometry-transform objects (used with im.transform)."""
    # used as a mixin by geometry transforms (for use with im.transform)
    pass
class Image(object):
def __init__(self, image=None, filename=None, format=None, instances=[], exts=[], image_specs={}):
self._instance = image
self.filename = filename
self.format = format
self.frames = instances
self.n_frames = len(self.frames)
if self.n_frames>1:
self.is_animated = True
else:
self.is_animated = False
self._frame_nr = 0
self.exts = exts
self.image_specs = image_specs
self._mode = None
if image is not None or filename is not None:
if self.filename is not None:
ext = os.path.splitext(self.filename)[1]
self.format = EXTENSION[ext]
if self._instance is not None:
self.size = (self._instance.shape[1], self._instance.shape[0])
if len(self._instance.shape)>2:
self.layers = self.bands = self._instance.shape[2]
else:
self.layers = self.bands = 1
self.dtype = self._instance.dtype
if self.dtype == np.uint8:
self.bits = 8
self._mode = self._get_mode(self._instance.shape, self.dtype)
else:
self._mode = None
self.size = (0, 0)
self.dtype = None
self.mode = self._mode
# @property
# def size(self):
# return self._instance.shape[:2][::-1]
# @property
# def width(self):
# return self._instance.shape[1]
# @property
# def height(self):
# return self._instance.size[0]
# @property
# def mode(self):
# if self._mode:
# return self._mode
# else:
# raise ValueError('No mode specified.')
# @property
# def shape(self):
# return self._instance.shape
# @property
# def get_instance(self):
# return self._instance
def _get_channels_and_depth(self, mode):
mode = str(mode).upper()
if mode == '1':
return 1 , np.bool
if mode == 'L':
return 1, np.uint8
if mode == 'LA':
return 2, np.uint8
if mode == 'P':
return 1, np.uint8
if mode == 'RGB':
return 3, np.uint8
if mode == 'RGBA':
return 4, np.uint8
if mode == 'CMYK':
return 4, np.uint8
if mode == 'YCBCR':
return 3, np.uint8
if mode == 'LAB':
return 3, np.uint8
if mode == 'HSV':
return 3, np.uint8
if mode == 'I':
return 1, np.int32
if mode == 'F':
return 1, np.float32
raise ValueError('Your mode name is incorrect.')
    def _get_converting_flag(self, mode, inst=None):
        "returns the cv2 flag for color conversion from inst to mode, uses the mode of the image object by default"
        mode = mode.upper()
        if inst is None:
            inst = self._mode.upper()
        if mode == inst:
            # sentinel: caller copies the array instead of converting
            return "EQUAL"
        # source mode -> {target mode: cv2 cvtColor code}; only the listed
        # pairs are supported.  Note "1" shares the GRAY codes with "L" --
        # the thresholding step is applied by the caller (_convert).
        converting_table = {
            'L':{
                'RGB':cv2.COLOR_GRAY2BGR,
                'RGBA':cv2.COLOR_GRAY2BGRA
            },
            'RGB':{
                '1':cv2.COLOR_BGR2GRAY,
                'L':cv2.COLOR_BGR2GRAY,
                'LAB':cv2.COLOR_BGR2LAB,
                'HSV':cv2.COLOR_BGR2HSV,
                'YCBCR':cv2.COLOR_BGR2YCR_CB,
                'RGBA':cv2.COLOR_BGR2BGRA
            },
            'RGBA':{
                '1':cv2.COLOR_BGRA2GRAY,
                'L':cv2.COLOR_BGRA2GRAY,
                'RGB':cv2.COLOR_BGRA2BGR
            },
            'LAB':{
                'RGB':cv2.COLOR_LAB2BGR
            },
            'HSV':{
                'RGB':cv2.COLOR_HSV2BGR
            },
            'YCBCR':{
                'RGB':cv2.COLOR_YCR_CB2BGR
            }
        }
        if inst in converting_table:
            if mode in converting_table[inst]:
                return converting_table[inst][mode]
            else:
                raise ValueError('You can not convert image to this type')
        else:
            raise ValueError('This image type can not be converted')
def _get_mode(self, shape, depth):
if len(shape) == 2:
channels = 1
else:
channels = shape[2]
if channels == 1 and depth == np.bool:
return '1'
if channels == 1 and depth == np.uint8:
return 'L'
if channels == 1 and depth == np.uint8:
return 'P'
if channels == 2 and depth == np.uint8:
return 'LA'
if channels == 3 and depth == np.uint8:
return 'RGB'
if channels == 4 and depth == np.uint8:
return 'RGBA'
if channels == 4 and depth == np.uint8:
return 'CMYK'
if channels == 3 and depth == np.uint8:
return 'YCBCR'
if channels == 3 and depth == np.uint8:
return 'LAB'
if channels == 3 and depth == np.uint8:
return 'HSV'
if channels == 1 and depth == np.int32:
return 'I'
if channels == 1 and depth == np.float32 :
return 'F'
def _new(self, mode, size, color=None):
self._mode = mode
channels, depth = self._get_channels_and_depth(mode)
size = size[::-1]
self._instance = np.zeros(size + (channels,), depth)
if color is not None:
self._instance[:, 0:] = color
return self._instance
    def alpha_composite(self, im, dest=(0, 0), source=(0, 0)):
        """ 'In-place' analog of Image.alpha_composite. Composites an image
        onto this image.

        :param im: image to composite over this one
        :param dest: Optional 2 tuple (left, top) specifying the upper
          left corner in this (destination) image.
        :param source: Optional 2 (left, top) tuple for the upper left
          corner in the overlay source image, or 4 tuple (left, top, right,
          bottom) for the bounds of the source rectangle

        Performance Note: Not currently implemented in-place in the core layer.
        """
        # validate argument shapes before touching any pixel data
        if not isinstance(source, (list, tuple)):
            raise ValueError("Source must be a tuple")
        if not isinstance(dest, (list, tuple)):
            raise ValueError("Destination must be a tuple")
        if not len(source) in (2, 4):
            raise ValueError("Source must be a 2 or 4-tuple")
        if not len(dest) == 2:
            raise ValueError("Destination must be a 2-tuple")
        if min(source) < 0:
            raise ValueError("Source must be non-negative")
        if min(dest) < 0:
            raise ValueError("Destination must be non-negative")

        channels, depth = self._get_channels_and_depth(im)
        _mode = self._get_mode(im.shape, im.dtype)
        # NOTE(review): _new() returns a plain numpy array, but the code
        # below uses .size / .crop() / .width / .height on it as if it were
        # an Image instance -- verify this path is actually exercised.
        _im = self._new(_mode, (im.shape[1], im.shape[0]))

        if len(source) == 2:
            source = source + _im.size

        # over image, crop if it's not the whole thing.
        if source == (0, 0) + _im.size:
            overlay = _im
        else:
            overlay = _im.crop(source)

        # target for the paste
        box = dest + (dest[0] + overlay.width, dest[1] + overlay.height)

        # destination image. don't copy if we're using the whole image.
        if box == (0, 0) + self.size:
            background = self._instance
        else:
            background = self.crop(box)

        result = alpha_composite(background, overlay)
        self.paste(result, box)
def crop(self, box, image=None):
"crops the image to the box which is a tuple = left, upper, right, lower"
if image is None:
part = self._instance[box[1]:box[3], box[0]:box[2]]
return Image(part)
else:
image = image[box[1]:box[3], box[0]:box[2]]
return image
def copy(self):
"returns a deep copy of the original"
return Image(self._instance.copy(), format=self.format)
    def close(self):
        "closes all opened windows"
        # destroys every HighGUI window (e.g. ones opened by show())
        cv2.destroyAllWindows()
        return None
    def draft(self, mode, size):
        """
        Configures the image file loader so it returns a version of the
        image that as closely as possible matches the given mode and
        size. For example, you can use this method to convert a color
        JPEG to greyscale while loading it, or to extract a 128x192
        version from a PCD file.

        Note that this method modifies the :py:class:`~PIL.Image.Image` object
        in place. If the image has already been loaded, this method has no
        effect.

        Note: This method is not implemented for most images. It is
        currently implemented only for JPEG and PCD images.

        :param mode: The requested mode.
        :param size: The requested size.
        """
        # intentionally a no-op in this OpenCV-backed implementation
        pass
def frombytes(self, mode, size, data, decoder_name="raw", *args):
"""
Loads this image with pixel data from a bytes object.
This method is similar to the :py:func:`~PIL.Image.frombytes` function,
but loads data into this image instead of creating a new image object.
"""
# may pass tuple instead of argument list
if len(args) == 1 and isinstance(args[0], tuple):
args = args[0]
# default format
if decoder_name == "raw" and args == ():
args = self.mode
# unpack data
channels, depth = self._get_channels_and_depth(mode)
self._instance = np.fromstring(data, dtype=depth)
try:
self._instance = self._instance.reshape((size[1], size[0], channels))
except:
raise ValueError("not enough image data")
try:
self._instance = self._instance.astype(depth)
if channels == 3:
self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGR2RGB)
elif channels == 4:
self._instance = cv2.cvtColor(self._instance, cv2.COLOR_BGRA2RGBA)
except:
raise ValueError("cannot decode image data")
def fromstring(self, mode, size, data, decoder_name="raw", *args):
# raise NotImplementedError("fromstring() has been removed. "
# "Please call frombytes() instead.")
self.frombytes(mode, size, data, decoder_name, *args)
    def convert(self, mode):
        "converts an image to the given mode"
        # same mode: hand back an independent copy instead of converting
        if self._mode.upper() == mode.upper():
            return Image(self._instance.copy())
        # NOTE(review): if mode is falsy the first comparison above already
        # raised AttributeError on mode.upper(), so this branch looks
        # unreachable; also self.palette is never assigned in this class --
        # verify before relying on the "P" path.
        if not mode and self.mode == "P":
            # determine default mode
            if self.palette:
                mode = self.palette.mode
            else:
                mode = "RGB"
        if not mode or (mode == self.mode):
            return Image(self._instance.copy())
        return Image(self._convert(mode))
def _convert(self, mode, obj=None):
if obj is None:
obj = self._instance
flag = self._get_converting_flag(mode)
else:
orig_mode = self._get_mode(obj.shape, obj.dtype)
flag = self._get_converting_flag(mode, inst=orig_mode)
if flag == "EQUAL":
return obj.copy()
if mode == "1":
im_gray = cv2.cvtColor(obj, cv2.COLOR_BGR2GRAY)
thresh, converted = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
else:
converted = cv2.cvtColor(obj, flag)
return converted
    def paste(self, img_color, box=None, mask=None):
        "pastes either an image or a color to a region of interest defined in box with a mask"
        if isinstance(img_color, Image): # pasting an image
            _img_color = img_color._instance
            if box is None:
                box = (0, 0)
            else:
                # a 4-item box must exactly match the pasted image's size
                if len(box) == 4:
                    if not(box[2]-box[0]==_img_color.shape[1] and box[3]-box[1]==_img_color.shape[0]):
                        raise ValueError("images do not match")
            # convert modes
            if len(img_color._instance.shape) == 3:
                if img_color._instance.shape[2] != self._instance.shape[2] or img_color._instance.dtype != self._instance.dtype:
                    dest_mode = self._mode
                    _img_color = self._convert(dest_mode, obj=_img_color)
            elif len(img_color._instance.shape) != len(self._instance.shape):
                dest_mode = self._mode
                _img_color = self._convert(dest_mode, obj=_img_color)
        else: # pasting a colorbox
            if box is None:
                raise ValueError("cannot determine region size; use 4-item box")
            # build a solid-colour patch matching the box extent
            img_dim = (box[3]-box[1]+1, box[2]-box[0]+1)
            channels, depth = self._get_channels_and_depth(self._mode)
            colorbox = np.zeros((img_dim[0], img_dim[1], channels), dtype=depth)
            colorbox[:] = img_color
            _img_color = colorbox.copy()
        if mask is None:
            self._instance = self._paste(self._instance, _img_color, box[0], box[1])
        else:
            # enlarge the image _img_color without resizing to the new_canvas
            new_canvas = np.zeros(self._instance.shape, dtype=self._instance.dtype)
            new_canvas = self._paste(new_canvas, _img_color, box[0], box[1])
            # derive a single-channel mask from the given mask image
            if len(mask._instance.shape) == 3:
                if mask._instance.shape[2] == 4: # RGBA
                    r, g, b, _mask = self.split(mask)
                elif mask._instance.shape[2] == 1:
                    _mask = mask._instance.copy()
                # NOTE(review): a 3-channel mask leaves _mask unassigned and
                # would raise NameError below -- verify callers never pass one
            else:
                _mask = mask._instance.copy()
            if _mask.shape[:2] != new_canvas.shape[:2]:
                # pad the mask onto a full-size canvas, then invert it
                _new_mask = np.zeros(self._instance.shape[:2], dtype=self._instance.dtype)
                _new_mask = ~(self._paste(_new_mask, _mask, box[0], box[1]))
            else:
                _new_mask = ~_mask
            self._instance = composite(self._instance, new_canvas, _new_mask, np_image=True)
    def _paste(self, mother, child, x, y):
        "Pastes the numpy image child into the numpy image mother at position (x, y)"
        size = mother.shape
        csize = child.shape
        # completely outside the mother image: nothing to do
        if y+csize[0]<0 or x+csize[1]<0 or y>size[0] or x>size[1]: return mother
        # sel  = [top, left, bottom, right] target window inside mother
        # csel = [top, left, bottom, right] source window inside child
        sel = [int(y), int(x), csize[0], csize[1]]
        csel = [0, 0, csize[0], csize[1]]
        # clip vertically against the mother image's bounds
        if y<0:
            sel[0] = 0
            sel[2] = csel[2] + y
            csel[0] = -y
        elif y+sel[2]>=size[0]:
            sel[2] = int(size[0])
            csel[2] = size[0]-y
        else:
            sel[2] = sel[0] + sel[2]
        # clip horizontally against the mother image's bounds
        if x<0:
            sel[1] = 0
            sel[3] = csel[3] + x
            csel[1] = -x
        elif x+sel[3]>=size[1]:
            sel[3] = int(size[1])
            csel[3] = size[1]-x
        else:
            sel[3] = sel[1] + sel[3]
        # copy the overlapping region; mother is modified in place
        childpart = child[csel[0]:csel[2], csel[1]:csel[3]]
        mother[sel[0]:sel[2], sel[1]:sel[3]] = childpart
        return mother
def _scaleTo8Bit(self, image, div, displayMin=None, displayMax=None):
if displayMin == None:
displayMin = np.min(image)
if displayMax == None:
displayMax = np.max(image)
np.clip(image, displayMin, displayMax, out=image)
image = image - displayMin
cf = 255. / (displayMax - displayMin)
imageOut = (cf*image).astype(np.uint8)
return imageOut
def _filter_kernel(self, fa):
kernel = np.array(fa[3], dtype=np.float32)/fa[1]
kernel = kernel.reshape(fa[0])
# print(kernel)
return kernel
    def filter(self, filtermethod):
        "Filters this image using the given filter."
        # GaussianBlur carries its own implementation; delegate to it
        if filtermethod.name == "GaussianBlur":
            return GaussianBlur().filter(self)
        fa = filtermethod.filterargs
        if filtermethod == EMBOSS:
            # emboss can overflow uint8, so convolve in float32 and
            # rescale back to 8 bit afterwards
            _im = self._instance.astype(np.float32)
            _im = cv2.filter2D(_im, -1, self._filter_kernel(fa))
            _im = self._scaleTo8Bit(_im, fa[2])
        elif filtermethod == CONTOUR:
            _im = cv2.filter2D(self._instance, -1, self._filter_kernel(fa))
            # contour is the inverted edge response
            _im = ~_im
        else:
            _im = cv2.filter2D(self._instance, -1, self._filter_kernel(fa))
        return Image(_im)
def getband(self, channel):
channels, depth = self._get_channels_and_depth(self._mode)
if channels == 1:
return self._instance.copy()
else:
chs = self.split()
return chs[channel]
def getbands(self):
return tuple([i for i in self._mode])
def getbbox(self):
"""
Calculates the bounding box of the non-zero regions in the
image.
:returns: The bounding box is returned as a 4-tuple defining the
left, upper, right, and lower pixel coordinate. See
:ref:`coordinate-system`. If the image is completely empty, this
method returns None.
"""
img_ = (self._instance > 0)
rows = np.any(img_, axis=1)
cols = np.any(img_, axis=0)
rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))
cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))
return (rmin, rmax, cmin, cmax)
    def _getcolors(self):
        """Return (unique_colors, counts) arrays over all pixels.

        Single-band images yield scalar colour values; multi-band images
        yield rows of 3 values (alpha is dropped for RGBA input).
        """
        channels, depth = self._get_channels_and_depth(self._mode)
        if channels == 1:
            img = self._instance.copy()
            y = img.shape[0]
            x = img.shape[1]
            flattened = img.reshape((x*y, 1))
            uni, counts = np.unique(flattened, return_counts=True)
        else:
            if channels == 4:
                # drop the alpha channel before counting colours
                r ,g, b, a = self.split()
                colorband = (r, g, b)
                img = merge("RGB", colorband, image=True)
            else: # channels == 3
                img = self._instance.copy()
            y = img.shape[0]
            x = img.shape[1]
            flattened = img.reshape((x*y, 3))
            # axis=0 makes np.unique treat each row (pixel) as one value
            uni, counts = np.unique(flattened, axis=0, return_counts=True)
        return uni, counts
def getcolors(self, maxcolors=256):
"""
Returns a list of colors used in this image.
:param maxcolors: Maximum number of colors. If this number is
exceeded, this method returns None. The default limit is
256 colors.
:returns: An unsorted list of (count, pixel) values.
"""
if self._mode in ("1", "L", "P"):
h = self._instance.histogram()
out = []
for i in range(256):
if h[i]:
out.append((h[i], i))
if len(out) > maxcolors:
return None
return out
uni, counts = self._getcolors()
if c>maxcolors: return None
colors = []
for l in range(len(counts)):
colors.append((counts[l], l))
return colors
def getdata(self, band=None):
channels, depth = self._get_channels_and_depth(self._mode)
flattened = self._instance.reshape((self.size[0]*self.size[1], channels))
return flattened
def getextrema(self):
return (np.minimum(self._instance), np.maximum(self._instance))
def getim(self):
return self._instance
def getpalette(self):
uni, counts = self._getcolors()
colors = list(np.ravel(uni))
return colors
def getpixel(self, xytup):
return self._instance[y, x]
def histogram(self, mask=None, extrema=None):
"""
Returns a histogram for the image. The histogram is returned as
a list of pixel counts, one for each pixel value in the source
image. If the image has more than one band, the histograms for
all bands are concatenated (for example, the histogram for an
"RGB" image contains 768 values).
A bilevel image (mode "1") is treated as a greyscale ("L") image
by this method.
If a mask is provided, the method returns a histogram for those
parts of the image where the mask image is non-zero. The mask
image must have the same size as the image, and be either a
bi-level image (mode "1") or a greyscale image ("L").
:param mask: An optional mask.
:returns: A list containing pixel counts.
"""
uni, counts = self._getcolors()
return [l for l in counts]
    def offset(self, xoffset, yoffset=None):
        # kept only to mirror PIL's removal notice for this API
        raise NotImplementedError("offset() has been removed. "
                                  "Please call ImageChops.offset() instead.")
    def point(self, lut, mode=None):
        "Map image through lookup table"
        # lookup-table remapping is not supported by this OpenCV port
        raise NotImplementedError("point() has been not implemented in this library. ")
def putpixel(self, xytup, color):
self._instance[xytup[1], xytup[0]] = color
def putalpha(self, alpha):
"""
Adds or replaces the alpha layer in this image. If the image
does not have an alpha layer, it's converted to "LA" or "RGBA".
The new layer must be either "L" or "1".
:param alpha: The new alpha layer. This can either be an "L" or "1"
image having the same size as this image, or an integer or
other color value.
"""
channels, depth = self._get_channels_and_depth(self._mode)
if isinstance(alpha, np.ndarray):
paste_image = True
else:
paste_image = False
if channels==4:
r, g, b, a = self.split()
if not paste_image:
a[:] = alpha
else:
a = alpha.copy()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels == 3:
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
r, g, b = self.split()
colorband = (r, g, b, a)
self._instance = merge("RGBA", colorband, image=True)
elif channels < 2: # "L" or "LA"
if not paste_image:
sh = self._instance.shape
sh = (sh[0], sh[1], 1)
a = np.zeros(sh, dtype=depth)
a[:] = alpha
else:
a = alpha.copy()
if channels == 2:
l, a_old = self.split()
colorband = (l, a)
else:
colorband = (self._instance, a)
self._instance = merge("LA", colorband, image=True)
def putdata(self, dat, scale=1.0, offset=0.0):
"""
Copies pixel data to this image. This method copies data from a
sequence object into the image, starting at the upper left
corner (0, 0), and continuing until either the image or the
sequence ends. The scale and offset values are used to adjust
the sequence values: **pixel = value*scale + offset**.
:param data: A sequence object.
:param scale: An optional scale value. The default is 1.0.
:param offset: An optional offset value. The default is 0.0.
"""
data = np.array(dat)
data = data * scale + offset
channels, depth = self._get_channels_and_depth(self._mode)
siz = self.size
_im = np.ravel(self._instance)
data = data[:len(_im)]
_im = _im[:len(data)] = data
self._instance = _im.reshape((siz[1], siz[0], channels))
self._instance = self._instance.astype(depth)
    def putpalette(self, data, rawmode="RGB"):
        # palette ("P" mode) handling is not supported by this OpenCV port
        raise NotImplementedError("putpalette() has been not implemented in this library. ")
    def quantize(self, colors=256, method=None, kmeans=0, palette=None):
        # colour quantisation is not supported by this OpenCV port
        raise NotImplementedError("quantize() has been not implemented in this library. ")
    def remap_palette(self, dest_map, source_palette=None):
        # palette remapping is not supported by this OpenCV port
        raise NotImplementedError("remap_palette() has been not implemented in this library. ")
def resize(self, size, filtermethod = cv2.INTER_LINEAR, image=None):
"resizes an image according to the given filter/interpolation method NEAREST, BILINEAR/INTER_LINEAR, BICUBIC, LANCZOS, INTERAREA"
if image is None:
_im = cv2.resize(self._instance, size, interpolation = filtermethod)
return Image(_im)
else:
return cv2.resize(image, size, interpolation = filtermethod)
    def rotate_bound(self, angle, fillcolor=None):
        """Rotate clockwise by *angle* degrees, enlarging the canvas so the
        whole rotated image fits.  Newly exposed border pixels are filled
        with *fillcolor* (cv2 default when None).
        """
        # grab the dimensions of the image and then determine the
        # center
        h, w = self._instance.shape[:2]
        (cX, cY) = (w // 2, h // 2)
        # grab the rotation matrix (applying the negative of the
        # angle to rotate clockwise), then grab the sine and cosine
        # (i.e., the rotation components of the matrix)
        M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
        cos = np.abs(M[0, 0])
        sin = np.abs(M[0, 1])
        # compute the new bounding dimensions of the image
        nW = int((h * sin) + (w * cos))
        nH = int((h * cos) + (w * sin))
        # adjust the rotation matrix to take into account translation
        M[0, 2] += (nW / 2) - cX
        M[1, 2] += (nH / 2) - cY
        # perform the actual rotation and return the image
        return cv2.warpAffine(self._instance, M, (nW, nH), borderValue=fillcolor)
def translated(self, image, x, y):
# define the translation matrix and perform the translation
M = np.float32([[1, 0, x], [0, 1, y]])
shifted = cv2.warpAffine(image, M, (image.shape[1], image.shape[0]))
# return the translated image
return shifted
def rotate(self, angle, resample=NEAREST, expand=0, center=None,
translate=None, fillcolor=None):
"""
Returns a rotated copy of this image. This method returns a
copy of this image, rotated the given number of degrees counter
clockwise around its centre.
:param angle: In degrees counter clockwise.
:param resample: An optional resampling filter. This can be
one of :py:attr:`PIL.Image.NEAREST` (use nearest neighbour),
:py:attr:`PIL.Image.BILINEAR` (linear interpolation in a 2x2
environment), or :py:attr:`PIL.Image.BICUBIC`
(cubic spline interpolation in a 4x4 environment).
If omitted, or if the image has mode "1" or "P", it is
set :py:attr:`PIL.Image.NEAREST`. See :ref:`concept-filters`.
:param expand: Optional expansion flag. If true, expands the output
image to make it large enough to hold the entire rotated image.
If false or omitted, make the output image the same size as the
input image. Note that the expand flag assumes rotation around
the center and no translation.
:param center: Optional center of rotation (a 2-tuple). Origin is
the upper left corner. Default is the center of the image.
:param translate: An optional post-rotate translation (a 2-tuple).
:param fillcolor: An optional color for area outside the rotated image.
:returns: An :py:class:`~PIL.Image.Image` object.
"""
angle = angle % 360.0
if fillcolor is None:
fillcolor = (0, 0, 0)
if expand == 0:
# grab the dimensions of the image
h, w = self.size[1], self.size[0]
# if the center is None, initialize it as the center of
# the image
if center is None:
center = (w // 2, h // 2)
scale = 1.0
# perform the rotation
M = cv2.getRotationMatrix2D(center, angle, scale)
_im = cv2.warpAffine(self._instance, M, (w, h), borderValue=fillcolor)
else:
_im = self.rotate_bound(angle)
if translate is not None:
_im = self.translated(_im, translate[0], translate[0])
return Image(_im)
def save(self, fp, format=None, **params):
"""
Saves this image under the given filename. If no format is
specified, the format to use is determined from the filename
extension, if possible.
Keyword options can be used to provide additional instructions
to the writer. If a writer doesn't recognise an option, it is
silently ignored. The available options are described in the
:doc:`image format documentation
<../handbook/image-file-formats>` for each writer.
You can use a file object instead of a filename. In this case,
you must always specify the format. The file object must
implement the ``seek``, ``tell``, and ``write``
methods, and be opened in binary mode.
:param fp: A filename (string), pathlib.Path object or file object.
:param format: Optional format override. If omitted, the
format to use is determined from the filename extension.
If a file object was used instead of a filename, this
parameter should always be used.
:param params: Extra parameters to the image writer.
:returns: None
:exception ValueError: If the output format could not be determined
from the file name. Use the format option to solve this.
:exception IOError: If the file could not be written. The file
may have been created, and may contain partial data.
"""
if isinstance(fp, basstring):
if fp.lower().endswith(".gif"):
if numpy2gif_installed:
if self.is_animated:
numpy2gif.write_gif(self.frames, fp, fps=100//self.exts[0][['delay_time']])
else:
numpy2gif.write_gif(self._instance, fp)
else:
NotImplementedError("numpy2gif is not installed so cannot save gif images, install it with: pip install numpy2gif")
else:
cv2.imwrite(fp, self._instance)
return None
if isinstance(fp, fil_object):
fl = open(format, 'w')
fl.write(fp.read())
fl.close()
return None
return None
def seek(self, frame):
"""
Seeks to the given frame in this sequence file. If you numpy2gifek
beyond the end of the sequence, the method raises an
**EOFError** exception. When a sequence file is opened, the
library automatically seeks to frame 0.
Note that in the current version of the library, most sequence
formats only allows you to seek to the next frame.
See :py:meth:`~PIL.Image.Image.tell`.
:param frame: Frame number, starting at 0.
:exception EOFError: If the call attempts to seek beyond the end
of the sequence.
"""
if frame>=self.n_frames:
raise EOFError("Frame number is beyond the number of frames")
else:
self._frame_nr = frame
self._instance = self.frames[frame]
def setim(self, numpy_image):
mode = Image()._get_mode(numpy_image.shape, numpy_image.dtype)
if mode != self._mode:
raise ValueError("Modes of mother image and child image do not match", self._mode, mode)
self._instance = numpy_image
    def show(self, title=None, command=None, wait=0, destroyWindow=True):
        """Show the image in an OpenCV window, or via an external viewer.
        :param title: Window title ("" if omitted).
        :param command: Optional external viewer command; when given, the image
           is written to a temp file and the command is invoked on it.
        :param wait: Milliseconds for cv2.waitKey (0 blocks until a key press).
        :param destroyWindow: Close the window after the key wait.
        """
        if title is None:
            title = ""
        if command is None:
            cv2.imshow(title, self._instance)
            cv2.waitKey(wait)
            if destroyWindow:
                cv2.destroyWindow(title)
        else:
            # dump to a temp file and hand it to the external viewer
            # NOTE(review): the mkstemp fd (`flag`) is never closed — confirm
            flag, fname = tempfile.mkstemp()
            cv2.imwrite(fname, self._instance)
            os.system(command+" "+fname)
def split(self, image=None):
"splits the image into its color bands"
if image is None:
if len(self._instance.shape) == 3:
if self._instance.shape[2] == 1:
return self._instance.copy()
elif self._instance.shape[2] == 2:
l, a = cv2.split(self._instance)
return l, a
elif self._instance.shape[2] == 3:
b, g, r = cv2.split(self._instance)
return b, g, r
else:
b, g, r, a = cv2.split(self._instance)
return b, g, r, a
else:
return self._instance
else:
if len(self._instance.shape) == 3:
if image.shape[2] == 1:
return image.copy()
elif image.shape[2] == 2:
l, a = cv2.split(image)
return l, a
elif image.shape[2] == 3:
b, g, r = cv2.split(image)
return b, g, r
else:
b, g, r, a = cv2.split(image)
return b, g, r, a
else:
return self._instance
def getchannel(self, channel):
"""
Returns an image containing a single channel of the source image.
:param channel: What channel to return. Could be index
(0 for "R" channel of "RGB") or channel name
("A" for alpha channel of "RGBA").
:returns: An image in "L" mode.
.. versionadded:: 4.3.0
"""
if isinstance(channel, basstring):
try:
channel = self.getbands().index(channel)
except ValueError:
raise ValueError(
'The image has no channel "{}"'.format(channel))
return self.getband(channel)
    def tell(self):
        """
        Returns the current frame number. See :py:meth:`~PIL.Image.Image.seek`.
        :returns: Frame number, starting with 0.
        """
        # _frame_nr is maintained by seek(); sequences open at frame 0
        return self._frame_nr
    def thumbnail(self, size, resample=BICUBIC):
        """
        Make this image into a thumbnail. This method modifies the
        image to contain a thumbnail version of itself, no larger than
        the given size. This method calculates an appropriate thumbnail
        size to preserve the aspect of the image, calls the
        :py:meth:`~PIL.Image.Image.draft` method to configure the file reader
        (where applicable), and finally resizes the image.
        Note that this function modifies the :py:class:`~PIL.Image.Image`
        object in place. If you need to use the full resolution image as well,
        apply this method to a :py:meth:`~PIL.Image.Image.copy` of the original
        image.
        :param size: Requested size.
        :param resample: Optional resampling filter. This can be one
           of :py:attr:`PIL.Image.NEAREST`, :py:attr:`PIL.Image.BILINEAR`,
           :py:attr:`PIL.Image.BICUBIC`, or :py:attr:`PIL.Image.LANCZOS`.
           If omitted, it defaults to :py:attr:`PIL.Image.BICUBIC`.
           (was :py:attr:`PIL.Image.NEAREST` prior to version 2.5.0)
        :returns: None
        """
        # preserve aspect ratio
        x, y = self.size
        if x > size[0]:
            # too wide: clamp width, scale height proportionally (min 1px)
            y = int(max(y * size[0] / x, 1))
            x = int(size[0])
        if y > size[1]:
            # still too tall: clamp height, scale width proportionally
            x = int(max(x * size[1] / y, 1))
            y = int(size[1])
        size = x, y
        if size == self.size:
            # already within bounds: nothing to do
            return
        self.draft(None, size)
        # resize in place via the helper that accepts a raw array
        self._instance = self.resize(size, resample, image=self._instance)
        self.readonly = 0
        self.pyaccess = None
    def transform(self, size, method, data=None, resample=NEAREST,
                  fill=1, fillcolor=None):
        """
        Transforms this image. This method creates a new image with the
        given size, and the same mode as the original, and copies data
        to the new image using the given transform.
        :param size: The output size.
        :param method: The transformation method. This is one of
          :py:attr:`PIL.Image.EXTENT` (cut out a rectangular subregion),
          :py:attr:`PIL.Image.AFFINE` (affine transform),
          :py:attr:`PIL.Image.PERSPECTIVE` (perspective transform),
          :py:attr:`PIL.Image.QUAD` (map a quadrilateral to a rectangle), or
          :py:attr:`PIL.Image.MESH` (map a number of source quadrilaterals
          in one operation).
        :param data: Extra data to the transformation method.
        :param resample: Optional resampling filter (accepted for PIL
          compatibility; OpenCV warps pick their own interpolation here).
        :param fill: Unused in this port (PIL ImageTransformHandler argument).
        :param fillcolor: Optional fill color for the area outside the
          transform in the output image (currently unused by this port).
        :returns: An :py:class:`~PIL.Image.Image` object.
        """
        if method == EXTENT:
            # crop the given box, then scale the crop to the requested size
            x0, y0, x1, y1 = data
            part = self._instance[y0:y1, x0:x1]
            _im = cv2.resize(part, size)
        elif method == AFFINE:
            # data = three source points followed by three destination points
            x0, y0, x1, y1, x2, y2, x3, y3, x4, y4, x5, y5 = data
            pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2]])
            pts2 = np.float32([[x3, y3], [x4, y4], [x5, y5]])
            M = cv2.getAffineTransform(pts1,pts2)
            _im = cv2.warpAffine(self._instance, M, size)
        elif method == PERSPECTIVE or method == QUAD:
            # map the given quadrilateral onto the full output rectangle
            x0, y0, x1, y1, x2, y2, x3, y3 = data
            pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
            pts2 = np.float32([[0,0],[size[0], 0], [0, size[1]], [size[0], size[1]]])
            M = cv2.getPerspectiveTransform(pts1, pts2)
            _im = cv2.warpPerspective(self._instance, M, size)
        elif method == MESH:
            # one quad -> box perspective warp per mesh element, applied in order
            _im = self._instance.copy()
            for elem in data:
                box, quad = elem
                x0, y0, x1, y1, x2, y2, x3, y3 = quad
                pts1 = np.float32([[x0, y0], [x1, y1], [x2, y2], [x3, y3]])
                pts2 = np.float32([[box[0], box[1]],[box[2], box[1]], [box[0], box[3]], [box[2], box[3]]])
                M = cv2.getPerspectiveTransform(pts1, pts2)
                _im = cv2.warpPerspective(_im, M, size)
        return Image(_im)
def transpose(self, method):
"""
Transpose image (flip or rotate in 90 degree steps)
:param method: One of :py:attr:`PIL.Image.FLIP_LEFT_RIGHT`,
:py:attr:`PIL.Image.FLIP_TOP_BOTTOM`, :py:attr:`PIL.Image.ROTATE_90`,
:py:attr:`PIL.Image.ROTATE_180`, :py:attr:`PIL.Image.ROTATE_270`,
:returns: Returns a flipped or rotated copy of this image.
"""
w, h = self.size
if method == FLIP_LEFT_RIGHT:
_im = cv2.flip(self._instance, 1)
elif method == FLIP_TOP_BOTTOM:
_im = cv2.flip(self._instance, 0)
elif method == ROTATE_90:
_im = self.rotate_bound(270)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
elif method == ROTATE_180:
_im = self.rotate(180, self._instance)
elif method == ROTATE_270:
_im = self.rotate_bound(90)
x = self.size[0]//2-self.size[1]//2
box = (0, x, self.size[0], x+self.size[1])
_im = self.crop(box, _im)
if isinstance(_im, Image):
return _im
elif isinstance(_im, np.ndarray):
return Image(_im)
    def verify(self):
        """
        Verifies the contents of a file. For data read from a file, this
        method attempts to determine if the file is broken, without
        actually decoding the image data. If this method finds any
        problems, it raises suitable exceptions. If you need to load
        the image after using this method, you must reopen the image
        file.
        """
        # Intentionally a no-op in this OpenCV-backed port: decoding happens
        # eagerly at open time, so a broken file already failed there.
        pass
class FreeTypeFont(object):
    "FreeType font wrapper (requires python library freetype-py)"
    def __init__(self, font=None, size=10, index=0, encoding="",
                 layout_engine=None):
        """Record the font parameters and try to load `font` from disk.
        `self.font` stays None when the path does not name an existing file;
        callers (e.g. truetype) use that to fall back to a directory search.
        """
        self.path = font
        self.size = size
        self.index = index
        self.encoding = encoding
        self.layout_engine = layout_engine
        if not os.path.isfile(self.path):
            self.font = None
        else:
            # the +16 mirrors the original sizing fudge for FreeType faces
            self.font = load(self.path, self.size+16)
def getsize(text, ttf_font, scale=1.0, thickness=1):
    """Measure `text` and return (width, height, baseline) in pixels.
    Uses FreeType glyph metrics when `ttf_font` is a freetype.Face;
    otherwise falls back to cv2.getTextSize with a Hershey font id.
    """
    if isinstance(ttf_font, freetype.Face):
        slot = ttf_font.glyph
        width, height, baseline = 0, 0, 0
        previous = 0
        for i,c in enumerate(text):
            ttf_font.load_char(c)
            bitmap = slot.bitmap
            # track the tallest glyph including descender overhang below baseline
            height = max(height, bitmap.rows + max(0,-(slot.bitmap_top-bitmap.rows)))
            baseline = max(baseline, max(0,-(slot.bitmap_top-bitmap.rows)))
            kerning = ttf_font.get_kerning(previous, c)
            # advance/kerning are 26.6 fixed point; >> 6 converts to pixels
            width += (slot.advance.x >> 6) + (kerning.x >> 6)
            previous = c
    else:
        size = cv2.getTextSize(text, ttf_font, scale, thickness)
        width = size[0][0]
        height = size[0][1]
        baseline = size[1]
    return width, height, baseline
def getmask(text, ttf_font):
    """Render `text` with a freetype.Face into a uint8 alpha-mask array of
    shape (height, width); ImageDraw.text uses it as a paste mask."""
    slot = ttf_font.glyph
    width, height, baseline = getsize(text, ttf_font)
    Z = np.zeros((height, width), dtype=np.ubyte)
    x, y = 0, 0
    previous = 0
    for c in text:
        ttf_font.load_char(c)
        bitmap = slot.bitmap
        top = slot.bitmap_top
        left = slot.bitmap_left
        w,h = bitmap.width, bitmap.rows
        # vertical position: sit the glyph on the common baseline
        y = height-baseline-top
        if y<=0: y=0
        kerning = ttf_font.get_kerning(previous, c)
        # kerning and advance are 26.6 fixed point; >> 6 converts to pixels
        x += (kerning.x >> 6)
        character = np.array(bitmap.buffer, dtype='uint8').reshape(h,w)
        try:
            Z[y:y+h,x:x+w] += character
        except ValueError:
            # the glyph ran past the right edge: pull x back until it fits
            while x+w>Z.shape[1]:
                x = x - 1
            # print("new", x, y, w, h, character.shape, type(bitmap))
            if x>0:
                Z[:character.shape[0],x:x+w] += character
        x += (slot.advance.x >> 6)
        previous = c
    return Z
def grab(bbox=None):
    """Take a screenshot and return it as an opened image file.
    :param bbox: Optional (left, top, right, bottom) region; the full (virtual)
       screen is captured when omitted.
    :returns: The result of open() on the captured PNG temp file.
    :exception NotImplementedError: When the mss package is not installed.
    """
    if not mss_installed:
        # BUGFIX: the exception was constructed but never raised
        raise NotImplementedError("mss is not installed so there is no grab method available, install it with: pip install mss")
    fh, filepath = tempfile.mkstemp('.png')
    with mss.mss() as sct:
        # The screen part to capture
        if bbox is None:
            filepath = sct.shot(mon=-1, output=filepath)
        else:
            monitor = {"top": bbox[1], "left": bbox[0], "width": bbox[2]-bbox[0], "height": bbox[3]-bbox[1]}
            # Grab the data
            sct_img = sct.grab(monitor)
            # Save to the picture file
            mss.tools.to_png(sct_img.rgb, sct_img.size, output=filepath)
    return open(filepath)
def grabclipboard():
    """Return the clipboard image as an opened image, or None when the
    clipboard holds no image data. macOS goes through AppleScript, every
    other platform through win32clipboard + a hand-built BMP header.
    :exception NotImplementedError: When mss is missing or the BITMAP ctypes
       structures are unavailable on this platform.
    """
    if not mss_installed:
        # BUGFIX: the exception was constructed but never raised
        raise NotImplementedError("mss is not installed so there is no grabclipboard method available, install it with: pip install mss")
    if not bitmap_classes_ok:
        raise NotImplementedError("grabclipboard is not available on your platform")
    if sys.platform == "darwin":
        # ask the clipboard for JPEG data via osascript and read it back
        fh, filepath = tempfile.mkstemp('.jpg')
        os.close(fh)
        commands = [
            "set theFile to (open for access POSIX file \""
            + filepath + "\" with write permission)",
            "try",
            " write (the clipboard as JPEG picture) to theFile",
            "end try",
            "close access theFile"
        ]
        script = ["osascript"]
        for command in commands:
            script += ["-e", command]
        subprocess.call(script)
        im = None
        if os.stat(filepath).st_size != 0:
            im = open(filepath)
        os.unlink(filepath)
        return im
    else:
        # pull a device-independent bitmap off the clipboard and prepend a
        # BMP file header so the regular decoder can read it
        fh, filepath = tempfile.mkstemp('.bmp')
        import win32clipboard, builtins
        win32clipboard.OpenClipboard()
        try:
            if win32clipboard.IsClipboardFormatAvailable(win32clipboard.CF_DIB):
                data = win32clipboard.GetClipboardData(win32clipboard.CF_DIB)
            else:
                data = None
        finally:
            win32clipboard.CloseClipboard()
        if data is None:
            return None
        bmih = BITMAPINFOHEADER()
        ctypes.memmove(ctypes.pointer(bmih), data, SIZEOF_BITMAPINFOHEADER)
        bmfh = BITMAPFILEHEADER()
        ctypes.memset(ctypes.pointer(bmfh), 0, SIZEOF_BITMAPFILEHEADER)  # zero structure
        bmfh.bfType = ord('B') | (ord('M') << 8)
        bmfh.bfSize = SIZEOF_BITMAPFILEHEADER + len(data)  # file size
        SIZEOF_COLORTABLE = 0
        bmfh.bfOffBits = SIZEOF_BITMAPFILEHEADER + SIZEOF_BITMAPINFOHEADER + SIZEOF_COLORTABLE
        with builtins.open(filepath, 'wb') as bmp_file:
            bmp_file.write(bmfh)
            bmp_file.write(data)
        return open(filepath)
def load(filename, size=12):
    """
    Load a font file. This function loads a font object from the given
    bitmap font file, and returns the corresponding font object.
    :param filename: Name of font file.
    :param size: Requested character size passed to set_char_size.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    # face = Face('./VeraMono.ttf')
    face = freetype.Face(filename)
    # NOTE(review): FreeType's set_char_size expects 26.6 fixed-point units
    # (conventionally size*64); size*size only equals that when size == 64 —
    # confirm this sizing is intentional.
    face.set_char_size(size*size)
    return face
def truetype(font=None, size=10, index=0, encoding="",
             layout_engine=None):
    """
    Load a TrueType or OpenType font from a file or file-like object,
    and create a font object.
    This function loads a font object from the given file or file-like
    object, and creates a font object for a font of the given size.
    :param font: A filename or file-like object containing a TrueType font.
                     Under Windows, if the file is not found in this filename,
                     the loader also looks in Windows :file:`fonts/` directory.
    :param size: The requested size, in points.
    :param index: Which font face to load (default is first available face).
    :param encoding: Which font encoding to use (default is Unicode). Common
                     encodings are "unic" (Unicode), "symb" (Microsoft
                     Symbol), "ADOB" (Adobe Standard), "ADBE" (Adobe Expert),
                     and "armn" (Apple Roman). See the FreeType documentation
                     for more information.
    :param layout_engine: Which layout engine to use, if available:
                     `ImageFont.LAYOUT_BASIC` or `ImageFont.LAYOUT_RAQM`.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    if not freetype_installed:
        raise NotImplementedError("freetype-py is not installed or the libfreetype.dll/dylib/so is missing, if freetype-py is not installed, install it with pip install freetype-py")
    fontpath = font
    font = FreeTypeFont(font, size)
    if font.font is not None:
        return font.font
    else:
        # direct path lookup failed: search the platform font directories
        ttf_filename = os.path.basename(fontpath)
        dirs = []
        if sys.platform == "win32":
            # check the windows font repository
            # NOTE: must use uppercase WINDIR, to work around bugs in
            # 1.5.2's os.environ.get()
            windir = os.environ.get("WINDIR")
            if windir:
                dirs.append(os.path.join(windir, "Fonts"))
        elif sys.platform in ('linux', 'linux2'):
            lindirs = os.environ.get("XDG_DATA_DIRS", "")
            if not lindirs:
                # According to the freedesktop spec, XDG_DATA_DIRS should
                # default to /usr/share
                lindirs = '/usr/share'
            dirs += [os.path.join(lindir, "fonts")
                     for lindir in lindirs.split(":")]
        elif sys.platform == 'darwin':
            dirs += ['/Library/Fonts', '/System/Library/Fonts',
                     os.path.expanduser('~/Library/Fonts')]
        ext = os.path.splitext(ttf_filename)[1]
        first_font_with_a_different_extension = None
        for directory in dirs:
            for walkroot, walkdir, walkfilenames in os.walk(directory):
                for walkfilename in walkfilenames:
                    # exact filename match when an extension was given
                    if ext and walkfilename == ttf_filename:
                        fontpath = os.path.join(walkroot, walkfilename)
                        font = FreeTypeFont(fontpath, size)
                        return font.font
                    # extensionless request: accept a matching .ttf file
                    elif (not ext and
                          os.path.splitext(walkfilename)[0] == ttf_filename):
                        fontpath = os.path.join(walkroot, walkfilename)
                        if os.path.splitext(fontpath)[1] == '.ttf':
                            font = FreeTypeFont(fontpath, size)
                            return font.font
        raise IOError("cannot find font file")
def load_path(filename, size=12):
    """
    Load font file. Same as :py:func:`~PIL.ImageFont.load`, but searches for a
    bitmap font along the Python path.
    :param filename: Name of font file.
    :return: A font object.
    :exception IOError: If the file could not be read.
    """
    for directory in sys.path:
        if not isDirectory(directory):
            continue
        # normalise bytes filenames to the platform's text form
        if not isinstance(filename, str):
            filename = filename.decode("utf-8") if py3 else filename.encode("utf-8")
        try:
            return load(os.path.join(directory, filename), size)
        except IOError:
            # not in this directory: keep searching
            pass
    raise IOError("cannot find font file")
class ImageDraw(object):
    def __init__(self, img, mode=None):
        """Create a drawing context bound to `img` (an Image wrapper).
        Falls back to a fully unbound context when `img` does not expose the
        expected `_instance` attribute.
        """
        try:
            self.img = img
            self._img_instance = self.img._instance
            self.mode = Image()._get_mode(self._img_instance.shape, self._img_instance.dtype)
            # pick a sensible default ink for this mode/depth
            self.setink()
        except AttributeError:
            # `img` did not look like an Image: leave the context unbound
            self._img_instance = None
            self.mode = None
            self.ink = None
        self.fill = None
        self.palette = None
        self.font = None
def _convert_bgr2rgb(self, color):
if isinstance(color, tuple):
if len(color) == 3:
color = color[::-1]
elif len(color) == 4:
color = color[:3][::-1] + (color[3],)
return color
def _get_coordinates(self, xy):
"Transform two tuples in a 4 array or pass the 4 array through"
if isinstance(xy[0], tuple):
coord = []
for i in range(len(xy)):
coord.append(int(xy[i][0]))
coord.append(int(xy[i][1]))
else:
coord = [int(i) for i in xy]
return coord
    def _get_ellipse_bb(x, y, major, minor, angle_deg=0):
        """Compute tight ellipse bounding box.
        NOTE(review): the signature is missing `self`, so calling this as a
        bound method would shift every argument by one; confirm whether it is
        meant to be a @staticmethod (it is never called in the visible code).
        :returns: (min_x, min_y, max_x, max_y) of the rotated ellipse.
        """
        # extremal x coordinates: parametric derivative of the rotated ellipse
        t = np.arctan(-minor / 2 * np.tan(np.radians(angle_deg)) / (major / 2))
        [max_x, min_x] = [x + major / 2 * np.cos(t) * np.cos(np.radians(angle_deg)) -
                          minor / 2 * np.sin(t) * np.sin(np.radians(angle_deg)) for t in (t, t + np.pi)]
        # extremal y coordinates, analogously
        t = np.arctan(minor / 2 * 1. / np.tan(np.radians(angle_deg)) / (major / 2))
        [max_y, min_y] = [y + minor / 2 * np.sin(t) * np.cos(np.radians(angle_deg)) +
                          major / 2 * np.cos(t) * np.sin(np.radians(angle_deg)) for t in (t, t + np.pi)]
        return min_x, min_y, max_x, max_y
    def _getink(self, ink, fill=None):
        """Resolve outline (`ink`) and `fill` into concrete OpenCV colors:
        string colors via ImageColor, palette lookups, scalar promotion for
        multi-channel modes, and RGB->BGR channel reordering.
        """
        if ink is None and fill is None:
            # no explicit color: fall back to the context's default ink
            # NOTE(review): assigning self.ink (not self.fill) to `fill`
            # looks suspicious — confirm this is intended
            if self.fill:
                fill = self.ink
            else:
                ink = self.ink
        else:
            if ink is not None:
                if isinstance(ink, basstring):
                    ink = ImageColor().getcolor(ink, self.mode)
                if self.palette and not isinstance(ink, numbers.Number):
                    ink = self.palette.getcolor(ink)
                if not self.mode[0] in ("1", "L", "I", "F") and isinstance(ink, numbers.Number):
                    # promote a bare scalar to a color tuple for color modes
                    ink = (0, 0, ink)
                # ink = self.draw.draw_ink(ink, self.mode)
                # convert BGR -> RGB
                ink = self._convert_bgr2rgb(ink)
            if fill is not None:
                if isinstance(fill, basstring):
                    fill = ImageColor().getcolor(fill, self.mode)
                if self.palette and not isinstance(fill, numbers.Number):
                    fill = self.palette.getcolor(fill)
                if not self.mode[0] in ("1", "L", "I", "F") and isinstance(fill, numbers.Number):
                    fill = (0, 0, fill)
                # fill = self.draw.draw_ink(fill, self.mode)
                # convert BGR -> RGB
                fill = self._convert_bgr2rgb(fill)
        return ink, fill
def _get_ell_elements(self, box):
x1, y1, x2, y2 = box
axis1 = x2-x1
axis2 = y2-y1
center = (x1+axis1//2, y1+axis2//2)
return center, axis1, axis2
def _get_pointFromEllipseAngle(self, centerx, centery, radiush, radiusv, ang):
"""calculate point (x,y) for a given angle ang on an ellipse with its center at centerx, centery and
its horizontal radiush and its vertical radiusv"""
th = np.radians(ang)
ratio = (radiush/2.0)/float(radiusv/2.0)
x = centerx + radiush/2.0 * np.cos(th)
y = centery + radiusv/2.0 * np.sin(th)
return int(x), int(y)
    def _multiline_check(self, text):
        "Return True when `text` (str or bytes) contains a newline."
        split_character = "\n" if isinstance(text, str) else b"\n"
        return split_character in text
    def _multiline_split(self, text):
        "Split `text` (str or bytes) into a list of lines."
        split_character = "\n" if isinstance(text, str) else b"\n"
        return text.split(split_character)
def arc(self, box, start, end, fill=None, width=1, line=False, linecenter=False, fillcolor=None):
"Draw an arc."
while end<start:
end = end + 360
if fillcolor is not None:
fill = fillcolor
ink, fill = self._getink(fill)
if ink is not None or fill is not None:
center, axis1, axis2 = self._get_ell_elements(box)
axes = (axis1//2, axis2//2)
if linecenter:
if fillcolor:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, fillcolor, -1)
else:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, fillcolor, width)
startx, starty = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, start)
endx, endy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, end)
st = (startx, starty)
e = (endx, endy)
cv2.line(self._img_instance, center, st, ink, width)
cv2.line(self._img_instance, center, e, ink, width)
self.img._instance = self._img_instance
else:
cv2.ellipse(self._img_instance, center, axes, 0, start, end, ink, width)
self.img._instance = self._img_instance
if line:
startx, starty = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, start)
endx, endy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, end)
st = (startx, starty)
e = (endx, endy)
cv2.line(self._img_instance, st, e, ink, width)
if fillcolor is not None:
mid_line = ((startx+endx)//2, (starty+endy)//2)
mid_ang = (start+end)//2
midx, midy = self._get_pointFromEllipseAngle(center[0], center[1], axis1, axis2, mid_ang)
mid_chord = ((mid_line[0]+midx)//2, (mid_line[1]+midy)//2)
h, w = self._img_instance.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
cv2.floodFill(self._img_instance, mask, mid_chord, fillcolor)
self.img._instance = self._img_instance
def bitmap(self, xy, bitmap, fill=None):
"Draw a bitmap."
ink, fill = self._getink(fill)
if ink is None:
ink = fill
if ink is not None:
box = (xy[0], xy[1], bitmap._instance.shape[1]+xy[0], bitmap._instance.shape[0]+xy[1])
self.img.paste(ink, box, mask=bitmap)
    def chord(self, box, start, end, fill=None, outline=None, width=1):
        """Draw a chord: an arc closed by a straight line between its ends.
        :param box: Bounding box of the underlying ellipse.
        :param start, end: Arc angles in degrees.
        """
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            # arc() flood-fills the enclosed region when fillcolor is given
            self.arc(box, start, end, ink, width, line=True, fillcolor=fill)
            # self.draw.draw_chord(xy, start, end, fill, 1)
        if ink is not None and ink != fill:
            self.arc(box, start, end, ink, width, line=True)
            # self.draw.draw_chord(xy, start, end, ink, 0, width)
def ellipse(self, box, fill=None, outline=None, width=1):
"Draw an ellipse inside the bounding box like cv2.ellipse(img, box, color[, thickness)]"
ink, fill = self. _getink(outline, fill)
center, axis1, axis2 = self._get_ell_elements(box)
ebox = (center, (axis1, axis2), 0)
if fill is not None:
cv2.ellipse(self._img_instance, ebox, fill, -1)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
cv2.ellipse(self._img_instance, ebox, ink, width)
self.img._instance = self._img_instance
    def getfont(self):
        """Get the current default font.
        :returns: An image font."""
        if self.font is None:
            # lazily default to OpenCV's built-in Hershey simplex face
            self.font = cv2.FONT_HERSHEY_SIMPLEX
        return self.font
    def line(self, xy, fill=None, width=1, joint=None):
        """Draw one or more connected line segments; with joint="curve" and a
        wide stroke, rounds the corners between consecutive segments."""
        ink = self._getink(fill)[0]
        coord = self._get_coordinates(xy)
        # consume coordinates four at a time: (x0, y0) -> (x1, y1)
        for co in range(0, len(coord), 4):
            start = (coord[co], coord[co+1])
            end = (coord[co+2], coord[co+3])
            cv2.line(self._img_instance, start, end, ink, width)
            self.img._instance = self._img_instance
        if joint == "curve" and width > 4:
            for i in range(1, len(xy)-1):
                point = xy[i]
                # direction angles of the segments entering and leaving the vertex
                angles = [
                    np.degrees(np.arctan2(
                        end[0] - start[0], start[1] - end[1]
                    )) % 360
                    for start, end in ((xy[i-1], point), (point, xy[i+1]))
                ]
                if angles[0] == angles[1]:
                    # This is a straight line, so no joint is required
                    continue
                def coord_at_angle(coord, angle):
                    # point on the joint circle at `angle`, offset from the vertex
                    x, y = coord
                    angle -= 90
                    distance = width/2 - 1
                    return tuple([
                        p +
                        (np.floor(p_d) if p_d > 0 else np.ceil(p_d))
                        for p, p_d in
                        ((x, distance * np.cos(np.radians(angle))),
                         (y, distance * np.sin(np.radians(angle))))
                    ])
                flipped = ((angles[1] > angles[0] and
                            angles[1] - 180 > angles[0]) or
                           (angles[1] < angles[0] and
                            angles[1] + 180 > angles[0]))
                # bounding box of the joint circle centered on the vertex
                coords = [
                    (point[0] - width/2 + 1, point[1] - width/2 + 1),
                    (point[0] + width/2 - 1, point[1] + width/2 - 1)
                ]
                if flipped:
                    start, end = (angles[1] + 90, angles[0] + 90)
                else:
                    start, end = (angles[0] - 90, angles[1] - 90)
                # round the corner with a pieslice spanning the two segments
                self.pieslice(coords, start - 90, end - 90, fill)
                if width > 8:
                    # Cover potential gaps between the line and the joint
                    if flipped:
                        gapCoords = [
                            coord_at_angle(point, angles[0]+90),
                            point,
                            coord_at_angle(point, angles[1]+90)
                        ]
                    else:
                        gapCoords = [
                            coord_at_angle(point, angles[0]-90),
                            point,
                            coord_at_angle(point, angles[1]-90)
                        ]
                    self.line(gapCoords, fill, width=3)
    def multiline_text(self, xy, text, fill=None, font=cv2.FONT_HERSHEY_SIMPLEX, anchor=None,
                       spacing=4, align="left", direction=None, features=None, scale=0.4, thickness=1):
        """Draw `text` line by line starting at `xy`, honouring `align`
        ("left", "center" or "right") within the widest line."""
        widths = []
        max_width = 0
        lines = self._multiline_split(text)
        # line advance: height of a capital letter plus the requested spacing
        line_spacing = self.textsize('A', font=font, scale=scale, thickness=thickness)[1] + spacing
        for line in lines:
            line_width, line_height = self.textsize(line, font, scale=scale, thickness=thickness)
            widths.append(line_width)
            max_width = max(max_width, line_width)
        left, top = xy
        for idx, line in enumerate(lines):
            if align == "left":
                pass  # left = x
            elif align == "center":
                left += (max_width - widths[idx]) / 2.0
            elif align == "right":
                left += (max_width - widths[idx])
            else:
                raise ValueError('align must be "left", "center" or "right"')
            self.text((left, top), line, fill=fill, font=font, anchor=anchor, scale=scale, thickness=thickness,
                      calledfrommultilines=True, direction=direction, features=features)
            top += line_spacing
            # reset the x origin for the next line
            left = xy[0]
def multiline_textsize(self, text, font=cv2.FONT_HERSHEY_SIMPLEX, spacing=4, direction=None, features=None, scale=0.4, thickness=1):
max_width = 0
lines = self._multiline_split(text)
line_spacing = self.textsize('A', font=font, scale=scale, thickness=thickness)[1] + spacing
for line in lines:
line_width, line_height = self.textsize(line, font, spacing, direction, features, scale=scale, thickness=thickness)
max_width = max(max_width, line_width)
return max_width, len(lines)*line_spacing - spacing
    def pieslice(self, box, start, end, fill=None, outline=None, width=1):
        """Draw a pieslice: an arc plus straight radii from the ellipse center
        to each end of the arc.
        :param box: Bounding box of the underlying ellipse.
        :param start, end: Arc angles in degrees.
        """
        ink, fill = self._getink(outline, fill)
        if fill is not None:
            # linecenter makes arc() draw the radii; fillcolor fills the sector
            self.arc(box, start, end, fill, width, linecenter=True, fillcolor=fill)
            # self.draw.draw_pieslice(xy, start, end, fill, 1)
        if ink is not None and ink != fill:
            self.arc(box, start, end, ink, width, linecenter=True)
            # self.draw.draw_pieslice(xy, start, end, ink, 0, width)
def _point(self, x, y, fill=None):
"Draw a point without transformations"
elem = (x, y)
cv2.circle(self._img_instance, elem, 1, fill, thickness=-1)
self.img._instance = self._img_instance
def point(self, xy, fill=None, width=1):
"Draw a point."
ink, fill = self._getink(fill)
coord = self._get_coordinates(xy)
for co in range(0, len(coord), 2):
elem = (coord[co], coord[co+1])
# cv2.line(self._img_instance, elem, elem, ink, width)
cv2.circle(self._img_instance, elem, width, ink, thickness=-1)
self.img._instance = self._img_instance
def polygon(self, xy, fill=None, outline=None):
"Draw a polygon."
ink, fill = self._getink(outline, fill)
coord = self._get_coordinates(xy)
coord = np.array(coord, np.int32)
coord = np.reshape(coord, (len(coord)//2, 2))
if fill is not None:
# self.draw.draw_polygon(xy, fill, 1)
try:
cv2.fillPoly(self._img_instance, [coord], fill)
except:
coord = coord.reshape((-1, 1, 2))
cv2.fillPoly(self._img_instance, [coord], fill)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
# self.draw.draw_polygon(xy, ink, 0)
try:
cv2.polylines(self._img_instance, [coord], True, ink)
except:
coord = coord.reshape((-1, 1, 2))
cv2.polylines(self._img_instance, [coord], True, ink)
self.img._instance = self._img_instance
def rectangle(self, xy, fill=None, outline=None, width=1):
"Draw a rectangle."
ink, fill = self._getink(outline, fill)
coord = self._get_coordinates(xy)
if fill is not None:
cv2.rectangle(self._img_instance, tuple(coord[:2]), tuple(coord[2:4]), fill, -width)
self.img._instance = self._img_instance
if ink is not None and ink != fill:
cv2.rectangle(self._img_instance, tuple(coord[:2]), tuple(coord[2:4]), ink, width)
self.img._instance = self._img_instance
def setink(self):
"Set ink to standard black by default"
if len(self._img_instance.shape) == 2:
channels = 1
else:
channels = self._img_instance.shape[2]
depth = self._img_instance.dtype
if channels == 1 and depth == np.bool:
self.ink = False
if channels == 1 and depth == np.uint8:
self.ink = 0
if channels == 2 and depth == np.uint8:
self.ink = (0, 255)
if channels == 3 and depth == np.uint8:
self.ink = (0, 0, 0)
if channels == 4 and depth == np.uint8:
self.ink = (0, 0, 0, 255)
if channels == 1 and depth == np.int32:
self.ink = 0
if channels == 1 and depth == np.float32:
self.ink = 0.0
if channels == 1 and depth == np.float64:
self.ink = 0.0
    def text(self, xy, text, fill=None, font=cv2.FONT_HERSHEY_SIMPLEX, anchor=None, scale=0.4, thickness=1, calledfrommultilines=False, *args, **kwargs):
        """Draw *text* at position *xy*.

        Two rendering backends: OpenCV's Hershey fonts (``cv2.putText``)
        when *font* is an OpenCV font constant, or a freetype face rendered
        via a mask-paste when *font* is a ``freetype.Face``.

        :param xy: Top-left anchor of the text.
        :param fill: Text color; falls back via ``_getink``.
        :param font: OpenCV font id or ``freetype.Face``.
        :param anchor: Unused; kept for PIL API compatibility.
        :param scale: OpenCV font scale (cv2 path only).
        :param thickness: Stroke thickness (cv2 path only).
        :param calledfrommultilines: Internal flag so multiline_text can
            re-enter without infinite recursion.
        """
        fontFace = font
        fontScale = scale
        # Multiline text is delegated unless we were called *from* the
        # multiline handler or we are on the freetype path (which splits
        # lines itself below).
        if not calledfrommultilines and not isinstance(fontFace, freetype.Face):
            if self._multiline_check(text):
                return self.multiline_text(xy, text, fill, font, anchor, scale=scale, thickness=thickness, *args, **kwargs)
        ink, fill = self._getink(fill)
        if fontFace is None:
            fontFace = self.getfont()
        if ink is None:
            ink = fill
        if ink is not None:
            if not isinstance(fontFace, freetype.Face):
                # cv2.putText anchors at the text baseline, so shift the
                # y-coordinate down by the measured text height.
                w, h = self.textsize(text, font=fontFace, scale=scale, thickness=thickness)
                xy = (xy[0], xy[1]+h)
                cv2.putText(self._img_instance, text, xy, fontFace, fontScale, ink, thickness)
                self.img._instance = self._img_instance
            else:
                if self._multiline_check(text):
                    lines = text.split("\n")
                else:
                    lines =[text]
                old_height = 0
                for line in lines:
                    # First pass to compute bbox
                    width, height, baseline = getsize(line, font)
                    # Second pass for actual rendering
                    Z = getmask(line, font)
                    # cv2.imshow("Z", Z)
                    # cv2.waitKey()
                    MaskImg = Image(Z)
                    # Build a solid-color image in the ink color and paste
                    # it through the glyph mask.
                    img = np.zeros(self.img._instance.shape, dtype=self.img._instance.dtype)
                    if len(self.img._instance.shape)>2:
                        if self.img._instance.shape[2] >= 2:
                            img[:,:,0] = ink[0]
                            img[:,:,1] = ink[1]
                        if self.img._instance.shape[2] >= 3:
                            img[:,:,2] = ink[2]
                        if self.img._instance.shape[2] == 4:
                            # alpha channel is forced fully opaque
                            img[:,:,3] = 255
                    else:
                        img[:] = ink
                    TextImg = Image(img)
                    box = [int(xy[0]), int(xy[1]+old_height)]
                    self.img.paste(TextImg, box=box, mask=MaskImg)
                    self._img_instance = self.img._instance
                    old_height = old_height + height
def textsize(self, text, font=cv2.FONT_HERSHEY_SIMPLEX, spacing=4, direction=None, features=None, scale=0.4, thickness=1):
"Get the size of a given string, in pixels."
fontFace = font
fontScale = scale
if self._multiline_check(text):
return self.multiline_textsize(text, font, spacing, direction, features, scale=scale, thickness=thickness)
if not isinstance(fontFace, freetype.Face):
if font is None:
fontFace = self.getfont()
size = cv2.getTextSize(text, fontFace, fontScale, thickness)
text_width = size[0][0]
text_height = size[0][1]
return (text_width, text_height)
else:
width, height, baseline = getsize(text, fontFace)
return (width, height)
def Draw(im, mode=None):
    """
    A simple 2D drawing interface for PIL images.

    :param im: The image to draw in.
    :param mode: Optional color-mode argument, accepted for PIL API
        compatibility; the OpenCV-backed ImageDraw ignores it.
    """
    return ImageDraw(im)
def floodfill(image, xy, value, border=None, thresh=0, flags=130820):
    """
    (experimental) Fills a bounded region with a given color.

    :param image: Target image.
    :param xy: Seed position (a 2-item coordinate tuple).
    :param value: Fill color; RGB tuples/lists are reversed to BGR for OpenCV.
    :param border: Accepted for PIL compatibility; not used by this backend.
    :param thresh: Maximum tolerable difference of a pixel value from the
        'background' for it to be replaced.
    :param flags: Raw ``cv2.floodFill`` flags word.
    """
    buf = image.getim()
    if isinstance(value, (tuple, list)):
        # PIL colors are RGB ordered; OpenCV expects BGR.
        value = value[::-1]
    height, width = buf.shape[:2]
    # floodFill requires a zeroed mask two pixels larger than the image.
    mask = np.zeros((height + 2, width + 2), np.uint8)
    seed = tuple(int(c) for c in xy)
    diff = (thresh,) * 3
    cv2.floodFill(buf, mask, seed, value, diff, diff, flags)
class ImageColor(object):
    """Parse CSS/HTML-style color strings, mirroring ``PIL.ImageColor``."""

    def getcolor(self, color, mode):
        """
        Same as :py:func:`~PIL.ImageColor.getrgb`, but converts the RGB value to a
        greyscale value if the mode is not color or a palette image. If the string
        cannot be parsed, this function raises a :py:exc:`ValueError` exception.
        .. versionadded:: 1.1.4
        :param color: A color string
        :return: ``(graylevel [, alpha]) or (red, green, blue[, alpha])``
        """
        # same as getrgb, but converts the result to the given mode
        color, alpha = self.getrgb(color), 255
        if len(color) == 4:
            color, alpha = color[0:3], color[3]
        if getmodebase(mode) == "L":
            r, g, b = color
            # ITU-R 601-2 luma transform using integer arithmetic.
            color = (r*299 + g*587 + b*114)//1000
            if mode[-1] == 'A':
                return (color, alpha)
        else:
            if mode[-1] == 'A':
                return color + (alpha,)
        return color

    def getrgb(self, color):
        """
        Convert a color string to an RGB tuple. If the string cannot be parsed,
        this function raises a :py:exc:`ValueError` exception.
        .. versionadded:: 1.1.4
        :param color: A color string
        :return: ``(red, green, blue[, alpha])``
        """
        color = color.lower()
        # ``colormap`` is the module-level named-color table; string values
        # are resolved recursively and cached back as tuples.
        rgb = colormap.get(color, None)
        if rgb:
            if isinstance(rgb, tuple):
                return rgb
            colormap[color] = rgb = self.getrgb(rgb)
            return rgb
        # check for known string formats
        # "#rgb" shorthand: each digit is doubled ("#f0a" -> "#ff00aa").
        if re.match('#[a-f0-9]{3}$', color):
            return (
                int(color[1]*2, 16),
                int(color[2]*2, 16),
                int(color[3]*2, 16),
                )
        # "#rgba" shorthand with alpha.
        if re.match('#[a-f0-9]{4}$', color):
            return (
                int(color[1]*2, 16),
                int(color[2]*2, 16),
                int(color[3]*2, 16),
                int(color[4]*2, 16),
                )
        # "#rrggbb"
        if re.match('#[a-f0-9]{6}$', color):
            return (
                int(color[1:3], 16),
                int(color[3:5], 16),
                int(color[5:7], 16),
                )
        # "#rrggbbaa"
        if re.match('#[a-f0-9]{8}$', color):
            return (
                int(color[1:3], 16),
                int(color[3:5], 16),
                int(color[5:7], 16),
                int(color[7:9], 16),
                )
        # "rgb(r, g, b)" with absolute values
        m = re.match(r"rgb\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$", color)
        if m:
            return (
                int(m.group(1)),
                int(m.group(2)),
                int(m.group(3))
                )
        # "rgb(r%, g%, b%)" with percentages, rounded to nearest integer
        m = re.match(r"rgb\(\s*(\d+)%\s*,\s*(\d+)%\s*,\s*(\d+)%\s*\)$", color)
        if m:
            return (
                int((int(m.group(1)) * 255) / 100.0 + 0.5),
                int((int(m.group(2)) * 255) / 100.0 + 0.5),
                int((int(m.group(3)) * 255) / 100.0 + 0.5)
                )
        # "hsl(h, s%, l%)" -- note colorsys expects (h, l, s) ordering.
        m = re.match(
            r"hsl\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$",
            color,
        )
        if m:
            from colorsys import hls_to_rgb
            rgb = hls_to_rgb(
                float(m.group(1)) / 360.0,
                float(m.group(3)) / 100.0,
                float(m.group(2)) / 100.0,
            )
            return (
                int(rgb[0] * 255 + 0.5),
                int(rgb[1] * 255 + 0.5),
                int(rgb[2] * 255 + 0.5)
                )
        # "hsb(...)" / "hsv(...)"
        m = re.match(
            r"hs[bv]\(\s*(\d+\.?\d*)\s*,\s*(\d+\.?\d*)%\s*,\s*(\d+\.?\d*)%\s*\)$",
            color,
        )
        if m:
            from colorsys import hsv_to_rgb
            rgb = hsv_to_rgb(
                float(m.group(1)) / 360.0,
                float(m.group(2)) / 100.0,
                float(m.group(3)) / 100.0,
            )
            return (
                int(rgb[0] * 255 + 0.5),
                int(rgb[1] * 255 + 0.5),
                int(rgb[2] * 255 + 0.5)
                )
        # "rgba(r, g, b, a)"
        m = re.match(r"rgba\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$",
                     color)
        if m:
            return (
                int(m.group(1)),
                int(m.group(2)),
                int(m.group(3)),
                int(m.group(4))
                )
        raise ValueError("unknown color specifier: %r" % color)
class ModeDescriptor(object):
    """Wrapper for mode strings: records a mode's bands and base mode/type."""

    def __init__(self, mode, bands, basemode, basetype):
        """Store the mode name, its band layout and its base mode/type."""
        self.basetype = basetype
        self.basemode = basemode
        self.bands = bands
        self.mode = mode

    def __str__(self):
        """A descriptor prints as its plain mode string (e.g. "RGB")."""
        return self.mode
class ImageMode(object):
    """Registry of mode descriptors, mirroring ``PIL.ImageMode``."""

    # Descriptor table, built lazily on first use. The original rebuilt
    # the whole table on every getmode() call and assigned it to a local
    # variable despite a comment claiming a global cache was set.
    _modes = None

    def getmode(self, mode):
        """Gets a mode descriptor for the given mode.

        :param mode: Mode string such as "RGB", "LA" or "I;16".
        :raises KeyError: if the mode is unknown.
        """
        if ImageMode._modes is None:
            modes = {}
            # core modes
            for m, (basemode, basetype, bands) in _MODEINFO.items():
                modes[m] = ModeDescriptor(m, bands, basemode, basetype)
            # extra experimental modes
            modes["RGBa"] = ModeDescriptor("RGBa",
                                           ("R", "G", "B", "a"), "RGB", "L")
            modes["LA"] = ModeDescriptor("LA", ("L", "A"), "L", "L")
            modes["La"] = ModeDescriptor("La", ("L", "a"), "L", "L")
            modes["PA"] = ModeDescriptor("PA", ("P", "A"), "RGB", "L")
            # mapping modes
            modes["I;16"] = ModeDescriptor("I;16", "I", "L", "L")
            modes["I;16L"] = ModeDescriptor("I;16L", "I", "L", "L")
            modes["I;16B"] = ModeDescriptor("I;16B", "I", "L", "L")
            # set class-level mode cache atomically (single assignment)
            ImageMode._modes = modes
        return ImageMode._modes[mode]
def _check_size(size):
"""
Common check to enforce type and sanity check on size tuples
:param size: Should be a 2 tuple of (width, height)
:returns: True, or raises a ValueError
"""
if not isinstance(size, (list, tuple)):
raise ValueError("Size must be a tuple")
if len(size) != 2:
raise ValueError("Size must be a tuple of length 2")
if size[0] < 0 or size[1] < 0:
raise ValueError("Width and height must be >= 0")
return True
def new(mode, size, color=0):
    """
    Creates a new image with the given mode and size.

    :param mode: The mode to use for the new image. See:
       :ref:`concept-modes`.
    :param size: A 2-tuple, containing (width, height) in pixels.
    :param color: What color to use for the image. Default is black.
       If given, this should be a single integer or floating point value
       for single-band modes, and a tuple for multi-band modes (one value
       per band). When creating RGB images, you can also use color
       strings as supported by the ImageColor module. If the color is
       None, the image is not initialised.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    _check_size(size)
    if color is None:
        # don't initialize the pixel data at all
        return Image(Image()._new(mode, size))
    if type(color).__name__ == "str":
        # css3-style specifier: resolve to a tuple, then swap to BGR order
        color = ImageColor().getcolor(color, mode)
        color = ImageDraw(None)._convert_bgr2rgb(color)
    return Image(Image()._new(mode, size, color))
def frombytes(mode, size, data, decoder_name="raw", *args):
    """
    Creates a copy of an image memory from pixel data in a buffer.

    In its simplest form, this function takes three arguments
    (mode, size, and unpacked pixel data). Any pixel decoder supported
    by PIL may be named via *decoder_name*. Note that this function
    decodes pixel data only, not entire image files.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A byte buffer containing raw data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    _check_size(size)
    decoder_args = args
    # a single tuple may be passed instead of an argument list
    if len(decoder_args) == 1 and isinstance(decoder_args[0], tuple):
        decoder_args = decoder_args[0]
    if decoder_name == "raw" and decoder_args == ():
        decoder_args = mode
    image = new(mode, size)
    image.frombytes(mode, size, data, decoder_name, decoder_args)
    return image
def fromstring(mode, size, data, decoder_name="raw", *args):
    """Deprecated PIL alias kept for compatibility; delegates to frombytes()."""
    return frombytes(mode, size, data, decoder_name, *args)
def frombuffer(mode, size, data, decoder_name="raw", *args):
    """
    Creates an image memory referencing pixel data in a byte buffer.

    Similar to :py:func:`~PIL.Image.frombytes`, but uses the data in the
    byte buffer where possible, so changes to the original buffer may be
    reflected in the image. Note that this decodes pixel data only, not
    entire image files.

    :param mode: The image mode. See: :ref:`concept-modes`.
    :param size: The image size.
    :param data: A bytes or other buffer object containing raw
        data for the given mode.
    :param decoder_name: What decoder to use.
    :param args: Additional parameters for the given decoder. For the
        default encoder ("raw") provide the full set of parameters::
            frombuffer(mode, size, data, "raw", mode, 0, 1)
    :returns: An :py:class:`~PIL.Image.Image` object.
    .. versionadded:: 1.1.4
    """
    _check_size(size)
    # may pass tuple instead of argument list
    if len(args) == 1 and isinstance(args[0], tuple):
        args = args[0]
    if decoder_name == "raw":
        if args == ():
            args = mode, 0, -1  # may change to (mode, 0, 1) post-1.1.6
        if args[0] in _MAPMODES:
            channels, depth = Image()._get_channels_and_depth(mode)
            # Interpret the buffer with the dtype matching the mode.
            # The original read it with np.frombuffer's float64 default and
            # cast afterwards, which mis-sized the array (and corrupted the
            # pixel values) for every non-float64 mode.
            im = np.frombuffer(data, dtype=depth)
            im = im.reshape((size[1], size[0], channels))
            im_ = new(mode, (1, 1))
            im_._instance = im
            im_.readonly = 1
            return im_
    return frombytes(mode, size, data, decoder_name, args)
def fromarray(obj, mode=None):
    """
    Creates an image memory from an object exporting the array interface.

    If you have an image in NumPy::
        from PIL import Image
        import numpy as np
        im = Image.open('hopper.jpg')
        a = np.asarray(im)
    Then this can be used to convert it to a Pillow image::
        im = Image.fromarray(a)

    :param obj: Object with array interface (numpy.ndarray).
    :param mode: Mode to use (will be determined from type if None)
        See: :ref:`concept-modes`.
    :returns: An image object.
    :raises TypeError: if *obj* is not a numpy array.
    .. versionadded:: 1.1.6
    """
    if not isinstance(obj, np.ndarray):
        raise TypeError("Cannot handle this data type")
    _mode = Image()._get_mode(obj.shape, obj.dtype)
    # Convert from PIL channel order (RGB/RGBA) to OpenCV's BGR/BGRA.
    # The original tested the *detected* mode for RGB but the caller-supplied
    # ``mode`` parameter (usually None) for RGBA, so 4-channel arrays were
    # silently left unconverted unless mode="RGBA" was passed explicitly.
    if _mode == 'RGB':
        obj = cv2.cvtColor(obj, cv2.COLOR_RGB2BGR)
    elif _mode == 'RGBA' or mode == "RGBA":
        obj = cv2.cvtColor(obj, cv2.COLOR_RGBA2BGRA)
    return Image(obj)
# Lookup table mapping an array-interface signature to a PIL (mode, rawmode)
# pair, as used by PIL's fromarray machinery.
_fromarray_typemap = {
    # (shape, typestr) => mode, rawmode
    # first two members of shape are set to one
    ((1, 1), "|b1"): ("1", "1;8"),
    ((1, 1), "|u1"): ("L", "L"),
    ((1, 1), "|i1"): ("I", "I;8"),
    ((1, 1), "<u2"): ("I", "I;16"),
    ((1, 1), ">u2"): ("I", "I;16B"),
    ((1, 1), "<i2"): ("I", "I;16S"),
    ((1, 1), ">i2"): ("I", "I;16BS"),
    ((1, 1), "<u4"): ("I", "I;32"),
    ((1, 1), ">u4"): ("I", "I;32B"),
    ((1, 1), "<i4"): ("I", "I;32S"),
    ((1, 1), ">i4"): ("I", "I;32BS"),
    ((1, 1), "<f4"): ("F", "F;32F"),
    ((1, 1), ">f4"): ("F", "F;32BF"),
    ((1, 1), "<f8"): ("F", "F;64F"),
    ((1, 1), ">f8"): ("F", "F;64BF"),
    ((1, 1, 2), "|u1"): ("LA", "LA"),
    ((1, 1, 3), "|u1"): ("RGB", "RGB"),
    ((1, 1, 4), "|u1"): ("RGBA", "RGBA"),
    }
# shortcuts
# Register the native-endian int32/float32 typestrs as well.
_fromarray_typemap[((1, 1), _ENDIAN + "i4")] = ("I", "I")
_fromarray_typemap[((1, 1), _ENDIAN + "f4")] = ("F", "F")
def open(fl, mode='r'):
    """Open an image from a filename, file object or PIL image.

    :param fl: A filename string, an open binary file object, a
        cStringIO buffer (Python 2 only), or an object with a ``mode``
        attribute (e.g. a PIL image).
    :param mode: Accepted for PIL compatibility; only 'r' semantics are
        implemented here.
    :returns: An :py:class:`Image` object.
    :raises IOError: if a filename is given and does not exist.
    """
    # NOTE(review): ``basstring`` and ``fil_object`` are presumably py2/py3
    # compatibility aliases defined earlier in this module -- confirm.
    _mode = None
    _format = None
    if isinstance(fl, basstring):
        if not os.path.isfile(fl):
            raise IOError("cannot find image file", fl)
        if os.path.splitext(fl)[1].lower() == ".gif":
            # GIF decoding is delegated to the optional gif2numpy package.
            if gif2numpy_installed:
                _instances, _exts, _image_specs = gif2numpy.convert(fl)
                _instance = _instances[0]
                img = Image(_instance, fl, instances = _instances, exts = _exts, image_specs = _image_specs)
            else:
                raise NotImplementedError("gif2numpy has not been installed. Unable to read gif images, install it with: pip install gif2numpy")
        else:
            _instance = cv2.imread(fl, cv2.IMREAD_UNCHANGED)
            # _mode = Image()._get_mode(_instance.shape, _instance.dtype)
            img = Image(_instance, fl)
        return img
    if isinstance(fl, fil_object):
        # File-like object: decode the raw bytes in memory.
        file_bytes = np.asarray(bytearray(fl.read()), dtype=np.uint8)
        _instance = cv2.imdecode(file_bytes, cv2.IMREAD_UNCHANGED)
        # _mode = Image()._get_mode(_instance.shape, _instance.dtype)
        img = Image(_instance)
        return img
    if not py3:
        # Python 2 only: accept a cStringIO buffer.
        if isinstance(fl, cStringIO.InputType):
            fl.seek(0)
            img_array = np.asarray(bytearray(fl.read()), dtype=np.uint8)
            return Image(cv2.imdecode(img_array, 1))
    if hasattr(fl, 'mode'):
        # Duck-typed PIL image: convert through numpy, swapping RGB to BGR.
        image = np.array(fl)
        _mode = fl.mode
        if _mode == 'RGB':
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        _instance = image
        img = Image(_instance)
        return img
def blend(img1, img2, alpha):
    """Interpolate between two images: (1 - alpha) * img1 + alpha * img2,
    with alpha between 0.0 and 1.0."""
    mixed = cv2.addWeighted(img1, 1.0 - alpha, img2, alpha, 0)
    return Image(mixed)
def composite(background, foreground, mask, np_image=False, neg_mask=False):
    "pastes the foreground image into the background image using the mask"
    # Inputs may be raw numpy arrays or Image wrappers; both branches
    # normalize to float arrays plus a [0, 1] alpha mask.
    # Convert uint8 to float
    if isinstance(background, np.ndarray):
        foreground = foreground.astype(float)
        old_type = background.dtype
        background = background.astype(float)
        # Normalize the alpha mask to keep intensity between 0 and 1
        if neg_mask:
            alphamask = mask.astype(float)/255
        else:
            alphamask = (~mask).astype(float)/255
    else:
        foreground = foreground._instance.astype(float)
        # NOTE(review): this reads .dtype on the Image wrapper, not on
        # ._instance -- confirm Image exposes a dtype attribute.
        old_type = background.dtype
        background = background._instance.astype(float)
        # Normalize the alpha mask to keep intensity between 0 and 1
        if neg_mask:
            alphamask = mask._instance.astype(float)/255
        else:
            alphamask = (~(mask._instance)).astype(float)/255
    # Broadcast a single-channel mask up to the foreground's channel count.
    fslen = len(foreground.shape)
    if len(alphamask.shape) != fslen:
        img = np.zeros(foreground.shape, dtype=foreground.dtype)
        if fslen>2:
            if foreground.shape[2] >= 2:
                img[:,:,0] = alphamask
                img[:,:,1] = alphamask
            if foreground.shape[2] >= 3:
                img[:,:,2] = alphamask
            if foreground.shape[2] == 4:
                img[:,:,3] = alphamask
            alphamask = img.copy()
    # Multiply the foreground with the alpha mask
    try:
        foreground = cv2.multiply(alphamask, foreground)
    except:
        if alphamask.shape[2] == 1 and foreground.shape[2] == 3:
            triplemask = cv2.merge((alphamask, alphamask, alphamask))
            foreground = cv2.multiply(triplemask, foreground)
        else:
            raise ValueError("OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in cv::arithm_op, file ..\..\..\..\opencv\modules\core\src\arithm.cpp")
    # Multiply the background with ( 1 - alpha )
    bslen = len(background.shape)
    if len(alphamask.shape) != bslen:
        img = np.zeros(background.shape, dtype=background.dtype)
        if bslen>2:
            if background.shape[2] >= 2:
                img[:,:,0] = alphamask
                img[:,:,1] = alphamask
            if background.shape[2] >= 3:
                img[:,:,2] = alphamask
            if background.shape[2] == 4:
                img[:,:,3] = alphamask
            alphamask = img.copy()
    try:
        background = cv2.multiply(1.0 - alphamask, background)
    except:
        if alphamask.shape[2] == 1 and foreground.shape[2] == 3:
            # NOTE(review): ``triplemask`` is only bound if the foreground
            # multiply above also fell into its except branch; otherwise
            # this raises NameError. Needs a fix.
            background = cv2.multiply(1.0 - triplemask, background)
        else:
            raise ValueError("OpenCV Error: Sizes of input arguments do not match (The operation is neither 'array op array' (where arrays have the same size and the same number of channels), nor 'array op scalar', nor 'scalar op array') in cv::arithm_op, file ..\..\..\..\opencv\modules\core\src\arithm.cpp")
    # Add the masked foreground and background
    outImage = cv2.add(foreground, background)
    # NOTE(review): /255 then *255 is a no-op for float arrays -- presumably
    # left over from an earlier normalization scheme.
    outImage = outImage/255
    outImage = outImage*255
    outImage = outImage.astype(old_type)
    if np_image:
        return outImage
    else:
        return Image(outImage)
def alpha_composite(im1, im2):
    """
    Alpha composite im2 over im1.

    :param im1: The first image. Must have mode RGBA.
    :param im2: The second image. Must have mode RGBA, and the same size as
       the first image.
    :returns: An :py:class:`~PIL.Image.Image` object.
    """
    # NOTE(review): im1/im2 are used with .shape/.dtype below, so they are
    # expected to be numpy arrays rather than Image wrappers -- confirm
    # with callers.
    r1, g1, b1, a1 = Image().split(im1)
    r2, g2, b2, a2 = Image().split(im2)
    # Pre-multiply each image against black using its own alpha channel.
    alphacomp = np.zeros(im1.shape, dtype=im1.dtype)
    im3 = composite(alphacomp, im1, a1)
    alphacomp = np.zeros(im2.shape, dtype=im2.dtype)
    im4 = composite(alphacomp, im2, a2)
    # Blend the two pre-multiplied halves equally.
    return blend(im3, im4, 0.5)
def merge(mode, colorbandtuple, image=False):
    """Merge individual channel planes into a single multi-band image.

    :param mode: Target mode; determines channel count and dtype.
    :param colorbandtuple: Tuple of 2, 3 or 4 single-channel arrays of
        identical shape.
    :param image: If True return the raw numpy array instead of an Image.
    :returns: An :py:class:`Image` object (or ndarray if *image* is True).
    :raises ValueError: if the tuple does not hold 2-4 bands (the
        original fell through to an UnboundLocalError here).
    """
    blue = None
    alpha = None
    if len(colorbandtuple) == 2:
        red, green = colorbandtuple
    elif len(colorbandtuple) == 3:
        red, green, blue = colorbandtuple
    elif len(colorbandtuple) == 4:
        red, green, blue, alpha = colorbandtuple
    else:
        raise ValueError("colorbandtuple must contain 2, 3 or 4 bands")
    channels, depth = Image()._get_channels_and_depth(mode)
    img_dim = red.shape
    img = np.zeros((img_dim[0], img_dim[1], channels), dtype=depth)
    img[:, :, 0] = red
    img[:, :, 1] = green
    if blue is not None:
        img[:, :, 2] = blue
    if alpha is not None:
        img[:, :, 3] = alpha
    if image:
        return img
    else:
        return Image(img)
def linear_gradient(mode, size=256):
    """Generate a size x size linear gradient from black to white, top to bottom.

    :param mode: Mode determining channel count and dtype.
    :param size: Edge length of the square gradient (default 256).
    :returns: A numpy array of shape (size, size) for single-band modes or
        (size, size, channels) otherwise.
    """
    channels, depth = Image()._get_channels_and_depth(mode)
    y = np.linspace(0, size-1, size)
    if channels == 1:
        gradient = np.tile(y, (size, 1)).T
    else:
        # Replicate the column gradient across all channels. The original
        # only handled ``channels > 3`` here, so 2- and 3-channel (RGB)
        # modes silently returned None.
        gradient = np.tile(y, (channels, size, 1)).T
    return gradient.astype(depth)
def radial_gradient(mode, size=256, innerColor=(0, 0, 0), outerColor=(255, 255, 255)):
    "Generate 256x256 radial gradient from black to white, centre to edge."
    channels, depth = Image()._get_channels_and_depth(mode)
    gradient = np.zeros((size, size, channels), dtype=depth)
    if channels == 1:
        # Distance-from-centre field normalised to roughly [0, sqrt(2)].
        _max_value = 1
        x_axis = np.linspace(-_max_value, _max_value, size)[:, None]
        y_axis = np.linspace(-_max_value, _max_value, size)[None, :]
        gradient = np.sqrt(x_axis ** 2 + y_axis ** 2)
        if innerColor == 255 or innerColor == (255, 255, 255):
            # White centre requested: invert the field.
            gradient = _max_value-gradient
        return gradient
    elif channels ==3:
        # NOTE(review): this branch looks broken -- ``gradient`` is still the
        # all-zeros array here (no distance field was computed), so the
        # max() guard never fires, and ``gradient[:, :, None]`` turns the
        # (size, size, 3) array into 4-D before the broadcast. It also
        # ignores innerColor/outerColor. Needs a rewrite along the lines
        # of the channels==1 / else branches.
        inner = np.array([0, 0, 0])[None, None, :]
        outer = np.array([1, 1, 1])[None, None, :]
        if gradient.max() != 0:
            gradient /= gradient.max()
        gradient = gradient[:, :, None]
        gradient = gradient * outer + (1 - gradient) * inner
        # gradient = gradient/255.0*255
        return gradient
    else:
        # Per-pixel interpolation between innerColor and outerColor.
        imgsize = gradient.shape[:2]
        for y in range(imgsize[1]):
            for x in range(imgsize[0]):
                #Find the distance to the center
                distanceToCenter = np.sqrt((x - imgsize[0]//2) ** 2 + (y - imgsize[1]//2) ** 2)
                #Make it on a scale from 0 to 1innerColor
                distanceToCenter = distanceToCenter / (np.sqrt(2) * imgsize[0]/2)
                #Calculate r, g, and b values
                r = outerColor[0] * distanceToCenter + innerColor[0] * (1 - distanceToCenter)
                g = outerColor[1] * distanceToCenter + innerColor[1] * (1 - distanceToCenter)
                b = outerColor[2] * distanceToCenter + innerColor[2] * (1 - distanceToCenter)
                # NOTE(review): alpha reuses index [2], duplicating the blue
                # component -- presumably should read index [3] when given.
                a = outerColor[2] * distanceToCenter + innerColor[2] * (1 - distanceToCenter)
                #Place the pixel
                gradient[y, x] = (int(r), int(g), int(b), int(a))
        return gradient
def constant(image, value):
    """Fill a single-band ("L") channel of *image*'s size with grey level *value*.

    :param image: Image whose size is copied.
    :param value: Grey level for every pixel.
    :returns: A new :py:class:`Image` object.
    """
    # The original called ``Image.new(...)``, but ``new`` is the
    # module-level factory in this file (the Image class only exposes
    # the private ``_new`` helper), so that call raised AttributeError.
    return new("L", image.size, value)
def duplicate(image):
    "Create a copy of a channel"
    # Delegates to the object's own copy() method.
    return image.copy()
def invert(image, im=None):
    """Invert a channel (bitwise NOT of every pixel).

    :param image: Image wrapper; its ``getim()`` array is used when *im*
        is not given.
    :param im: Optional raw array to invert instead of *image*.
    """
    target = image.getim() if im is None else im
    return ~target
def _reduce_images(image1, image2):
"bring two images to an identical size using the minimum side of each image"
s0 = min(image1._instance.shape[0], image2._instance.shape[0])
s1 = min(image1._instance.shape[1], image2._instance.shape[1])
image1_copy = image1._instance[:s0,:s1]
image2_copy = image2._instance[:s0,:s1]
return image1_copy, image2_copy
def lighter(image1, image2):
    """Select the lighter pixels from each image (element-wise maximum)."""
    a, b = _reduce_images(image1, image2)
    return np.maximum(a, b)
def darker(image1, image2):
    """Select the darker pixels from each image (element-wise minimum)."""
    a, b = _reduce_images(image1, image2)
    return np.minimum(a, b)
def difference(image1, image2):
    "Subtract one image from another"
    # does not work as in PIL, needs to be fixed
    # Calculate absolute difference
    # (abs(image1 - image2)).
    # NOTE(review): with uint8 inputs np.subtract wraps around *before*
    # the abs(), so results differ from PIL's saturating difference.
    image1_copy, image2_copy = _reduce_images(image1, image2)
    return np.absolute(np.subtract(image1_copy, image2_copy))
def multiply(image1, image2):
    "Superimpose two positive images"
    # broken, needs to be fixed
    # Superimpose positive images
    # (image1 * image2 / MAX).
    # <p>
    # Superimposes two images on top of each other. If you multiply an
    # image with a solid black image, the result is black. If you multiply
    # with a solid white image, the image is unaffected.
    # NOTE(review): the result is float (np.divide promotes) and is not
    # cast back to the source dtype, unlike PIL.
    image1_copy, image2_copy = _reduce_images(image1, image2)
    div = np.divide(image2_copy, 255)
    return np.multiply(image1_copy, div)
def screen(image1, image2):
    "Superimpose two negative images"
    # Superimpose negative images
    # (MAX - ((MAX - image1) * (MAX - image2) / MAX)).
    # <p>
    # Superimposes two inverted images on top of each other.
    # NOTE(review): PIL uses the constant MAX=255 here; this version uses
    # the element-wise maximum of the two inputs, which both deviates from
    # PIL and can divide by zero where both pixels are 0.
    image1_copy, image2_copy = _reduce_images(image1, image2)
    max_image = np.maximum(image1_copy, image2_copy)
    return (max_image - ((max_image - image1_copy) * (max_image - image2_copy) / max_image))
def add(image1, image2, scale=1.0, offset=0):
    """Add two images: (image1 + image2) / scale + offset.

    If omitted, scale defaults to 1.0 and offset to 0.
    """
    a, b = _reduce_images(image1, image2)
    return np.add(a, b) / scale + offset
def subtract(image1, image2, scale=1.0, offset=0):
    """Subtract two images: (image1 - image2) / scale + offset.

    If omitted, scale defaults to 1.0 and offset to 0.
    """
    a, b = _reduce_images(image1, image2)
    return np.subtract(a, b) / scale + offset
def add_modulo(image1, image2):
    "Add two images without clipping"
    # Add images without clipping
    # ((image1 + image2) % MAX).
    # Adds two images, without clipping the result.
    # NOTE(review): PIL uses the constant MAX=255 as the modulus; this
    # version uses the element-wise maximum of the inputs, which differs
    # from PIL and can divide by zero where both pixels are 0.
    image1_copy, image2_copy = _reduce_images(image1, image2)
    return np.mod(np.add(image1_copy, image2_copy), np.maximum(image1_copy, image2_copy))
def subtract_modulo(image1, image2):
    "Subtract two images without clipping"
    # Subtract images without clipping
    # ((image1 - image2) % MAX).
    # Subtracts two images, without clipping the result.
    # NOTE(review): as with add_modulo, PIL's MAX=255 is replaced by the
    # element-wise maximum of the inputs, which differs from PIL.
    image1_copy, image2_copy = _reduce_images(image1, image2)
    return np.mod(np.subtract(image1_copy, image2_copy), np.maximum(image1_copy, image2_copy))
def logical_and(image1, image2):
    """Element-wise logical AND of two images (boolean result array)."""
    a, b = _reduce_images(image1, image2)
    return np.logical_and(a, b)
def logical_or(image1, image2):
    """Element-wise logical OR of two images (boolean result array)."""
    a, b = _reduce_images(image1, image2)
    return np.logical_or(a, b)
def logical_xor(image1, image2):
    """Element-wise logical XOR of two images (boolean result array)."""
    a, b = _reduce_images(image1, image2)
    return np.logical_xor(a, b)
class Brightness(object):
    """Brightness enhancer mirroring ``PIL.ImageEnhance.Brightness``."""

    def __init__(self, image):
        self.image = image

    def enhance(self, factor):
        """Return a new image with brightness adjusted by *factor*
        (1.0 leaves the image unchanged, 0.0 yields black)."""
        src = self.image.getim()
        # Map the PIL factor onto an additive bias: 1.0 -> 0, 0.0 -> -255.
        bias = (1 - factor) * -255
        shifted = cv2.addWeighted(src, 1.0, np.zeros(src.shape, src.dtype), 0, bias)
        return Image(shifted)
class Contrast(object):
    """Contrast enhancer mirroring ``PIL.ImageEnhance.Contrast``."""

    def __init__(self, image):
        self.image = image

    def enhance(self, factor):
        """Return a new image with contrast scaled by *factor*
        (1.0 leaves the image unchanged)."""
        src = self.image.getim()
        scaled = cv2.convertScaleAbs(src, alpha=factor, beta=0)
        return Image(scaled)
class Filter(object):
    """Abstract marker base class for image filters."""
    pass
class MultibandFilter(Filter):
    """Marker base class for filters applicable to multi-band images."""
    pass
class BuiltinFilter(MultibandFilter):
    """Base for the fixed-kernel filters below; subclasses define
    ``filterargs`` as (kernel_size, scale, offset, kernel_values)."""

    def filter(self, image):
        # Palette images cannot be convolved channel-wise.
        if image.mode == "P":
            raise ValueError("cannot filter palette images")
        return image.filter(*self.filterargs)
class GaussianBlur(MultibandFilter):
    """Gaussian blur filter.

    :param radius: Blur radius.
    """
    name = "GaussianBlur"

    def __init__(self, radius=2):
        self.radius = radius
        self.name = "GaussianBlur"

    def filter(self, image):
        # OpenCV needs an odd kernel size; PIL's radius maps to 2r + 1.
        kernel_size = self.radius*2+1
        # OpenCV's documented default sigma heuristic for a given kernel size.
        sigmaX = 0.3*((kernel_size-1)*0.5 - 1) + 0.8
        dst = cv2.GaussianBlur(image._instance, (kernel_size, kernel_size), sigmaX, borderType=cv2.BORDER_DEFAULT)
        return Image(dst)
class BLUR(BuiltinFilter):
    """5x5 hollow-centre averaging kernel, scale 16, offset 0."""
    name = "Blur"
    filterargs = (5, 5), 16, 0, (
        1, 1, 1, 1, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 0, 0, 0, 1,
        1, 1, 1, 1, 1)
class CONTOUR(BuiltinFilter):
    """3x3 Laplacian-style edge kernel with offset 255 (inverted edges)."""
    name = "Contour"
    filterargs = (3, 3), 1, 255, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1)
class DETAIL(BuiltinFilter):
    """3x3 mild sharpening kernel, scale 6, offset 0."""
    name = "Detail"
    filterargs = (3, 3), 6, 0, (
        0, -1, 0,
        -1, 10, -1,
        0, -1, 0)
class EDGE_ENHANCE(BuiltinFilter):
    """3x3 edge-enhance kernel, scale 2, offset 0."""
    name = "Edge-enhance"
    filterargs = (3, 3), 2, 0, (
        -1, -1, -1,
        -1, 10, -1,
        -1, -1, -1)
class EDGE_ENHANCE_MORE(BuiltinFilter):
    """3x3 stronger edge-enhance kernel, scale 1, offset 0."""
    name = "Edge-enhance More"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 9, -1,
        -1, -1, -1)
class EMBOSS(BuiltinFilter):
    """3x3 emboss kernel; offset 128 re-centres the signed response."""
    name = "Emboss"
    filterargs = (3, 3), 1, 128, (
        -1, 0, 0,
        0, 1, 0,
        0, 0, 0)
class FIND_EDGES(BuiltinFilter):
    """3x3 Laplacian edge-detection kernel, scale 1, offset 0."""
    name = "Find Edges"
    filterargs = (3, 3), 1, 0, (
        -1, -1, -1,
        -1, 8, -1,
        -1, -1, -1)
class SHARPEN(BuiltinFilter):
    """3x3 sharpening kernel, scale 16, offset 0."""
    name = "Sharpen"
    filterargs = (3, 3), 16, 0, (
        -2, -2, -2,
        -2, 32, -2,
        -2, -2, -2)
class SMOOTH(BuiltinFilter):
    """3x3 mild smoothing kernel, scale 13, offset 0."""
    name = "Smooth"
    filterargs = (3, 3), 13, 0, (
        1, 1, 1,
        1, 5, 1,
        1, 1, 1)
class SMOOTH_MORE(BuiltinFilter):
    """5x5 stronger smoothing kernel, scale 100, offset 0."""
    name = "Smooth More"
    filterargs = (5, 5), 100, 0, (
        1, 1, 1, 1, 1,
        1, 5, 5, 5, 1,
        1, 5, 44, 5, 1,
        1, 5, 5, 5, 1,
        1, 1, 1, 1, 1)
# Demo / smoke-test script: fetches a test image if needed, then exercises
# new/open/draw/crop/transpose/paste against the PIL-style API.
if __name__ == '__main__':
    # var init
    testfile = "lena1.jpg"
    if os.path.isfile("lena.jpg"):
        testfile = "lena.jpg"
    elif os.path.isfile("Images/lena.jpg"):
        testfile = "Images/lena.jpg"
    else:
        # Download the sample image if it is not present locally.
        url_loc = "https://raw.githubusercontent.com/bunkahle/PILasOPENCV/master/tests/lena.jpg"
        if py3:
            import requests, builtins
            # builtins.open, because this module shadows open() above
            f = builtins.open(testfile, "wb")
            r = requests.get(url_loc)
            f.write(r.content)
        else:
            import urllib2, cStringIO
            imgdata = urllib2.urlopen(url_loc).read()
            img = open(cStringIO.StringIO(imgdata))
            img.save(testfile)
    outfile1 = "lena1.bmp"
    outfile2 = "lena2.bmp"
    thsize = (128, 128)
    box = (100, 100, 400, 400)
    # the old style:
    # from PIL import Image as PILImage
    # pil_image = PILImage.open(testfile)
    # print(pil_image.format, pil_image.size, pil_image.mode)
    # pil_image.save(outfile1)
    # pil_image.show()
    # small_pil = pil_image.copy()
    # small_pil.thumbnail(thsize)
    # small_pil.show()
    # region_pil = pil_image.crop(box)
    # region_pil = region_pil.transpose(PILImage.ROTATE_180)
    # pil_image.paste(region_pil, box)
    # pil_image.show()
    # the new style:
    # if you import the library from site-packages import like this:
    # import PILasOPENCV as Image
    # im = Image.new("RGB", (512, 512), "white")
    im = new("RGB", (512, 512), "red")
    im.show()
    print (type(im))
    print(im.format, im.size, im.mode)
    # None (512, 512) RGB
    # <class 'Image'>
    # im = Image.open(testfile)
    im = open(testfile)
    print(im.format, im.size, im.mode)
    # Text drawing is optional: skipped if arial.ttf is unavailable.
    font_success = True
    try:
        font = truetype("arial.ttf", 28)
    except:
        font_success = False
    draw = Draw(im)
    if font_success:
        text = "Lena's\nimage"
        draw.text((249,435), text, font=font, fill=(0, 0, 0))
    # JPEG (512, 512) RGB
    # im.save(outfile2)
    im.show()
    small = im.copy()
    small.thumbnail(thsize)
    small.show()
    region = im.crop(box)
    print("region",region.format, region.size, region.mode)
    # region = region.transpose(Image.ROTATE_180)
    region = region.transpose(ROTATE_180)
    region.show()
    im.paste(region, box)
    im.show()
7956151cdba0ab2afd51b433efd4d89206bb2d70 | 22,195 | py | Python | tacker/sol_refactored/infra_drivers/openstack/openstack.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 116 | 2015-10-18T02:57:08.000Z | 2022-03-15T04:09:18.000Z | tacker/sol_refactored/infra_drivers/openstack/openstack.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 6 | 2016-11-07T22:15:54.000Z | 2021-05-09T06:13:08.000Z | tacker/sol_refactored/infra_drivers/openstack/openstack.py | h1r0mu/tacker | 8c69dda51fcfe215c4878a86b82018d2b96e5561 | [
"Apache-2.0"
] | 166 | 2015-10-20T15:31:52.000Z | 2021-11-12T08:39:49.000Z | # Copyright (C) 2021 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
import os
import pickle
import subprocess
from oslo_log import log as logging
from oslo_utils import uuidutils
from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.openstack import heat_utils
from tacker.sol_refactored.infra_drivers.openstack import userdata_default
from tacker.sol_refactored import objects
LOG = logging.getLogger(__name__)
CONF = config.CONF
class Openstack(object):
    def __init__(self):
        # Stateless driver: nothing to initialise.
        pass
def instantiate(self, req, inst, grant_req, grant, vnfd):
# make HOT
fields = self.make_hot(req, inst, grant_req, grant, vnfd)
LOG.debug("stack fields: %s", fields)
# create stack
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
heat_client.create_stack(fields)
# wait stack created
stack_name = fields['stack_name']
heat_client.wait_stack_create(stack_name)
# get stack resource
heat_reses = heat_client.get_resources(stack_name)
# make instantiated_vnf_info
self.make_instantiated_vnf_info(req, inst, grant, vnfd, heat_reses)
def make_hot(self, req, inst, grant_req, grant, vnfd):
flavour_id = req.flavourId
hot_dict = vnfd.get_base_hot(flavour_id)
if not hot_dict:
raise sol_ex.BaseHOTNotDefined()
userdata = None
userdata_class = None
if req.obj_attr_is_set('additionalParams'):
userdata = req.additionalParams.get('lcm-operation-user-data')
userdata_class = req.additionalParams.get(
'lcm-operation-user-data-class')
if userdata is None and userdata_class is None:
LOG.debug("Processing default userdata instantiate")
# NOTE: objects used here are dict compat.
fields = userdata_default.DefaultUserData.instantiate(
req, inst, grant_req, grant, vnfd.csar_dir)
elif userdata is None or userdata_class is None:
# Both must be specified.
raise sol_ex.UserdataMissing()
else:
LOG.debug("Processing %s %s instantiate", userdata, userdata_class)
tmp_csar_dir = vnfd.make_tmp_csar_dir()
script_dict = {
'request': req.to_dict(),
'vnf_instance': inst.to_dict(),
'grant_request': grant_req.to_dict(),
'grant_response': grant.to_dict(),
'tmp_csar_dir': tmp_csar_dir
}
script_path = os.path.join(
os.path.dirname(__file__), "userdata_main.py")
out = subprocess.run(["python3", script_path, "INSTANTIATE"],
input=pickle.dumps(script_dict),
capture_output=True)
vnfd.remove_tmp_csar_dir(tmp_csar_dir)
if out.returncode != 0:
LOG.debug("execute userdata class instantiate failed: %s",
out.stderr)
raise sol_ex.UserdataExecutionFailed(sol_detail=out.stderr)
fields = pickle.loads(out.stdout)
stack_name = heat_utils.get_stack_name(inst)
fields['stack_name'] = stack_name
fields['timeout_mins'] = (
CONF.v2_vnfm.openstack_vim_stack_create_timeout)
return fields
def _address_range_data_to_info(self, range_data):
obj = objects.ipOverEthernetAddressInfoV2_IpAddresses_AddressRange()
obj.minAddress = range_data.minAddress
obj.maxAddress = range_data.maxAddress
return obj
def _proto_data_to_info(self, proto_data):
# make CpProtocolInfo (5.5.3.9b) from CpProtocolData (4.4.1.10b)
proto_info = objects.CpProtocolInfoV2(
layerProtocol=proto_data.layerProtocol
)
ip_info = objects.IpOverEthernetAddressInfoV2()
ip_data = proto_data.ipOverEthernet
if ip_data.obj_attr_is_set('macAddress'):
ip_info.macAddress = ip_data.macAddress
if ip_data.obj_attr_is_set('segmentationId'):
ip_info.segmentationId = ip_data.segmentationId
if ip_data.obj_attr_is_set('ipAddresses'):
addr_infos = []
for addr_data in ip_data.ipAddresses:
addr_info = objects.IpOverEthernetAddressInfoV2_IpAddresses(
type=addr_data.type)
if addr_data.obj_attr_is_set('fixedAddresses'):
addr_info.addresses = addr_data.fixedAddresses
if addr_data.obj_attr_is_set('numDynamicAddresses'):
addr_info.isDynamic = True
if addr_data.obj_attr_is_set('addressRange'):
addr_info.addressRange = self._address_range_data_to_info(
addr_data.addressRange)
if addr_data.obj_attr_is_set('subnetId'):
addr_info.subnetId = addr_data.subnetId
addr_infos.append(addr_info)
ip_info.ipAddresses = addr_infos
proto_info.ipOverEthernet = ip_info
return proto_info
def make_instantiated_vnf_info(self, req, inst, grant, vnfd, heat_reses):
flavour_id = req.flavourId
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
flavourId=flavour_id,
vnfState='STARTED',
)
# make virtualStorageResourceInfo
storages = vnfd.get_storage_nodes(flavour_id)
reses = heat_utils.get_storage_reses(heat_reses)
storage_infos = []
storage_info_to_heat_res = {}
for res in reses:
storage_name = res['resource_name']
if storage_name not in list(storages.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD storage definition.", storage_name)
continue
storage_info = objects.VirtualStorageResourceInfoV2(
id=uuidutils.generate_uuid(),
virtualStorageDescId=storage_name,
storageResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
)
)
storage_infos.append(storage_info)
storage_info_to_heat_res[storage_info.id] = res
if storage_infos:
inst_vnf_info.virtualStorageResourceInfo = storage_infos
# make vnfcResourceInfo
vdus = vnfd.get_vdu_nodes(flavour_id)
reses = heat_utils.get_server_reses(heat_reses)
vnfc_res_infos = []
vnfc_res_info_to_heat_res = {}
for res in reses:
vdu_name = res['resource_name']
if vdu_name not in list(vdus.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD VDU definition.", vdu_name)
continue
vnfc_res_info = objects.VnfcResourceInfoV2(
id=uuidutils.generate_uuid(),
vduId=vdu_name,
computeResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
),
)
vdu_cps = vnfd.get_vdu_cps(flavour_id, vdu_name)
cp_infos = []
for cp in vdu_cps:
cp_info = objects.VnfcResourceInfoV2_VnfcCpInfo(
id=uuidutils.generate_uuid(),
cpdId=cp,
# vnfExtCpId or vnfLinkPortId may set later
)
cp_infos.append(cp_info)
if cp_infos:
vnfc_res_info.vnfcCpInfo = cp_infos
# find storages used by this
storage_ids = []
for storage_id, storage_res in storage_info_to_heat_res.items():
if (vdu_name in storage_res.get('required_by', []) and
res.get('parent_resource') ==
storage_res.get('parent_resource')):
storage_ids.append(storage_id)
if storage_ids:
vnfc_res_info.storageResourceIds = storage_ids
vnfc_res_infos.append(vnfc_res_info)
vnfc_res_info_to_heat_res[vnfc_res_info.id] = res
if vnfc_res_infos:
inst_vnf_info.vnfcResourceInfo = vnfc_res_infos
# make vnfVirtualLinkResourceInfo
vls = vnfd.get_virtual_link_nodes(flavour_id)
reses = heat_utils.get_network_reses(heat_reses)
vnf_vl_infos = []
vnf_vl_info_to_heat_res = {}
for res in reses:
vl_name = res['resource_name']
if vl_name not in list(vls.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD VL definition.", vl_name)
continue
vnf_vl_info = objects.VnfVirtualLinkResourceInfoV2(
id=uuidutils.generate_uuid(),
vnfVirtualLinkDescId=vl_name,
networkResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
),
# vnfLinkPorts set later
)
vnf_vl_infos.append(vnf_vl_info)
vnf_vl_info_to_heat_res[vnf_vl_info.id] = res
if vnf_vl_infos:
inst_vnf_info.vnfVirtualLinkResourceInfo = vnf_vl_infos
# make extVirtualLinkInfo
ext_vls = []
req_ext_vls = []
ext_cp_infos = []
if grant.obj_attr_is_set('extVirtualLinks'):
req_ext_vls = grant.extVirtualLinks
elif req.obj_attr_is_set('extVirtualLinks'):
req_ext_vls = req.extVirtualLinks
for req_ext_vl in req_ext_vls:
ext_vl = objects.ExtVirtualLinkInfoV2(
id=req_ext_vl.id,
resourceHandle=objects.ResourceHandle(
id=uuidutils.generate_uuid(),
resourceId=req_ext_vl.resourceId
),
currentVnfExtCpData=req_ext_vl.extCps
)
if req_ext_vl.obj_attr_is_set('vimConnectionId'):
ext_vl.resourceHandle.vimConnectionId = (
req_ext_vl.vimConnectionId)
if req_ext_vl.obj_attr_is_set('resourceProviderId'):
ext_vl.resourceHandle.resourceProviderId = (
req_ext_vl.resourceProviderId)
ext_vls.append(ext_vl)
if not req_ext_vl.obj_attr_is_set('extLinkPorts'):
continue
link_ports = []
for req_link_port in req_ext_vl.extLinkPorts:
link_port = objects.ExtLinkPortInfoV2(
id=req_link_port.id,
resourceHandle=req_link_port.resourceHandle,
)
ext_cp_info = objects.VnfExtCpInfoV2(
id=uuidutils.generate_uuid(),
extLinkPortId=link_port.id
# associatedVnfcCpId may set later
)
link_port.cpInstanceId = ext_cp_info.id
for ext_cp in req_ext_vl.extCps:
found = False
for key, cp_conf in ext_cp.cpConfig.items():
if (cp_conf.obj_attr_is_set('linkPortId') and
cp_conf.linkPortId == link_port.id):
ext_cp_info.cpdId = ext_cp.cpdId
ext_cp_info.cpConfigId = key
# NOTE: cpProtocolInfo can't be filled
found = True
break
if found:
break
link_ports.append(link_port)
ext_cp_infos.append(ext_cp_info)
ext_vl.extLinkPorts = link_ports
if ext_vls:
inst_vnf_info.extVirtualLinkInfo = ext_vls
# ext_cp_infos set later
# make extManagedVirtualLinkInfo
ext_mgd_vls = []
req_mgd_vls = []
if grant.obj_attr_is_set('extManagedVirtualLinks'):
req_mgd_vls = grant.extManagedVirtualLinks
elif req.obj_attr_is_set('extManagedVirtualLinks'):
req_mgd_vls = req.extManagedVirtualLinks
for req_mgd_vl in req_mgd_vls:
ext_mgd_vl = objects.ExtManagedVirtualLinkInfoV2(
id=req_mgd_vl.id,
vnfVirtualLinkDescId=req_mgd_vl.vnfVirtualLinkDescId,
networkResource=objects.ResourceHandle(
id=uuidutils.generate_uuid(),
resourceId=req_mgd_vl.resourceId
),
)
if req_mgd_vl.obj_attr_is_set('vimConnectionId'):
ext_mgd_vl.networkResource.vimConnectionId = (
req_mgd_vl.vimConnectionId)
if req_mgd_vl.obj_attr_is_set('resourceProviderId'):
ext_mgd_vl.networkResource.resourceProviderId = (
req_mgd_vl.resourceProviderId)
ext_mgd_vls.append(ext_mgd_vl)
if not req_mgd_vl.obj_attr_is_set('vnfLinkPort'):
continue
link_ports = []
for req_link_port in req_mgd_vl.vnfLinkPort:
link_port = objects.VnfLinkPortInfoV2(
id=req_link_port.vnfLinkPortId,
resourceHandle=req_link_port.resourceHandle,
cpInstanceType='EXT_CP', # may be changed later
# cpInstanceId may set later
)
link_ports.append(link_port)
ext_mgd_vl.vnfLinkPort = link_ports
if ext_mgd_vls:
inst_vnf_info.extManagedVirtualLinkInfo = ext_mgd_vls
# make CP related infos
vdu_cps = vnfd.get_vducp_nodes(flavour_id)
reses = heat_utils.get_port_reses(heat_reses)
for res in reses:
cp_name = res['resource_name']
if cp_name not in list(vdu_cps.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD CP definition.", cp_name)
continue
vl_name = vnfd.get_vl_name_from_cp(flavour_id, vdu_cps[cp_name])
is_external = False
if vl_name is None: # extVirtualLink
is_external = True
# NOTE: object is diffrent from other vl types
vnf_link_port = objects.ExtLinkPortInfoV2(
id=uuidutils.generate_uuid(),
resourceHandle=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
)
)
ext_cp_info = objects.VnfExtCpInfoV2(
id=uuidutils.generate_uuid(),
extLinkPortId=vnf_link_port.id,
cpdId=cp_name
# associatedVnfcCpId may set later
)
vnf_link_port.cpInstanceId = ext_cp_info.id
found = False
for ext_vl in ext_vls:
for ext_cp in ext_vl.currentVnfExtCpData:
if ext_cp.cpdId == cp_name:
found = True
break
if found:
break
if found:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl.extLinkPorts.append(vnf_link_port)
else:
ext_vl.extLinkPorts = [vnf_link_port]
for key, cp_conf in ext_cp.cpConfig.items():
# NOTE: it is assumed that there is one item
# (with cpProtocolData) of cpConfig at the moment.
if cp_conf.obj_attr_is_set('cpProtocolData'):
proto_infos = []
for proto_data in cp_conf.cpProtocolData:
proto_info = self._proto_data_to_info(
proto_data)
proto_infos.append(proto_info)
ext_cp_info.cpProtocolInfo = proto_infos
ext_cp_info.cpConfigId = key
break
ext_cp_infos.append(ext_cp_info)
else:
# Internal VL or extManagedVirtualLink
vnf_link_port = objects.VnfLinkPortInfoV2(
id=uuidutils.generate_uuid(),
resourceHandle=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
cpInstanceType='EXT_CP' # may be changed later
)
)
is_internal = False
for vnf_vl_info in vnf_vl_infos:
if vnf_vl_info.vnfVirtualLinkDescId == vl_name:
# Internal VL
is_internal = True
if vnf_vl_info.obj_attr_is_set('vnfLinkPorts'):
vnf_vl_info.vnfLinkPorts.append(vnf_link_port)
else:
vnf_vl_info.vnfLinkPorts = [vnf_link_port]
if not is_internal:
# extManagedVirtualLink
for ext_mgd_vl in ext_mgd_vls:
# should be found
if ext_mgd_vl.vnfVirtualLinkDescId == vl_name:
if ext_mgd_vl.obj_attr_is_set('vnfLinkPorts'):
ext_mgd_vl.vnfLinkPorts.append(vnf_link_port)
else:
ext_mgd_vl.vnfLinkPorts = [vnf_link_port]
# link to vnfcResourceInfo.vnfcCpInfo
for vnfc_res_info in vnfc_res_infos:
if not vnfc_res_info.obj_attr_is_set('vnfcCpInfo'):
continue
vnfc_res = vnfc_res_info_to_heat_res[vnfc_res_info.id]
vdu_name = vnfc_res_info.vduId
if not (vdu_name in res.get('required_by', []) and
res.get('parent_resource') ==
vnfc_res.get('parent_resource')):
continue
for vnfc_cp in vnfc_res_info.vnfcCpInfo:
if vnfc_cp.cpdId != cp_name:
continue
if is_external:
vnfc_cp.vnfExtCpId = vnf_link_port.cpInstanceId
for ext_cp_info in ext_cp_infos:
if ext_cp_info.extLinkPortId == vnf_link_port.id:
ext_cp_info.associatedVnfcCpId = vnfc_cp.id
break
else:
vnf_link_port.cpInstanceType = 'VNFC_CP'
vnf_link_port.cpInstanceId = vnfc_cp.id
vnfc_cp.vnfLinkPortId = vnf_link_port.id
break
if ext_cp_infos:
inst_vnf_info.extCpInfo = ext_cp_infos
# NOTE: The followings are not handled at the moment.
# - handle tosca.nodes.nfv.VnfExtCp type
# Note that there is no example in current tacker examples which use
# tosca.nodes.nfv.VnfExtCp type and related BaseHOT definitions.
# - in the case of specifying linkPortId of extVirtualLinks or
# extManagedVirtualLinks, the link of vnfcCpInfo is not handled
# because the association of compute resource and port resource
# is not identified.
# make vnfcInfo
# NOTE: vnfcInfo only exists in SOL002
vnfc_infos = []
for vnfc_res_info in vnfc_res_infos:
vnfc_info = objects.VnfcInfoV2(
id=uuidutils.generate_uuid(),
vduId=vnfc_res_info.vduId,
vnfcResourceInfoId=vnfc_res_info.id,
vnfcState='STARTED'
)
vnfc_infos.append(vnfc_info)
if vnfc_infos:
inst_vnf_info.vnfcInfo = vnfc_infos
inst.instantiatedVnfInfo = inst_vnf_info
def terminate(self, req, inst, grant_req, grant, vnfd):
if req.terminationType == 'GRACEFUL':
timeout = CONF.v2_vnfm.default_graceful_termination_timeout
if req.obj_attr_is_set('gracefulTerminationTimeout'):
timeout = req.gracefulTerminationTimeout
eventlet.sleep(timeout)
# delete stack
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
heat_client.delete_stack(stack_name)
heat_client.wait_stack_delete(stack_name)
| 41.408582 | 79 | 0.577788 |
7956153f1259f96dd0865165951ff7eca1141bfb | 7,132 | py | Python | src/core/layers/scalable_graphlearn.py | yuanqidu/IDGL | 64d2d73289ca0f6dcab966062d4cb15844236b37 | [
"Apache-2.0"
] | 153 | 2019-12-22T07:26:10.000Z | 2022-03-29T02:03:18.000Z | src/core/layers/scalable_graphlearn.py | yuanqidu/IDGL | 64d2d73289ca0f6dcab966062d4cb15844236b37 | [
"Apache-2.0"
] | 17 | 2020-01-14T15:20:26.000Z | 2022-01-23T06:06:03.000Z | src/core/layers/scalable_graphlearn.py | yuanqidu/IDGL | 64d2d73289ca0f6dcab966062d4cb15844236b37 | [
"Apache-2.0"
] | 21 | 2020-07-27T00:58:37.000Z | 2022-02-02T01:47:37.000Z | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils.generic_utils import to_cuda, normalize_adj
from ..utils.constants import VERY_SMALL_NUMBER, INF
def compute_normalized_laplacian(adj):
    """Return the symmetrically normalized adjacency D^{-1/2} A D^{-1/2}.

    Rows with zero degree yield an inverse-sqrt degree of 0 (instead of
    inf), so isolated nodes do not produce inf/NaN entries.
    """
    degree = adj.sum(-1)
    inv_sqrt_degree = degree.pow(-0.5)
    inv_sqrt_degree[torch.isinf(inv_sqrt_degree)] = 0.
    d_inv_sqrt = torch.diagflat(inv_sqrt_degree)
    return d_inv_sqrt @ adj @ d_inv_sqrt
class AnchorGraphLearner(nn.Module):
    """Learns a context-to-anchor similarity (adjacency) matrix.

    Supports several similarity metrics selected by ``metric_type``; the
    resulting attention matrix can be sparsified by an epsilon threshold
    and/or a top-k selection.

    NOTE(review): only the 'weighted_cosine' metric actually uses the
    ``anchors`` argument of ``forward``; the other branches compute
    context-to-context similarity — confirm this is intended.
    """

    def __init__(self, input_size, hidden_size, topk=None, epsilon=None, num_pers=16, metric_type='attention', device=None):
        super(AnchorGraphLearner, self).__init__()
        self.device = device
        self.topk = topk
        self.epsilon = epsilon
        self.metric_type = metric_type
        # Each metric type owns its own learnable parameters.
        if metric_type == 'attention':
            self.linear_sims = nn.ModuleList([nn.Linear(input_size, hidden_size, bias=False) for _ in range(num_pers)])
            print('[ Multi-perspective {} AnchorGraphLearner: {} ]'.format(metric_type, num_pers))

        elif metric_type == 'weighted_cosine':
            # One weight vector per perspective; similarities are averaged.
            self.weight_tensor = torch.Tensor(num_pers, input_size)
            self.weight_tensor = nn.Parameter(nn.init.xavier_uniform_(self.weight_tensor))
            print('[ Multi-perspective {} AnchorGraphLearner: {} ]'.format(metric_type, num_pers))

        elif metric_type == 'gat_attention':
            self.linear_sims1 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])
            self.linear_sims2 = nn.ModuleList([nn.Linear(input_size, 1, bias=False) for _ in range(num_pers)])

            self.leakyrelu = nn.LeakyReLU(0.2)

            print('[ GAT_Attention AnchorGraphLearner]')

        elif metric_type == 'kernel':
            self.precision_inv_dis = nn.Parameter(torch.Tensor(1, 1))
            self.precision_inv_dis.data.uniform_(0, 1.0)
            self.weight = nn.Parameter(nn.init.xavier_uniform_(torch.Tensor(input_size, hidden_size)))
        elif metric_type == 'transformer':
            self.linear_sim1 = nn.Linear(input_size, hidden_size, bias=False)
            self.linear_sim2 = nn.Linear(input_size, hidden_size, bias=False)

        elif metric_type == 'cosine':
            # Parameter-free metric.
            pass

        else:
            raise ValueError('Unknown metric_type: {}'.format(metric_type))

        print('[ Graph Learner metric type: {} ]'.format(metric_type))

    def forward(self, context, anchors, ctx_mask=None, anchor_mask=None):
        """
        Parameters
        :context, (batch_size, ctx_size, dim)
        :ctx_mask, (batch_size, ctx_size)

        Returns
        :attention, (batch_size, ctx_size, ctx_size)
        """
        if self.metric_type == 'attention':
            attention = 0
            for _ in range(len(self.linear_sims)):
                context_fc = torch.relu(self.linear_sims[_](context))
                attention += torch.matmul(context_fc, context_fc.transpose(-1, -2))

            attention /= len(self.linear_sims)
            # Masked positions are filled with -INF so they vanish under a
            # downstream softmax.
            markoff_value = -INF

        elif self.metric_type == 'weighted_cosine':
            expand_weight_tensor = self.weight_tensor.unsqueeze(1)
            if len(context.shape) == 3:
                expand_weight_tensor = expand_weight_tensor.unsqueeze(1)

            context_fc = context.unsqueeze(0) * expand_weight_tensor
            context_norm = F.normalize(context_fc, p=2, dim=-1)

            anchors_fc = anchors.unsqueeze(0) * expand_weight_tensor
            anchors_norm = F.normalize(anchors_fc, p=2, dim=-1)

            # Average the per-perspective cosine similarities.
            attention = torch.matmul(context_norm, anchors_norm.transpose(-1, -2)).mean(0)
            markoff_value = 0

        elif self.metric_type == 'transformer':
            Q = self.linear_sim1(context)
            attention = torch.matmul(Q, Q.transpose(-1, -2)) / math.sqrt(Q.shape[-1])
            markoff_value = -INF

        elif self.metric_type == 'gat_attention':
            attention = []
            for _ in range(len(self.linear_sims1)):
                a_input1 = self.linear_sims1[_](context)
                a_input2 = self.linear_sims2[_](context)
                attention.append(self.leakyrelu(a_input1 + a_input2.transpose(-1, -2)))

            attention = torch.mean(torch.stack(attention, 0), 0)
            markoff_value = -INF

        elif self.metric_type == 'kernel':
            # RBF-style kernel on a learned Mahalanobis-like distance.
            dist_weight = torch.mm(self.weight, self.weight.transpose(-1, -2))
            attention = self.compute_distance_mat(context, dist_weight)
            attention = torch.exp(-0.5 * attention * (self.precision_inv_dis**2))

            markoff_value = 0

        elif self.metric_type == 'cosine':
            context_norm = context.div(torch.norm(context, p=2, dim=-1, keepdim=True))
            attention = torch.mm(context_norm, context_norm.transpose(-1, -2)).detach()
            markoff_value = 0

        # NOTE(review): masking with ``1 - mask.byte()`` is deprecated in
        # newer PyTorch (masked_fill_ expects a bool mask) — confirm the
        # targeted torch version.
        if ctx_mask is not None:
            attention = attention.masked_fill_(1 - ctx_mask.byte().unsqueeze(-1), markoff_value)

        if anchor_mask is not None:
            attention = attention.masked_fill_(1 - anchor_mask.byte().unsqueeze(-2), markoff_value)

        if self.epsilon is not None:
            attention = self.build_epsilon_neighbourhood(attention, self.epsilon, markoff_value)

        if self.topk is not None:
            attention = self.build_knn_neighbourhood(attention, self.topk, markoff_value)

        return attention

    def build_knn_neighbourhood(self, attention, topk, markoff_value):
        # Keep only the top-k entries per row; everything else becomes
        # markoff_value.
        topk = min(topk, attention.size(-1))
        knn_val, knn_ind = torch.topk(attention, topk, dim=-1)
        weighted_adjacency_matrix = to_cuda((markoff_value * torch.ones_like(attention)).scatter_(-1, knn_ind, knn_val), self.device)
        return weighted_adjacency_matrix

    def build_epsilon_neighbourhood(self, attention, epsilon, markoff_value):
        # Zero-out (to markoff_value) entries at or below the epsilon
        # threshold; the mask is detached so thresholding is not trained.
        mask = (attention > epsilon).detach().float()
        weighted_adjacency_matrix = attention * mask + markoff_value * (1 - mask)
        return weighted_adjacency_matrix

    def compute_distance_mat(self, X, weight=None):
        # Pairwise squared distances, optionally under a linear transform.
        if weight is not None:
            trans_X = torch.mm(X, weight)
        else:
            trans_X = X
        norm = torch.sum(trans_X * X, dim=-1)
        dists = -2 * torch.matmul(trans_X, X.transpose(-1, -2)) + norm.unsqueeze(0) + norm.unsqueeze(1)
        return dists
def get_binarized_kneighbors_graph(features, topk, mask=None, device=None):
    """Build a 0/1 kNN adjacency matrix from cosine similarity of ``features``.

    ``features`` must not require grad; masked rows/columns are zeroed out
    before the top-k selection.
    """
    assert features.requires_grad is False

    # Cosine similarity: L2-normalize rows, then take inner products.
    normed = features.div(torch.norm(features, p=2, dim=-1, keepdim=True))
    sim = torch.matmul(normed, normed.transpose(-1, -2))

    if mask is not None:
        sim = sim.masked_fill_(1 - mask.byte().unsqueeze(1), 0)
        sim = sim.masked_fill_(1 - mask.byte().unsqueeze(-1), 0)

    # Keep the top-k neighbours per node and binarize.
    k = min(topk, sim.size(-1))
    _, nbr_idx = torch.topk(sim, k, dim=-1)
    adj = to_cuda(torch.zeros_like(sim).scatter_(-1, nbr_idx, 1), device)
    return adj
| 40.988506 | 133 | 0.645121 |
795615caf7840d1211dbecdf54b583eae88e9675 | 2,067 | py | Python | test/test_mutatate_strategies.py | cslotboom/Naturalize | f54beb0baa53f33a66e85660738eb4880419aa1e | [
"MIT"
] | null | null | null | test/test_mutatate_strategies.py | cslotboom/Naturalize | f54beb0baa53f33a66e85660738eb4880419aa1e | [
"MIT"
] | null | null | null | test/test_mutatate_strategies.py | cslotboom/Naturalize | f54beb0baa53f33a66e85660738eb4880419aa1e | [
"MIT"
] | null | null | null |
import naturalize.mutate.strategies as st
# from naturalize.solutionClass import Individual
import numpy as np
# np.random.seed(25)
N = 6
genea = np.linspace(0,5,N)
geneb = np.linspace(0,5,N)*5
Pvector = np.array([0.1, 0.1, 0.4, 0.5, 1, 0.1])
threshold = 0.2
bounds = np.ones([2,N])*np.array([[0,30]]).T
# purtThreshold = 0.1
def test_mutateGeneRandom():
    """_mutateRandom swaps in genes from geneb wherever Pvector < threshold."""
    expected = np.array([0, 5, 2, 3, 4, 25])
    mutated = st._mutateRandom(genea, geneb, threshold, Pvector, N)
    assert np.all(mutated == expected)
# test_mutateGeneRandom()
def test_mutateGenePereturbate_1():
    """
    Crude bound check: the perturbed gene stays within purtThreshold of the
    original. Three thresholds are exercised across the _1/_2/_3 tests to
    compensate for not checking exact output values.
    """
    purtThreshold = 0.1
    perturbed = st._mutatePerturbate(genea, threshold, Pvector, N, purtThreshold)
    assert np.sum(np.abs(perturbed - genea)) < np.sum(purtThreshold * genea)
def test_mutateGenePereturbate_2():
    """Bound check with a very small perturbation threshold."""
    purtThreshold = 0.0000001
    perturbed = st._mutatePerturbate(genea, threshold, Pvector, N, purtThreshold)
    assert np.sum(np.abs(perturbed - genea)) < np.sum(purtThreshold * genea)
def test_mutateGenePereturbate_3():
    """Bound check with a large perturbation threshold."""
    purtThreshold = 0.5
    perturbed = st._mutatePerturbate(genea, threshold, Pvector, N, purtThreshold)
    assert np.sum(np.abs(perturbed - genea)) < np.sum(purtThreshold * genea)
def test_EnforceBoundary():
    """Values outside [0, 30] are clipped onto the boundary."""
    raw = np.array([-1, -2, 4, 5, 45, 31])
    expected = np.array([0, 0, 4, 5, 30, 30])
    clipped = st._enforceBoundary(raw, bounds)
    assert np.all(clipped == expected)
# test_EnforceBoundary()
# test_mutateGenePereturbate_1()
# test_mutateGenePereturbate_2()
# test_mutateGenePereturbate_3() | 26.5 | 81 | 0.670053 |
7956185868f3f5607ec5540020f2d0b1f02fcaaa | 10,606 | py | Python | venv/lib/python2.7/site-packages/alembic/ddl/mysql.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/alembic/ddl/mysql.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | venv/lib/python2.7/site-packages/alembic/ddl/mysql.py | jessekl/twiliochallenge | 2bba8bc2e0928880f1e2abe6b53b96dbc67ef34f | [
"MIT"
] | null | null | null | from sqlalchemy.ext.compiler import compiles
from sqlalchemy import types as sqltypes
from sqlalchemy import schema
from ..compat import string_types
from .. import util
from .impl import DefaultImpl
from .base import ColumnNullable, ColumnName, ColumnDefault, \
ColumnType, AlterColumn, format_column_name, \
format_server_default
from .base import alter_table
from ..autogenerate import compare
class MySQLImpl(DefaultImpl):
__dialect__ = 'mysql'
transactional_ddl = False
def alter_column(self, table_name, column_name,
nullable=None,
server_default=False,
name=None,
type_=None,
schema=None,
autoincrement=None,
existing_type=None,
existing_server_default=None,
existing_nullable=None,
existing_autoincrement=None
):
if name is not None:
self._exec(
MySQLChangeColumn(
table_name, column_name,
schema=schema,
newname=name,
nullable=nullable if nullable is not None else
existing_nullable
if existing_nullable is not None
else True,
type_=type_ if type_ is not None else existing_type,
default=server_default if server_default is not False
else existing_server_default,
autoincrement=autoincrement if autoincrement is not None
else existing_autoincrement
)
)
elif nullable is not None or \
type_ is not None or \
autoincrement is not None:
self._exec(
MySQLModifyColumn(
table_name, column_name,
schema=schema,
newname=name if name is not None else column_name,
nullable=nullable if nullable is not None else
existing_nullable
if existing_nullable is not None
else True,
type_=type_ if type_ is not None else existing_type,
default=server_default if server_default is not False
else existing_server_default,
autoincrement=autoincrement if autoincrement is not None
else existing_autoincrement
)
)
elif server_default is not False:
self._exec(
MySQLAlterDefault(
table_name, column_name, server_default,
schema=schema,
)
)
def compare_server_default(self, inspector_column,
metadata_column,
rendered_metadata_default,
rendered_inspector_default):
# partially a workaround for SQLAlchemy issue #3023; if the
# column were created without "NOT NULL", MySQL may have added
# an implicit default of '0' which we need to skip
if metadata_column.type._type_affinity is sqltypes.Integer and \
inspector_column.primary_key and \
not inspector_column.autoincrement and \
not rendered_metadata_default and \
rendered_inspector_default == "'0'":
return False
else:
return rendered_inspector_default != rendered_metadata_default
def correct_for_autogen_constraints(self, conn_unique_constraints,
conn_indexes,
metadata_unique_constraints,
metadata_indexes):
# TODO: if SQLA 1.0, make use of "duplicates_index"
# metadata
removed = set()
for idx in list(conn_indexes):
if idx.unique:
continue
# MySQL puts implicit indexes on FK columns, even if
# composite and even if MyISAM, so can't check this too easily.
# the name of the index may be the column name or it may
# be the name of the FK constraint.
for col in idx.columns:
if idx.name == col.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
for fk in col.foreign_keys:
if fk.name == idx.name:
conn_indexes.remove(idx)
removed.add(idx.name)
break
if idx.name in removed:
break
# then remove indexes from the "metadata_indexes"
# that we've removed from reflected, otherwise they come out
# as adds (see #202)
for idx in list(metadata_indexes):
if idx.name in removed:
metadata_indexes.remove(idx)
# then dedupe unique indexes vs. constraints, since MySQL
# doesn't really have unique constraints as a separate construct.
# but look in the metadata and try to maintain constructs
# that already seem to be defined one way or the other
# on that side. See #276
metadata_uq_names = set([
cons.name for cons in metadata_unique_constraints
if cons.name is not None])
unnamed_metadata_uqs = set([
compare._uq_constraint_sig(cons).sig
for cons in metadata_unique_constraints
if cons.name is None
])
metadata_ix_names = set([
cons.name for cons in metadata_indexes if cons.unique])
conn_uq_names = dict(
(cons.name, cons) for cons in conn_unique_constraints
)
conn_ix_names = dict(
(cons.name, cons) for cons in conn_indexes if cons.unique
)
for overlap in set(conn_uq_names).intersection(conn_ix_names):
if overlap not in metadata_uq_names:
if compare._uq_constraint_sig(conn_uq_names[overlap]).sig \
not in unnamed_metadata_uqs:
conn_unique_constraints.discard(conn_uq_names[overlap])
elif overlap not in metadata_ix_names:
conn_indexes.discard(conn_ix_names[overlap])
class MySQLAlterDefault(AlterColumn):
    """DDL element for MySQL ``ALTER ... ALTER COLUMN SET/DROP DEFAULT``.

    Compiled by ``_mysql_alter_default`` below.
    """

    def __init__(self, name, column_name, default, schema=None):
        # Calls the class *after* AlterColumn in the MRO (i.e. skips
        # AlterColumn.__init__), so only table name/schema are processed.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.default = default
class MySQLChangeColumn(AlterColumn):
    """DDL element for MySQL ``CHANGE [COLUMN]``.

    MySQL requires restating the full column definition (type, nullability,
    default, autoincrement) even when only renaming, hence the mandatory
    ``type_``.
    """

    def __init__(self, name, column_name, schema=None,
                 newname=None,
                 type_=None,
                 nullable=None,
                 default=False,
                 autoincrement=None):
        # Skip AlterColumn.__init__ by calling the next class in the MRO;
        # only table name/schema handling is wanted here.
        super(AlterColumn, self).__init__(name, schema=schema)
        self.column_name = column_name
        self.nullable = nullable
        self.newname = newname
        self.default = default
        self.autoincrement = autoincrement
        if type_ is None:
            raise util.CommandError(
                "All MySQL CHANGE/MODIFY COLUMN operations "
                "require the existing type."
            )

        self.type_ = sqltypes.to_instance(type_)
class MySQLModifyColumn(MySQLChangeColumn):
    # Same attributes/validation as CHANGE, but compiled to "MODIFY"
    # (no column rename).
    pass
@compiles(ColumnNullable, 'mysql')
@compiles(ColumnName, 'mysql')
@compiles(ColumnDefault, 'mysql')
@compiles(ColumnType, 'mysql')
def _mysql_doesnt_support_individual(element, compiler, **kw):
    # MySQL can only alter a column by restating its whole definition
    # (CHANGE/MODIFY), so the generic single-attribute ALTER constructs
    # are rejected outright.
    raise NotImplementedError(
        "Individual alter column constructs not supported by MySQL"
    )
@compiles(MySQLAlterDefault, "mysql")
def _mysql_alter_default(element, compiler, **kw):
return "%s ALTER COLUMN %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
"SET DEFAULT %s" % format_server_default(compiler, element.default)
if element.default is not None
else "DROP DEFAULT"
)
@compiles(MySQLModifyColumn, "mysql")
def _mysql_modify_column(element, compiler, **kw):
return "%s MODIFY %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement
),
)
@compiles(MySQLChangeColumn, "mysql")
def _mysql_change_column(element, compiler, **kw):
return "%s CHANGE %s %s %s" % (
alter_table(compiler, element.table_name, element.schema),
format_column_name(compiler, element.column_name),
format_column_name(compiler, element.newname),
_mysql_colspec(
compiler,
nullable=element.nullable,
server_default=element.default,
type_=element.type_,
autoincrement=element.autoincrement
),
)
def _render_value(compiler, expr):
    """Quote plain strings literally; compile SQL expressions otherwise."""
    if not isinstance(expr, string_types):
        return compiler.sql_compiler.process(expr)
    return "'%s'" % expr
def _mysql_colspec(compiler, nullable, server_default, type_,
                   autoincrement):
    """Build the full MySQL column specification used by CHANGE/MODIFY."""
    parts = [
        compiler.dialect.type_compiler.process(type_),
        "NULL" if nullable else "NOT NULL",
    ]
    if autoincrement:
        parts.append("AUTO_INCREMENT")
    # ``False`` means "no default given"; ``None`` means no default either.
    if server_default is not False and server_default is not None:
        parts.append("DEFAULT %s" % _render_value(compiler, server_default))
    return " ".join(parts)
@compiles(schema.DropConstraint, "mysql")
def _mysql_drop_constraint(element, compiler, **kw):
    """Override DROP CONSTRAINT for MySQL, which has no generic form.

    Only FK/PK/unique constraints can be dropped; CHECK constraints and
    untyped constraints raise NotImplementedError.
    """
    constraint = element.element
    droppable = (schema.ForeignKeyConstraint,
                 schema.PrimaryKeyConstraint,
                 schema.UniqueConstraint)
    if isinstance(constraint, droppable):
        return compiler.visit_drop_constraint(element, **kw)
    if isinstance(constraint, schema.CheckConstraint):
        raise NotImplementedError(
            "MySQL does not support CHECK constraints.")
    raise NotImplementedError(
        "No generic 'DROP CONSTRAINT' in MySQL - "
        "please specify constraint type")
| 36.954704 | 76 | 0.589289 |
7956187d05c0f76baae9f93d2debb11aa99835a9 | 21 | py | Python | projects/flake8-quotes/test.py | quinn-dougherty/python-on-nix | 910d3f6554acd4a4ef0425ebccd31104dccb283c | [
"Unlicense"
] | 25 | 2021-10-30T19:54:59.000Z | 2022-03-29T06:11:02.000Z | projects/flake8-quotes/test.py | quinn-dougherty/python-on-nix | 910d3f6554acd4a4ef0425ebccd31104dccb283c | [
"Unlicense"
] | 21 | 2021-10-19T01:09:38.000Z | 2022-03-24T16:08:53.000Z | projects/flake8-quotes/test.py | quinn-dougherty/python-on-nix | 910d3f6554acd4a4ef0425ebccd31104dccb283c | [
"Unlicense"
] | 3 | 2022-01-25T20:25:13.000Z | 2022-03-08T02:58:50.000Z | import flake8_quotes
| 10.5 | 20 | 0.904762 |
79561890e099f681c1377d32704e8676464b3ab8 | 3,909 | py | Python | django_extensions/management/commands/show_template_tags.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 4,057 | 2015-01-01T17:56:25.000Z | 2022-03-31T16:32:40.000Z | django_extensions/management/commands/show_template_tags.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 1,115 | 2015-01-01T14:59:38.000Z | 2022-03-28T22:05:55.000Z | django_extensions/management/commands/show_template_tags.py | KazakovDenis/django-extensions | ef3b3abe3c3d6563b73633bd25e3ff3ac9716661 | [
"MIT"
] | 951 | 2015-01-02T16:57:26.000Z | 2022-03-28T21:42:22.000Z | # -*- coding: utf-8 -*-
import inspect
import os
import re
from django.apps import apps
from django.core.management import color
from django.core.management import BaseCommand
from django.utils import termcolors
from django.utils.encoding import smart_text
from django_extensions.compat import load_tag_library
from django_extensions.management.color import _dummy_style_func
from django_extensions.management.utils import signalcommand
def no_style():
    """Return a Django no-op style object with the extra roles used by this
    command (FILTER, MODULE_NAME, TAG, TAGLIB) mapped to a pass-through."""
    plain = color.no_style()
    plain.FILTER = _dummy_style_func
    plain.MODULE_NAME = _dummy_style_func
    plain.TAG = _dummy_style_func
    plain.TAGLIB = _dummy_style_func
    return plain
def color_style():
    """Return a Django color style with bold ANSI formatters attached for the
    roles used by this command."""
    styled = color.color_style()
    role_colors = {
        'FILTER': 'yellow',
        'MODULE_NAME': 'green',
        'TAG': 'red',
        'TAGLIB': 'blue',
    }
    for role, fg in role_colors.items():
        setattr(styled, role, termcolors.make_style(fg=fg, opts=('bold',)))
    return styled
def format_block(block, nlspaces=0):
    """
    Format the given block of text, trimming leading/trailing
    empty lines and any leading whitespace that is common to all lines.

    The purpose is to let us list a code block as a multiline,
    triple-quoted Python string, taking care of
    indentation concerns.

    http://code.activestate.com/recipes/145672/

    :param block: text to reformat (anything ``smart_text`` accepts)
    :param nlspaces: number of spaces to prepend to every emitted line
    :returns: the reformatted text, always terminated with a newline
    """
    # separate block into lines
    lines = smart_text(block).split('\n')

    # remove leading/trailing empty lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    # Robustness: an empty or whitespace-only block previously raised
    # IndexError on ``lines[0]`` below.
    if not lines:
        return '\n'

    # look at first line to see how much indentation to trim
    ws = re.match(r'\s*', lines[0]).group(0)
    if ws:
        # BUGFIX: ``map`` returns a lazy iterator on Python 3, which is
        # always truthy and not indexable, so the trimming loops below
        # raised TypeError.  Materialize the transformed lines as a list.
        lines = [x.replace(ws, '', 1) for x in lines]

    # remove leading/trailing blank lines (after leading ws removal)
    # we do this again in case there were pure-whitespace lines
    while lines and not lines[0]:
        del lines[0]
    while lines and not lines[-1]:
        del lines[-1]

    # account for user-specified leading spaces
    flines = ['%s%s' % (' ' * nlspaces, line) for line in lines]

    return '\n'.join(flines) + '\n'
class Command(BaseCommand):
    help = "Displays template tags and filters available in the current project."

    # Accumulated report text; returned from handle() so callers/tests can
    # inspect it.  NOTE: declared at class level, so it is shared across
    # instances of this command class.
    results = ""

    def add_result(self, s, depth=0):
        """Append *s* to the report, indented four spaces per *depth* level."""
        self.results += '%s\n' % s.rjust(depth * 4 + len(s))

    @signalcommand
    def handle(self, *args, **options):
        """Walk every installed app's ``templatetags`` package and list the
        tags and filters each library registers."""
        if options['no_color']:
            style = no_style()
        else:
            style = color_style()

        for app_config in apps.get_app_configs():
            app = app_config.name
            try:
                templatetag_mod = __import__(app + '.templatetags', {}, {}, [''])
            except ImportError:
                continue

            mod_path = inspect.getabsfile(templatetag_mod)
            mod_files = os.listdir(os.path.dirname(mod_path))
            # BUGFIX: ``i.rstrip('.py')`` strips any trailing '.', 'p' or 'y'
            # characters (e.g. 'happy.py' -> 'ha'), not the extension; slice
            # off the three-character '.py' suffix instead.
            tag_files = [i[:-3] for i in mod_files if i.endswith('.py') and i[0] != '_']

            app_labeled = False
            for taglib in tag_files:
                lib = load_tag_library(taglib)
                if lib is None:
                    continue

                if not app_labeled:
                    self.add_result('App: %s' % style.MODULE_NAME(app))
                    app_labeled = True
                self.add_result('load: %s' % style.TAGLIB(taglib), 1)

                libstuff = [
                    (lib.tags, 'Tag:', style.TAG),
                    (lib.filters, 'Filter:', style.FILTER)
                ]
                for items, label, style_func in libstuff:
                    for item in items:
                        self.add_result('%s %s' % (label, style_func(item)), 2)
                        doc = inspect.getdoc(items[item])
                        if doc:
                            self.add_result(format_block(doc, 12))
        return self.results
| 34.289474 | 97 | 0.601177 |
79561959859aa71c4ed28e1c638fff2be021a6d4 | 23,047 | py | Python | kivy/network/urlrequest.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | 13,889 | 2015-01-01T06:43:41.000Z | 2022-03-31T17:37:56.000Z | kivy/network/urlrequest.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | 4,570 | 2015-01-01T17:58:52.000Z | 2022-03-31T18:42:16.000Z | kivy/network/urlrequest.py | VICTORVICKIE/kivy | 55abc963fe9099c078a3a2253397de70c2ee17b1 | [
"MIT"
] | 3,786 | 2015-01-01T09:20:45.000Z | 2022-03-30T21:15:05.000Z | '''
UrlRequest
==========
.. versionadded:: 1.0.8
You can use the :class:`UrlRequest` to make asynchronous requests on the
web and get the result when the request is completed. The spirit is the
same as the XHR object in Javascript.
The content is also decoded if the Content-Type is
application/json and the result is automatically passed through json.loads.
from kivy.network.urlrequest import UrlRequest
req = UrlRequest(url, on_success, on_redirect, on_failure, on_error,
on_progress, req_body, req_headers, chunk_size,
timeout, method, decode, debug, file_path, ca_file,
verify)
Only the first argument is mandatory: the rest are optional.
By default, a "GET" request will be sent. If the :attr:`UrlRequest.req_body` is
not None, a "POST" request will be sent. It's up to you to adjust
:attr:`UrlRequest.req_headers` to suit your requirements and the response
to the request will be accessible as the parameter called "result" on
the callback function of the on_success event.
Example of fetching JSON::
def got_json(req, result):
for key, value in req.resp_headers.items():
print('{}: {}'.format(key, value))
req = UrlRequest('https://httpbin.org/headers', got_json)
Example of Posting data (adapted from httplib example)::
import urllib
def bug_posted(req, result):
print('Our bug is posted!')
print(result)
params = urllib.urlencode({'@number': 12524, '@type': 'issue',
'@action': 'show'})
headers = {'Content-type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
req = UrlRequest('bugs.python.org', on_success=bug_posted, req_body=params,
req_headers=headers)
If you want a synchronous request, you can call the wait() method.
'''
from base64 import b64encode
from collections import deque
from threading import Thread, Event
from json import loads
from time import sleep
from kivy.compat import PY2
from kivy.config import Config
if PY2:
from httplib import HTTPConnection
from urlparse import urlparse, urlunparse
else:
from http.client import HTTPConnection
from urllib.parse import urlparse, urlunparse
try:
import ssl
HTTPSConnection = None
if PY2:
from httplib import HTTPSConnection
else:
from http.client import HTTPSConnection
except ImportError:
# depending the platform, if openssl support wasn't compiled before python,
# this class is not available.
pass
from kivy.clock import Clock
from kivy.weakmethod import WeakMethod
from kivy.logger import Logger
from kivy.utils import platform
# list to save UrlRequest and prevent GC on un-referenced objects
g_requests = []
class UrlRequest(Thread):
'''A UrlRequest. See module documentation for usage.
.. versionchanged:: 1.5.1
Add `debug` parameter
.. versionchanged:: 1.0.10
Add `method` parameter
.. versionchanged:: 1.8.0
Parameter `decode` added.
Parameter `file_path` added.
Parameter `on_redirect` added.
Parameter `on_failure` added.
.. versionchanged:: 1.9.1
Parameter `ca_file` added.
Parameter `verify` added.
.. versionchanged:: 1.10.0
Parameters `proxy_host`, `proxy_port` and `proxy_headers` added.
.. versionchanged:: 1.11.0
Parameters `on_cancel` added.
:Parameters:
`url`: str
Complete url string to call.
`on_success`: callback(request, result)
Callback function to call when the result has been fetched.
`on_redirect`: callback(request, result)
Callback function to call if the server returns a Redirect.
`on_failure`: callback(request, result)
Callback function to call if the server returns a Client or
Server Error.
`on_error`: callback(request, error)
Callback function to call if an error occurs.
`on_progress`: callback(request, current_size, total_size)
Callback function that will be called to report progression of the
download. `total_size` might be -1 if no Content-Length has been
reported in the http response.
This callback will be called after each `chunk_size` is read.
`on_cancel`: callback(request)
Callback function to call if user requested to cancel the download
operation via the .cancel() method.
`req_body`: str, defaults to None
Data to sent in the request. If it's not None, a POST will be done
instead of a GET.
`req_headers`: dict, defaults to None
Custom headers to add to the request.
`chunk_size`: int, defaults to 8192
Size of each chunk to read, used only when `on_progress` callback
has been set. If you decrease it too much, a lot of on_progress
callbacks will be fired and will slow down your download. If you
want to have the maximum download speed, increase the chunk_size
or don't use ``on_progress``.
`timeout`: int, defaults to None
If set, blocking operations will timeout after this many seconds.
`method`: str, defaults to 'GET' (or 'POST' if ``body`` is specified)
The HTTP method to use.
`decode`: bool, defaults to True
If False, skip decoding of the response.
`debug`: bool, defaults to False
If True, it will use the Logger.debug to print information
about url access/progression/errors.
`file_path`: str, defaults to None
If set, the result of the UrlRequest will be written to this path
instead of in memory.
`ca_file`: str, defaults to None
Indicates a SSL CA certificate file path to validate HTTPS
certificates against
`verify`: bool, defaults to True
If False, disables SSL CA certificate verification
`proxy_host`: str, defaults to None
If set, the proxy host to use for this connection.
`proxy_port`: int, defaults to None
If set, and `proxy_host` is also set, the port to use for
connecting to the proxy server.
`proxy_headers`: dict, defaults to None
If set, and `proxy_host` is also set, the headers to send to the
proxy server in the ``CONNECT`` request.
'''
    def __init__(self, url, on_success=None, on_redirect=None,
                 on_failure=None, on_error=None, on_progress=None,
                 req_body=None, req_headers=None, chunk_size=8192,
                 timeout=None, method=None, decode=True, debug=False,
                 file_path=None, ca_file=None, verify=True, proxy_host=None,
                 proxy_port=None, proxy_headers=None, user_agent=None,
                 on_cancel=None, cookies=None):
        """Store the request configuration and immediately start the
        download thread (see the class docstring for parameter details)."""
        super(UrlRequest, self).__init__()
        # Results are pushed onto this deque by the worker thread and popped
        # by _dispatch_result on the Clock (main) thread.
        self._queue = deque()
        self._trigger_result = Clock.create_trigger(self._dispatch_result, 0)
        # Daemon thread: doesn't block interpreter shutdown.
        self.daemon = True
        # Callbacks are wrapped in WeakMethod so holding this request does
        # not keep the callback owner alive.
        self.on_success = WeakMethod(on_success) if on_success else None
        self.on_redirect = WeakMethod(on_redirect) if on_redirect else None
        self.on_failure = WeakMethod(on_failure) if on_failure else None
        self.on_error = WeakMethod(on_error) if on_error else None
        self.on_progress = WeakMethod(on_progress) if on_progress else None
        self.on_cancel = WeakMethod(on_cancel) if on_cancel else None
        self.decode = decode
        self.file_path = file_path
        self._debug = debug
        self._result = None
        self._error = None
        self._is_finished = False
        self._resp_status = None
        self._resp_headers = None
        self._resp_length = -1
        self._chunk_size = chunk_size
        self._timeout = timeout
        self._method = method
        self.verify = verify
        self._proxy_host = proxy_host
        self._proxy_port = proxy_port
        self._proxy_headers = proxy_headers
        # Set by cancel(); polled by the worker between chunks.
        self._cancel_event = Event()
        self._user_agent = user_agent
        self._cookies = cookies

        # On mobile platforms fall back to certifi's CA bundle when no
        # explicit ca_file was given (system stores are not usable there).
        if platform in ['android', 'ios']:
            import certifi
            self.ca_file = ca_file or certifi.where()
        else:
            self.ca_file = ca_file

        #: Url of the request
        self.url = url

        #: Request body passed in __init__
        self.req_body = req_body

        #: Request headers passed in __init__
        self.req_headers = req_headers

        # save our request to prevent GC
        g_requests.append(self)

        self.start()
    def run(self):
        """Worker-thread entry point: perform the fetch and push the outcome
        ('success', 'error' or 'killed') onto the result queue."""
        q = self._queue.appendleft
        url = self.url
        req_body = self.req_body
        req_headers = self.req_headers or {}
        user_agent = self._user_agent
        cookies = self._cookies

        # Pick a User-Agent: explicit argument wins, else the one from the
        # kivy Config [network] section, else none.
        if user_agent:
            req_headers.setdefault('User-Agent', user_agent)
        elif (
            Config.has_section('network')
            and 'useragent' in Config.items('network')
        ):
            # NOTE(review): Config.items('network') yields (key, value)
            # pairs, so this membership test looks like it can never match a
            # bare key string — confirm against kivy.Config semantics.
            useragent = Config.get('network', 'useragent')
            req_headers.setdefault('User-Agent', useragent)

        if cookies:
            req_headers.setdefault("Cookie", cookies)

        try:
            result, resp = self._fetch_url(url, req_body, req_headers, q)
            if self.decode:
                result = self.decode_result(result, resp)
        except Exception as e:
            q(('error', None, e))
        else:
            if not self._cancel_event.is_set():
                q(('success', resp, result))
            else:
                q(('killed', None, None))

        # using trigger can result in a missed on_success event
        self._trigger_result()

        # clean ourself when the queue is empty
        while len(self._queue):
            sleep(.1)
            self._trigger_result()

        # ok, authorize the GC to clean us.
        if self in g_requests:
            g_requests.remove(self)
def _parse_url(self, url):
parse = urlparse(url)
host = parse.hostname
port = parse.port
userpass = None
# append user + pass to hostname if specified
if parse.username and parse.password:
userpass = {
"Authorization": "Basic {}".format(b64encode(
"{}:{}".format(
parse.username,
parse.password
).encode('utf-8')
).decode('utf-8'))
}
return host, port, userpass, parse
    def _fetch_url(self, url, body, headers, q):
        """Perform the actual HTTP(S) request for *url*.

        Runs on the worker thread.  Progress tuples are pushed through *q*
        when an on_progress callback is set.  Returns ``(result, response)``
        where *result* is the (possibly file-streamed) body.
        """
        # Parse and fetch the current url
        trigger = self._trigger_result
        chunk_size = self._chunk_size
        report_progress = self.on_progress is not None
        timeout = self._timeout
        file_path = self.file_path
        ca_file = self.ca_file
        verify = self.verify

        if self._debug:
            Logger.debug('UrlRequest: {0} Fetch url <{1}>'.format(
                id(self), url))
            Logger.debug('UrlRequest: {0} - body: {1}'.format(
                id(self), body))
            Logger.debug('UrlRequest: {0} - headers: {1}'.format(
                id(self), headers))

        # parse url
        host, port, userpass, parse = self._parse_url(url)
        # Merge embedded Basic-auth credentials into the request headers.
        if userpass and not headers:
            headers = userpass
        elif userpass and headers:
            key = list(userpass.keys())[0]
            headers[key] = userpass[key]

        # translate scheme to connection class
        cls = self.get_connection_for_scheme(parse.scheme)

        # reconstruct path to pass on the request
        path = parse.path
        if parse.params:
            path += ';' + parse.params
        if parse.query:
            path += '?' + parse.query
        if parse.fragment:
            path += '#' + parse.fragment

        # create connection instance
        args = {}
        if timeout is not None:
            args['timeout'] = timeout

        # Custom CA bundle for HTTPS certificate validation.
        if (ca_file is not None and hasattr(ssl, 'create_default_context') and
                parse.scheme == 'https'):
            ctx = ssl.create_default_context(cafile=ca_file)
            ctx.verify_mode = ssl.CERT_REQUIRED
            args['context'] = ctx

        # verify=False disables both hostname checks and cert validation.
        if not verify and parse.scheme == 'https' and (
                hasattr(ssl, 'create_default_context')):
            ctx = ssl.create_default_context()
            ctx.check_hostname = False
            ctx.verify_mode = ssl.CERT_NONE
            args['context'] = ctx

        if self._proxy_host:
            Logger.debug('UrlRequest: {0} - proxy via {1}:{2}'.format(
                id(self), self._proxy_host, self._proxy_port
            ))
            req = cls(self._proxy_host, self._proxy_port, **args)
            if parse.scheme == 'https':
                # HTTPS through a proxy requires a CONNECT tunnel.
                req.set_tunnel(host, port, self._proxy_headers)
            else:
                # Plain HTTP proxying sends the absolute URL as the path.
                path = urlunparse(parse)
        else:
            req = cls(host, port, **args)

        # send request
        method = self._method
        if method is None:
            method = 'GET' if body is None else 'POST'

        req.request(method, path, body, headers or {})

        # read header
        resp = req.getresponse()

        # read content
        if report_progress or file_path is not None:
            try:
                total_size = int(resp.getheader('content-length'))
            except:
                # No/invalid Content-Length header: size unknown (-1).
                total_size = -1

            # before starting the download, send a fake progress to permit the
            # user to initialize his ui
            if report_progress:
                q(('progress', resp, (0, total_size)))

            def get_chunks(fd=None):
                # Read the body chunk_size bytes at a time; write to fd when
                # streaming to a file, otherwise accumulate in memory.
                bytes_so_far = 0
                result = b''
                while 1:
                    chunk = resp.read(chunk_size)
                    if not chunk:
                        break

                    if fd:
                        fd.write(chunk)
                    else:
                        result += chunk

                    bytes_so_far += len(chunk)
                    # report progress to user
                    if report_progress:
                        q(('progress', resp, (bytes_so_far, total_size)))
                        trigger()
                    # Stop early when cancel() was called.
                    if self._cancel_event.is_set():
                        break
                return bytes_so_far, result

            if file_path is not None:
                with open(file_path, 'wb') as fd:
                    bytes_so_far, result = get_chunks(fd)
            else:
                bytes_so_far, result = get_chunks()

            # ensure that results are dispatched for the last chunk,
            # avoid trigger
            if report_progress:
                q(('progress', resp, (bytes_so_far, total_size)))
                trigger()
        else:
            result = resp.read()
            try:
                if isinstance(result, bytes):
                    result = result.decode('utf-8')
            except UnicodeDecodeError:
                # if it's an image? decoding would not work
                pass
        req.close()

        # return everything
        return result, resp
def get_connection_for_scheme(self, scheme):
'''Return the Connection class for a particular scheme.
This is an internal function that can be expanded to support custom
schemes.
Actual supported schemes: http, https.
'''
if scheme == 'http':
return HTTPConnection
elif scheme == 'https' and HTTPSConnection is not None:
return HTTPSConnection
else:
raise Exception('No class for scheme %s' % scheme)
def decode_result(self, result, resp):
'''Decode the result fetched from url according to his Content-Type.
Currently supports only application/json.
'''
# Entry to decode url from the content type.
# For example, if the content type is a json, it will be automatically
# decoded.
content_type = resp.getheader('Content-Type', None)
if content_type is not None:
ct = content_type.split(';')[0]
if ct == 'application/json':
if isinstance(result, bytes):
result = result.decode('utf-8')
try:
return loads(result)
except:
return result
return result
    def _dispatch_result(self, dt):
        """Clock callback: drain the result queue and fire the matching
        user callbacks (success/redirect/failure/error/progress/cancel)."""
        while True:
            # Read the result pushed on the queue, and dispatch to the client
            try:
                result, resp, data = self._queue.pop()
            except IndexError:
                return
            if resp:
                # Small workaround in order to prevent the situation mentioned
                # in the comment below
                final_cookies = ""
                parsed_headers = []
                # Fold all Set-Cookie headers into a single ';'-joined value
                # so the dict() below does not drop any of them.
                for key, value in resp.getheaders():
                    if key == "Set-Cookie":
                        final_cookies += "{};".format(value)
                    else:
                        parsed_headers.append((key, value))
                parsed_headers.append(("Set-Cookie", final_cookies[:-1]))

                # XXX usage of dict can be dangerous if multiple headers
                # are set even if it's invalid. But it look like it's ok
                # ? http://stackoverflow.com/questions/2454494/..
                # ..urllib2-multiple-set-cookie-headers-in-response
                self._resp_headers = dict(parsed_headers)
                self._resp_status = resp.status
            if result == 'success':
                # Dispatch by HTTP status class: 1xx/2xx success,
                # 3xx redirect, 4xx/5xx failure.
                status_class = resp.status // 100

                if status_class in (1, 2):
                    if self._debug:
                        Logger.debug('UrlRequest: {0} Download finished with'
                                     ' {1} datalen'.format(id(self),
                                                           len(data)))
                    self._is_finished = True
                    self._result = data
                    if self.on_success:
                        func = self.on_success()
                        # WeakMethod may have died; only call if still alive.
                        if func:
                            func(self, data)

                elif status_class == 3:
                    if self._debug:
                        Logger.debug('UrlRequest: {} Download '
                                     'redirected'.format(id(self)))
                    self._is_finished = True
                    self._result = data
                    if self.on_redirect:
                        func = self.on_redirect()
                        if func:
                            func(self, data)

                elif status_class in (4, 5):
                    if self._debug:
                        Logger.debug('UrlRequest: {} Download failed with '
                                     'http error {}'.format(id(self),
                                                            resp.status))
                    self._is_finished = True
                    self._result = data
                    if self.on_failure:
                        func = self.on_failure()
                        if func:
                            func(self, data)

            elif result == 'error':
                if self._debug:
                    Logger.debug('UrlRequest: {0} Download error '
                                 '<{1}>'.format(id(self), data))
                self._is_finished = True
                self._error = data
                if self.on_error:
                    func = self.on_error()
                    if func:
                        func(self, data)

            elif result == 'progress':
                if self._debug:
                    Logger.debug('UrlRequest: {0} Download progress '
                                 '{1}'.format(id(self), data))
                if self.on_progress:
                    func = self.on_progress()
                    if func:
                        func(self, data[0], data[1])

            elif result == 'killed':
                if self._debug:
                    Logger.debug('UrlRequest: Cancelled by user')
                if self.on_cancel:
                    func = self.on_cancel()
                    if func:
                        func(self)

            else:
                # Unknown message type: programming error in the worker.
                assert(0)
    # --- read-only accessors over the state written by the worker thread ---

    @property
    def is_finished(self):
        '''Return True if the request has finished, whether it's a
        success or a failure.
        '''
        return self._is_finished

    @property
    def result(self):
        '''Return the result of the request.
        This value is not determined until the request is finished.
        '''
        return self._result

    @property
    def resp_headers(self):
        '''If the request has been completed, return a dictionary containing
        the headers of the response. Otherwise, it will return None.
        '''
        return self._resp_headers

    @property
    def resp_status(self):
        '''Return the status code of the response if the request is complete,
        otherwise return None.
        '''
        return self._resp_status

    @property
    def error(self):
        '''Return the error of the request.
        This value is not determined until the request is completed.
        '''
        return self._error

    @property
    def chunk_size(self):
        '''Return the size of a chunk, used only in "progress" mode (when
        on_progress callback is set.)
        '''
        return self._chunk_size
def wait(self, delay=0.5):
'''Wait for the request to finish (until :attr:`resp_status` is not
None)
.. note::
This method is intended to be used in the main thread, and the
callback will be dispatched from the same thread
from which you're calling.
.. versionadded:: 1.1.0
'''
while self.resp_status is None:
self._dispatch_result(delay)
sleep(delay)
def cancel(self):
'''Cancel the current request. It will be aborted, and the result
will not be dispatched. Once cancelled, the callback on_cancel will
be called.
.. versionadded:: 1.11.0
'''
self._cancel_event.set()
if __name__ == '__main__':
    # Smoke-test: fetch a Wikipedia API page and print result or error.
    from pprint import pprint

    def on_success(req, result):
        pprint('Got the result:')
        pprint(result)

    def on_error(req, error):
        pprint('Got an error:')
        pprint(error)

    # Drive kivy's Clock manually so queued results get dispatched.
    Clock.start_clock()
    req = UrlRequest('https://en.wikipedia.org/w/api.php?format'
                     '=json&action=query&titles=Kivy&prop=revisions&rvprop=content',
                     on_success, on_error)
    while not req.is_finished:
        sleep(1)
        Clock.tick()
    Clock.stop_clock()
    print('result =', req.result)
    print('error =', req.error)
| 34.972686 | 79 | 0.559465 |
795619686fdfb5131e3c5ae79cd406e379d7047e | 3,011 | py | Python | jarvis/User.py | open-jarvis/jarvis-pip | 96eb11a2a3b4eac0449528095e17302f564b64dd | [
"MIT"
] | null | null | null | jarvis/User.py | open-jarvis/jarvis-pip | 96eb11a2a3b4eac0449528095e17302f564b64dd | [
"MIT"
] | null | null | null | jarvis/User.py | open-jarvis/jarvis-pip | 96eb11a2a3b4eac0449528095e17302f564b64dd | [
"MIT"
] | null | null | null | """
Copyright (c) 2021 Philipp Scheer
"""
import time
import json
import random
import traceback
import jinja2
from jarvis import Database, Security, Config
class User:
    """Persistence wrapper around one document in the ``users`` table.

    An instance mirrors a document: ``id``/``_id``, ``username``, a hashed
    ``password`` plus arbitrary extra fields kept in ``data``.
    """

    def __init__(self, id=None, username=None, password=None, _id=None, **data) -> None:
        # Accept either ``id`` or the raw DB key ``_id`` (documents loaded
        # from the database carry the latter).
        self.id = id or _id
        self.username = username
        self.password = password
        self.data = data

    def save(self):
        """Insert or update this user document.

        Returns the document id on success, ``False`` on any error.
        NOTE(review): the broad ``except Exception`` hides all failure
        causes (including programming errors) — consider logging them.
        """
        try:
            old_user = User.from_id(self.id)
            ud = { **self.data,
                "username": self.username,
                "password": self.password,
            }
            if old_user: # we need to get the _rev and id
                ud = { **ud,
                    "_rev": old_user._rev,
                    "_id": old_user.id
                }
            # presumably Database().insert fills ud["_id"] for new docs —
            # TODO confirm against the jarvis Database implementation.
            Database().table("users").insert(ud)
            self.id = ud["_id"]
            return ud["_id"]
        except Exception:
            return False

    def to_json(self):
        """Return this user as a plain dict (not a JSON string)."""
        return self.__dict__()

    def get(self, key, or_else):
        """Return the value stored under *key*, or *or_else* when absent."""
        return self.__dict__().get(key, or_else)

    @classmethod
    def new(cls, username, password, **additional_data):
        """Create, persist and return a new user; None if the name is taken."""
        if User.exists(username):
            return None
        obj = {
            **additional_data,
            "username": username,
            "password": User.hash(password)
        }
        user = cls(**obj)
        user.save()
        return user

    @classmethod
    def from_id(cls, id):
        """Load a user by document id; None when not found."""
        res = Database().table("users").find({ "_id": { "$eq": id } })
        if res.found:
            res = res[0]
            return cls(**res)
        return None

    @classmethod
    def from_email(cls, email):
        """Load a user by the ``email`` field; None when not found."""
        res = Database().table("users").find({ "email": { "$eq": email }})
        if res.found:
            res = res[0]
            return cls(**res)
        return None

    @classmethod
    def from_json(cls, jsonObject: dict):
        """Build a user from a dict or a JSON string."""
        if isinstance(jsonObject, str):
            jsonObject = json.loads(jsonObject)
        return cls(**jsonObject)

    def __dict__(self):
        # NOTE(review): overriding ``__dict__`` with a method shadows the
        # normal instance-dict attribute and breaks vars(obj)/introspection;
        # instance attribute access itself still works in CPython, but this
        # is fragile — consider renaming (kept as-is: callers invoke
        # ``user.__dict__()``).
        return {
            **self.data,
            "id": self.id,
            "username": self.username,
            "password": self.password,
        }

    def __getitem__(self, key):
        # Dict-style access: ``user["email"]``; missing keys yield None.
        return self.__dict__().get(key, None)

    def __getattr__(self, key):
        # Attribute fallback for fields stored in ``data``; only called for
        # names not found through normal lookup, so no recursion with the
        # real attributes set in __init__.
        return self.__dict__().get(key, None)

    @staticmethod
    def validate(username, password):
        """Return the user's document id when credentials match, else False."""
        result = Database().table("users").find({
            "username": { "$eq": username },
            "password": { "$eq": User.hash(password) }
        })
        if result.found:
            return result[0]["_id"]
        return False

    @staticmethod
    def hash(password):
        """Hash *password* with the project-wide password hash."""
        return Security.password_hash(password)

    @staticmethod
    def exists(username):
        """Return True when a user with *username* already exists."""
        return len(list(Database().table("users").find({ "username": { "$eq": username }}))) > 0

    @staticmethod
    def count():
        """Return the total number of user documents."""
        return len(list(Database().table("users").all()))
795619b2b73dab42914746456fe815050ae59836 | 6,860 | py | Python | hp_tune.py | rbiswas143/deep-audioviz-experiments-train | 294c648ca9115efce6127fb242ac3f6f51cdf532 | [
"MIT"
] | 9 | 2018-12-31T08:33:20.000Z | 2020-10-10T21:06:22.000Z | hp_tune.py | rbiswas143/deep-audioviz-experiments-train | 294c648ca9115efce6127fb242ac3f6f51cdf532 | [
"MIT"
] | 10 | 2021-03-08T21:14:13.000Z | 2022-03-11T23:25:12.000Z | hp_tune.py | rbiswas143/deep-audioviz-experiments-train | 294c648ca9115efce6127fb242ac3f6f51cdf532 | [
"MIT"
] | 1 | 2019-10-22T20:13:23.000Z | 2019-10-22T20:13:23.000Z | """CLI and utils for training a batch of models and analysing hyper parameter tuning results"""
import train
import models
import data_processor as dp
import commons
import argparse
import torch
import os
import collections
def train_models(training_configs, email=False):
    """Train a batch of models"""
    total = len(training_configs)
    for index, config in enumerate(training_configs, start=1):
        print('\nTraining Model {} of {}: {}'.format(index, total, config.name))
        train.train(config, plot_learning_curves=False, cuda=torch.cuda.is_available(), email=email)
    print('All Models have been evaluated')
def print_evaluation_report(training_config):
    """Print the training and evaluation results for a model"""

    def _dump(attrs):
        # One "key<TAB>value" line per attribute.
        for key, val in attrs.items():
            print('{}\t{}'.format(key, val))

    # Training Config
    print('Training Config')
    _dump(training_config.__dict__)
    print()

    # Checkpoint
    model = training_config.get_by_model_key(False)
    checkpoint = models.ModelCheckpoint(model)
    checkpoint.load(training_config.get_model_path('checkpoint'))
    if not checkpoint.loaded:
        print('Not evaluated')
        return
    print('Last checkpoint stats')
    _dump(checkpoint.__dict__)
def _get_hps_for_autoencoder(training_config, checkpoint):
hps = collections.OrderedDict()
hps['name'] = training_config.name
hps['trainable_params'] = checkpoint.trainable_params
hps['epoch'] = checkpoint.epoch
hps['best_loss'] = checkpoint.best_loss
hps['batch_size'] = training_config.batch_size
hps['lr'] = training_config.model_params['lr']
hps['momentum'] = training_config.model_params['momentum']
hps['num_init_filters'] = training_config.model_params['num_init_filters']
hps['num_pools'] = training_config.model_params['num_pools']
hps['num_fc'] = training_config.model_params['num_fc']
hps['fc_scale_down'] = training_config.model_params['fc_scale_down']
hps['kernel_size'] = training_config.model_params['kernel_size']
hps['shared_weights'] = training_config.model_params['shared_weights']
hps['skip_connections'] = training_config.model_params['skip_connections']
return ["{} : {}".format(key, val) for key, val in hps.items()]
def _get_hps_for_classifier(training_config, checkpoint):
hps = collections.OrderedDict()
hps['name'] = training_config.name
hps['trainable_params'] = checkpoint.trainable_params
hps['epoch'] = checkpoint.epoch
hps['best_loss'] = checkpoint.best_loss
hps['batch_size'] = training_config.batch_size
hps['lr'] = training_config.model_params['lr']
hps['momentum'] = training_config.model_params['momentum']
hps['arch'] = training_config.model_params['arch']
hps['batchnorm'] = training_config.model_params['batchnorm']
return ["{} : {}".format(key, val) for key, val in hps.items()]
def save_evaluation_report(training_configs, config_path):
    """Compile and save hyper-tuning report for all models in the batch.

    Writes one tab-separated row per evaluated model to ``hps.txt`` in the
    directory of *config_path*; models without a saved checkpoint are
    skipped.
    """
    hps = []
    for i, training_config in enumerate(training_configs):
        print('Saving report for Model {}: {}'.format(i + 1, training_config.name))
        model = training_config.get_by_model_key(False)
        checkpoint = models.ModelCheckpoint(model)
        checkpoint.load(training_config.get_model_path('checkpoint'))
        if not checkpoint.loaded:
            print('Not evaluated')
            continue
        if training_config.model == 'conv_autoencoder':
            hps.append(_get_hps_for_autoencoder(training_config, checkpoint))
        elif training_config.model == 'cnn_classifier':
            hps.append(_get_hps_for_classifier(training_config, checkpoint))
        else:
            # BUGFIX: previously formatted ``training_configs.model`` (the
            # list), which raised AttributeError instead of this message.
            raise Exception('Invalid model code: {}'.format(training_config.model))
    with open(os.path.join(os.path.dirname(config_path), 'hps.txt'), 'w') as rep_file:
        rep_file.write('\n'.join(['\t'.join(hp) for hp in hps]))
def save_evaluation_plots(training_configs):
    """Create and save learning curves for all models in the batch"""
    for index, training_config in enumerate(training_configs, start=1):
        print('Saving plot for Model {}: {}'.format(index, training_config.name))
        model = training_config.get_by_model_key(False)
        checkpoint = models.ModelCheckpoint(model)
        checkpoint.load(training_config.get_model_path('checkpoint'))
        if not checkpoint.loaded:
            print('Not evaluated')
            continue
        plot_path = os.path.join(training_config.models_dir, "{}_lc.png".format(training_config.name))
        commons.save_learning_curve(checkpoint.training_losses, checkpoint.cv_losses, plot_path)
def cli():
    """Parse command-line arguments and run the selected hyper-tuning action
    (train / print-report / save-hps / save-plots)."""

    # Arguments Parser
    parser = argparse.ArgumentParser(description='Hyper Parameter tuning related actions')
    parser.add_argument('-c', '--config_files_path', help='Path to a file containing a list of training config files')
    parser.add_argument('-m', '--mode', choices=['train', 'print-report', 'save-hps', 'save-plots'], default='train',
                        help='Action to perform')
    parser.add_argument('-e', '--email', action='store_true', help='Send emails')
    parser.add_argument('-d', '--dataset', action='store_true', help='Print Dataset Details')

    # Parse arguments
    args = parser.parse_args()

    # Get model configs (read a single config file with newline separated paths to model configs)
    if args.config_files_path is None:
        raise Exception('Config file not specified')
    else:
        with open(args.config_files_path, 'r') as cfile:
            config_files = cfile.read().split('\n')
        train_configs = [train.TrainingConfig.load_from_file(fl) for fl in config_files]

    # Actions
    if args.mode == 'train':
        # Train a batch of models
        train_models(train_configs, email=args.email)
    elif args.mode == 'print-report':
        # Print report for all models
        for i, train_config in enumerate(train_configs):
            # With -d, also dump the dataset config once (for the first model).
            if args.dataset and i == 0:
                dataset_config = dp.DataPrepConfig.load_from_dataset(train_config.dataset_path)
                print('Dataset config for Model 1')
                for key, val in dataset_config.__dict__.items():
                    print('{}\t{}'.format(key, val))
                print()
            print('*' * 10 + 'Model {}: {}'.format(i + 1, train_config.name))
            print_evaluation_report(train_config)
            print()
    elif args.mode == 'save-hps':
        # Save hyper parameters for all models
        save_evaluation_report(train_configs, args.config_files_path)
    elif args.mode == 'save-plots':
        # Save learning curves for all models
        save_evaluation_plots(train_configs)
    else:
        raise Exception('Invalid mode: ' + args.mode)
if __name__ == '__main__':
cli()
| 42.875 | 118 | 0.681778 |
79561a7395946fdc7d7ebac69fc6d73578cd9bf6 | 10,400 | py | Python | test/test_visuals.py | zayed22/kepler-mapper | 1224e5d8e64e16ebe9e9ef989556a6ea3e2b214e | [
"MIT"
] | 1 | 2020-01-02T04:28:40.000Z | 2020-01-02T04:28:40.000Z | test/test_visuals.py | zayed22/kepler-mapper | 1224e5d8e64e16ebe9e9ef989556a6ea3e2b214e | [
"MIT"
] | 1 | 2019-07-14T20:30:41.000Z | 2019-07-16T08:17:25.000Z | test/test_visuals.py | zayed22/kepler-mapper | 1224e5d8e64e16ebe9e9ef989556a6ea3e2b214e | [
"MIT"
] | null | null | null | import os
import numbers
import json
import pytest
import numpy as np
from sklearn.datasets import make_circles
from kmapper import KeplerMapper
from kmapper import visuals
from kmapper.visuals import (
init_color_function,
format_meta,
format_mapper_data,
_map_val2color,
build_histogram,
_color_function,
)
from jinja2 import Environment, FileSystemLoader
np.random.seed(1)
"""
Interested in rebuilding the API of kepler mapper so it is more intuitive
Should Kepler Mapper be split into two objects?
I don't get how distance_matrix works
The visualize method should have sane defaults.
Tooltips
- [x] Tooltips should default to showing the ID of data point in each node.
- Tooltips should be able to be disabled.
- [was done already?] Tooltips should be able to show aggregate data for each node.
- [copy and pastable] Tooltips should easily be able to export the data.
Graph
- Graph should be able to be frozen.
- Graph should be able to switch between multiple coloring functions.
- Should be able to remove nodes from graph (so you can clean out noise)
- Edits should be able to be saved. Can re-export the html file so you can open it in the same state.
- Color funcs should be easier to use.
- Should be able to choose any D3 palette
- [x] Cold is low, hot is high.
Style:
- [x] 'inverse_X' should just be called X
- [x] More of the html stuff should be in the jinja2 stuff.
- If running from source, should be able to run offline
Map
- Move all of these arguments into the init method
"""
@pytest.fixture
def jinja_env():
    """Jinja2 environment rooted at kmapper's bundled template directory."""
    template_dir = os.path.join(os.path.dirname(__file__), "../kmapper/templates")
    return Environment(loader=FileSystemLoader(template_dir))
@pytest.fixture
def default_colorscale():
    """Eleven-stop Viridis colorscale as [position, rgb-string] pairs (0.0-1.0)."""
    viridis_stops = [
        "rgb(68, 1, 84)",
        "rgb(72, 35, 116)",
        "rgb(64, 67, 135)",
        "rgb(52, 94, 141)",
        "rgb(41, 120, 142)",
        "rgb(32, 144, 140)",
        "rgb(34, 167, 132)",
        "rgb(68, 190, 112)",
        "rgb(121, 209, 81)",
        "rgb(189, 222, 38)",
        "rgb(253, 231, 36)",
    ]
    # i / 10 reproduces the decimal stop positions 0.0, 0.1, ..., 1.0 exactly.
    return [[i / 10, color] for i, color in enumerate(viridis_stops)]
class TestVisualHelpers:
    """Unit tests for the color/formatting helpers in ``kmapper.visuals``."""

    def test_color_function_type(self):
        # init_color_function with no explicit values returns an ndarray in [0, 1].
        nodes = {"a": [1, 2, 3], "b": [4, 5, 6]}
        graph = {"nodes": nodes}
        color_function = init_color_function(graph)
        assert type(color_function) == np.ndarray
        assert min(color_function) == 0
        assert max(color_function) == 1

    def test_color_function_scaled(self):
        # A user-supplied color function is rescaled to span [0, 1].
        nodes = {"a": [1, 2, 3], "b": [4, 5, 6]}
        graph = {"nodes": nodes}
        cf = np.array([6, 5, 4, 3, 2, 1])
        color_function = init_color_function(graph, cf)
        # np.testing.assert_almost_equal(min(color_function), 0)
        # np.testing.assert_almost_equal(
        #     max(color_function), 1
        # ), "Scaler might have floating point issues, 1.0000...0002"
        # build_histogram in visuals.py assumes/needs this
        assert min(color_function) == 0
        assert max(color_function) == 1

    def test_color_hist_matches_nodes(self):
        """The histogram colors don't seem to match the node colors; this
        should confirm the colors will match and we need to look at the
        javascript instead.
        """
        # All sampled members share value 0.55, so the histogram collapses
        # to a single 100% bar whose color must equal the node color.
        color_function = np.array([0.55] * 10 + [0.0] * 10)
        member_ids = [1, 2, 3, 4, 5, 6]
        hist = build_histogram(color_function[member_ids])
        c = round(_color_function(member_ids, color_function), 2)
        single_bar = [bar for bar in hist if bar["perc"] == 100.0]
        assert len(single_bar) == 1
        assert _map_val2color(c, 0.0, 1.0) == single_bar[0]["color"]

    def test_color_function_size(self):
        nodes = {"a": [1, 2, 3], "b": [4, 5, 6, 7, 8, 9]}
        graph = {"nodes": nodes}
        color_function = init_color_function(graph)
        # NOTE(review): expected length is the max member id + 1 here, which
        # coincides with the member count only because the ids are dense.
        assert len(color_function) == len(nodes["a"]) + len(nodes["b"]) + 1

    def test_format_meta(self):
        # format_meta summarizes a mapper graph with numeric counts.
        mapper = KeplerMapper()
        data = np.random.rand(1000, 10)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        fmt = format_meta(graph)
        assert fmt["n_nodes"] == len(graph["nodes"])
        assert "n_edges" in fmt.keys()
        assert "n_total" in fmt.keys()
        # Everything except custom_meta should be a number.
        del fmt["custom_meta"]
        vals = fmt.values()
        for v in vals:
            assert isinstance(v, numbers.Number)

    def test_format_meta_with_meta(self):
        # A caller-supplied custom_meta string is passed through unchanged.
        mapper = KeplerMapper()
        data = np.random.rand(1000, 10)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        cm = "My custom_meta"
        fmt = format_meta(graph, cm)
        assert fmt["custom_meta"] == cm

    def test_format_mapper_data(self, jinja_env):
        # End-to-end check that the serialized graph JSON contains node
        # names, projected/inverse column labels, and custom tooltips.
        mapper = KeplerMapper()
        data, labels = make_circles(1000, random_state=0)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        color_function = lens[:, 0]
        inverse_X = data
        projected_X = lens
        projected_X_names = ["projected_%s" % (i) for i in range(projected_X.shape[1])]
        inverse_X_names = ["inverse_%s" % (i) for i in range(inverse_X.shape[1])]
        custom_tooltips = np.array(["customized_%s" % (l) for l in labels])

        graph_data = format_mapper_data(
            graph,
            color_function,
            inverse_X,
            inverse_X_names,
            projected_X,
            projected_X_names,
            custom_tooltips,
            jinja_env,
        )
        # print(graph_data)
        # Dump to json so we can easily tell what's in it.
        graph_data = json.dumps(graph_data)

        # TODO test more properties!
        assert "name" in graph_data
        assert """cube2_cluster0""" in graph_data
        assert """projected_0""" in graph_data
        assert """inverse_0""" in graph_data
        assert """customized_""" in graph_data

    def test_histogram(self):
        # build_histogram returns ten dict-shaped bins.
        data = np.random.random((100, 1))
        hist = visuals.build_histogram(data)
        assert isinstance(hist, list)
        assert isinstance(hist[0], dict)
        assert len(hist) == 10

    def test_cluster_stats(self):
        X = np.random.random((1000, 3))
        ids = np.random.choice(20, 1000)
        cluster_data = visuals._format_cluster_statistics(ids, X, ["a", "b", "c"])
        assert isinstance(cluster_data, dict)
        assert cluster_data["size"] == len(ids)

    def test_cluster_stats_above(self):
        # Feature "a" is inflated for cluster members, so it must be
        # reported in the "above" list with its mean.
        X = np.ones((1000, 3))
        ids = np.random.choice(20, 1000)
        X[ids, 0] = 10
        cluster_data = visuals._format_cluster_statistics(ids, X, ["a", "b", "c"])
        assert len(cluster_data["above"]) >= 1
        assert cluster_data["above"][0]["feature"] == "a"
        assert cluster_data["above"][0]["mean"] == 10

    def test_cluster_stats_below(self):
        # Feature "a" is deflated for cluster members, so it must be
        # reported in the "below" list with its mean.
        X = np.ones((1000, 3))
        ids = np.random.choice(20, 1000)
        X[ids, 0] = 0
        cluster_data = visuals._format_cluster_statistics(ids, X, ["a", "b", "c"])
        assert len(cluster_data["below"]) >= 1
        assert cluster_data["below"][0]["feature"] == "a"
        assert cluster_data["below"][0]["mean"] == 0

    def test_cluster_stats_with_no_names(self):
        # This would be the default.
        # Without column names, features fall back to the f_<index> scheme.
        X = np.ones((1000, 3))
        ids = np.random.choice(20, 1000)
        X[ids, 0] = 0
        cluster_data = visuals._format_cluster_statistics(ids, X, [])
        assert len(cluster_data["below"]) >= 1
        assert cluster_data["below"][0]["feature"] == "f_0"
        assert cluster_data["below"][0]["mean"] == 0
class TestVisualizeIntegration:
    """Integration tests for ``KeplerMapper.visualize``."""

    def test_empty_graph_warning(self):
        # Visualizing a graph with no nodes must raise.
        mapper = KeplerMapper()
        graph = {"nodes": {}}
        with pytest.raises(Exception):
            mapper.visualize(graph)

    def test_visualize_standalone_same(self, tmpdir):
        """Ensure that the visualization is not dependent on the actual
        mapper object."""
        mapper = KeplerMapper()
        file = tmpdir.join("output.html")
        data = np.random.rand(1000, 10)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        viz1 = mapper.visualize(graph, path_html=file.strpath)
        # A fresh mapper rendering the same graph must produce identical HTML.
        new_mapper = KeplerMapper()
        viz2 = new_mapper.visualize(graph, path_html=file.strpath)
        assert viz1 == viz2

    def test_file_written(self, tmpdir):
        # By default visualize() writes the returned HTML to path_html.
        mapper = KeplerMapper()
        file = tmpdir.join("output.html")
        data = np.random.rand(1000, 2)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        viz = mapper.visualize(graph, path_html=file.strpath)
        assert file.read() == viz
        assert len(tmpdir.listdir()) == 1, "file was written to"

    def test_file_not_written(self, tmpdir):
        # With save_file=False nothing must be written to disk.
        mapper = KeplerMapper(verbose=1)
        file = tmpdir.join("output.html")
        data = np.random.rand(1000, 10)
        lens = mapper.fit_transform(data, projection=[0])
        graph = mapper.map(lens, data)
        viz = mapper.visualize(graph, path_html=file.strpath, save_file=False)
        assert len(tmpdir.listdir()) == 0, "file was never written to"
        # assert file.read() != viz
class TestColorhandling:
    """Tests for mapping scalar values onto a colorscale."""

    def test_map_val2color_on_point(self, default_colorscale):
        """This function takes a val, a min and max, and a color scale,
        and finds the color the val should be."""
        # At each exact colorscale stop the stop color is returned verbatim.
        for v, color in default_colorscale:
            c = _map_val2color(v, 0.0, 1.0, default_colorscale)
            assert c == color

    def test_mid_val2color(self, default_colorscale):
        # Halfway between the first two stops each RGB channel is the
        # (integer-truncated) midpoint of the stop channels.
        expected = int((72 + 68) / 2), int((35 + 1) / 2), int((116 + 85) / 2)
        expected_str = (
            "rgb("
            + str(expected[0])
            + ", "
            + str(expected[1])
            + ", "
            + str(expected[2])
            + ")"
        )
        c = _map_val2color(0.05, 0.0, 1.0, default_colorscale)
        assert c == expected_str
| 32.098765 | 157 | 0.600865 |
79561c740266e0f9327b31e97913957bf2b4a70d | 2,735 | py | Python | app/core/test/test_models.py | HemelAkash/recipe-app-api | 7754f50f9de2889b9f8193e88db4eca99ea06f7b | [
"MIT"
] | null | null | null | app/core/test/test_models.py | HemelAkash/recipe-app-api | 7754f50f9de2889b9f8193e88db4eca99ea06f7b | [
"MIT"
] | null | null | null | app/core/test/test_models.py | HemelAkash/recipe-app-api | 7754f50f9de2889b9f8193e88db4eca99ea06f7b | [
"MIT"
] | null | null | null | from django.test import TestCase
from django.contrib.auth import get_user_model
from core import models
from unittest.mock import patch
class ModelTests(TestCase):
    """Model-layer tests for the custom user model and recipe-app models."""

    def test_create_user_with_email_successful(self):
        """Test creating a new user with an email is successful"""
        email = 'test@londonappdev.com'
        password = 'Password123'
        user = get_user_model().objects.create_user(
            email=email,
            password=password
        )
        self.assertEqual(user.email, email)
        self.assertTrue(user.check_password(password))

    def test_new_user_email_normalized(self):
        """Test the email for a new user is normalized"""
        email = 'test@LONDONAPPDEV.com'
        user = get_user_model().objects.create_user(email, 'test123')
        self.assertEqual(user.email, email.lower())

    def test_new_user_invalid_email(self):
        """Test creating user with no email raises error"""
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, 'test123')

    def test_create_new_superuser(self):
        """Test creating a new superuser"""
        user = get_user_model().objects.create_superuser(
            'test@LONDONAPPDEV.com',
            'test123'
        )
        self.assertTrue(user.is_superuser)
        self.assertTrue(user.is_staff)

    # NOTE(review): the tests below call this as a bare name
    # (``sample_user()``), which only resolves if the helper is defined at
    # module level -- the flattened source lost the indentation, so confirm
    # the intended scope against the original file.
    def sample_user(email='test@londonappdev.com', password='testpass'):
        """Create a sample user"""
        return get_user_model().objects.create_user(email, password)

    def test_tag_str(self):
        """Test the tag string representation"""
        tag = models.Tag.objects.create(
            user=sample_user(),
            name='Vegan'
        )
        self.assertEqual(str(tag), tag.name)

    def test_ingredient_str(self):
        """Test the ingredient string representation"""
        ingredient = models.Ingredient.objects.create(
            user=sample_user(),
            name='Cucumber'
        )
        self.assertEqual(str(ingredient), ingredient.name)

    def test_recipe_str(self):
        """Test the recipe string representation"""
        recipe = models.Recipe.objects.create(
            user=sample_user(),
            title='Steak and mushroom sauce',
            time_minutes=5,
            price=5.00
        )
        self.assertEqual(str(recipe), recipe.title)

    @patch('uuid.uuid4')
    def test_recipe_file_name_uuid(self, mock_uuid):
        """Test that image is saved in the correct location"""
        # uuid4 is patched so the generated file name is deterministic.
        uuid = 'test-uuid'
        mock_uuid.return_value = uuid
        file_path = models.recipe_image_file_path(None, 'myimage.jpg')
        exp_path = f'uploads/recipe/{uuid}.jpg'
        self.assertEqual(file_path, exp_path)  # (dataset-extraction junk removed from this line)
79561c85a52ac7be63a689d8567b6f56f78900dc | 1,464 | py | Python | service/ws_re/register/_base.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 5 | 2019-01-21T19:59:27.000Z | 2021-02-06T12:56:28.000Z | service/ws_re/register/_base.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 697 | 2017-11-19T12:41:11.000Z | 2022-03-31T07:35:04.000Z | service/ws_re/register/_base.py | the-it/WS_THEbotIT | 5630382e697a8b7432e0cf63a05a45fe43064caa | [
"MIT"
] | 1 | 2018-02-18T23:01:13.000Z | 2018-02-18T23:01:13.000Z | from __future__ import annotations
from abc import ABC
from pathlib import Path
from typing import Tuple, List, TYPE_CHECKING
if TYPE_CHECKING:
from service.ws_re.register.lemma import Lemma
# Directory holding the register data files, located next to this module.
_REGISTER_PATH: Path = Path(__file__).parent.joinpath("data")
class RegisterException(Exception):
    """Raised for register-specific error conditions."""
    pass
class Register(ABC):
    """Abstract base for lemma registers: holds an ordered list of lemmas."""

    def __init__(self):
        # Ordered collection of Lemma objects, populated by subclasses.
        self._lemmas: List[Lemma] = []

    @property
    def lemmas(self) -> List[Lemma]:
        """All lemmas currently held by this register."""
        return self._lemmas

    @staticmethod
    def squash_lemmas(lemmas: List[Lemma]) -> List[List[Lemma]]:
        """Group consecutive lemmas that share the same ``"lemma"`` key."""
        grouped: List[List[Lemma]] = []
        current: List[Lemma] = []
        for entry in lemmas:
            # Flush the running group when the lemma key changes.
            if current and entry["lemma"] != current[-1]["lemma"]:
                grouped.append(current)
                current = []
            current.append(entry)
        if current:
            grouped.append(current)
        return grouped

    @property
    def proof_read(self) -> Tuple[int, int, int]:
        """Counts of lemmas with proof_read status 3, 2 and 1, in that order."""
        counts = {3: 0, 2: 0, 1: 0}
        for entry in self.lemmas:
            status = entry["proof_read"]
            # Statuses other than 1/2/3 (e.g. None) are ignored.
            if status in counts:
                counts[status] += 1
        return counts[3], counts[2], counts[1]
| 27.111111 | 64 | 0.560109 |
79561d548a1b28fe15d9774a4099dac3840c5c36 | 12,702 | py | Python | mne/viz/tests/test_ica.py | rhotter/mne-python | f0ced56fadb369f2542c801d60c4472b2f545721 | [
"BSD-3-Clause"
] | null | null | null | mne/viz/tests/test_ica.py | rhotter/mne-python | f0ced56fadb369f2542c801d60c4472b2f545721 | [
"BSD-3-Clause"
] | null | null | null | mne/viz/tests/test_ica.py | rhotter/mne-python | f0ced56fadb369f2542c801d60c4472b2f545721 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Denis Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: Simplified BSD
import os.path as op
import numpy as np
from numpy.testing import assert_equal, assert_array_equal
import pytest
import matplotlib.pyplot as plt
from mne import read_events, Epochs, read_cov, pick_types, Annotations
from mne.io import read_raw_fif
from mne.preprocessing import ICA, create_ecg_epochs, create_eog_epochs
from mne.utils import run_tests_if_main, requires_sklearn
from mne.viz.ica import _create_properties_layout, plot_ica_properties
from mne.viz.utils import _fake_click
# Paths to the small test recordings shipped in mne/io/tests/data.
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_name = op.join(base_dir, 'test-eve.fif')
# Epoching parameters shared by the tests below.
event_id, tmin, tmax = 1, -0.1, 0.2
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
def _get_raw(preload=False):
    """Load the test raw FIF recording, optionally preloading it into memory."""
    return read_raw_fif(raw_fname, preload=preload)
def _get_events():
    """Read the test event array from disk."""
    return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return [0, 1, 2, 6, 7, 8, 12, 13, 14] # take a only few channels
def _get_epochs():
    """Build epochs from the test raw data on a reduced channel subset."""
    raw = _get_raw()
    events = _get_events()
    picks = _get_picks(raw)
    # The test file carries an unapplied projection, which triggers a warning.
    with pytest.warns(RuntimeWarning, match='projection'):
        epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
    return epochs
@requires_sklearn
def test_plot_ica_components():
    """Test plotting of ICA solutions."""
    res = 8
    # Keep topomap rendering cheap for speed.
    fast_test = {"res": res, "contours": 0, "sensors": False}
    raw = _get_raw()
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    ica_picks = _get_picks(raw)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=ica_picks)

    # Smoke-test the accepted forms of the `picks`-like argument.
    for components in [0, [0], [0, 1], [0, 1] * 2, None]:
        ica.plot_components(components, image_interp='bilinear',
                            colorbar=True, **fast_test)
    plt.close('all')

    # test interactive mode (passing 'inst' arg)
    ica.plot_components([0, 1], image_interp='bilinear', inst=raw, res=16)
    fig = plt.gcf()

    # test title click
    # ----------------
    lbl = fig.axes[1].get_label()
    ica_idx = int(lbl[-3:])
    titles = [ax.title for ax in fig.axes]
    title_pos_midpoint = (titles[1].get_window_extent().extents
                          .reshape((2, 2)).mean(axis=0))
    # first click adds to exclude
    _fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
    assert ica_idx in ica.exclude
    # clicking again removes from exclude
    _fake_click(fig, fig.axes[1], title_pos_midpoint, xform='pix')
    assert ica_idx not in ica.exclude

    # test topo click
    # ---------------
    _fake_click(fig, fig.axes[1], (0., 0.), xform='data')
    c_fig = plt.gcf()
    labels = [ax.get_label() for ax in c_fig.axes]
    for label in ['topomap', 'image', 'erp', 'spectrum', 'variance']:
        assert label in labels
    topomap_ax = c_fig.axes[labels.index('topomap')]
    title = topomap_ax.get_title()
    assert (lbl == title)
    # Plotting after wiping `info` must fail with a helpful message.
    ica.info = None
    with pytest.raises(RuntimeError, match='fit the ICA'):
        ica.plot_components(1, ch_type='mag')
    plt.close('all')
@requires_sklearn
def test_plot_ica_properties():
    """Test plotting of ICA properties."""
    res = 8
    raw = _get_raw(preload=True)
    raw.add_proj([], remove_existing=True)
    events = _get_events()
    picks = _get_picks(raw)[:6]
    pick_names = [raw.ch_names[k] for k in picks]
    raw.pick_channels(pick_names)
    reject = dict(grad=4000e-13, mag=4e-12)
    epochs = Epochs(raw, events[:10], event_id, tmin, tmax,
                    baseline=(None, 0), preload=True)
    # max_iter=1 keeps fitting fast; convergence is irrelevant here.
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2, max_iter=1,
              max_pca_components=2, n_pca_components=2, random_state=0)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw)

    # test _create_properties_layout
    fig, ax = _create_properties_layout()
    assert_equal(len(ax), 5)

    # Exercise the main plotting options on both Raw and Epochs input.
    topoargs = dict(topomap_args={'res': res, 'contours': 0, "sensors": False})
    ica.plot_properties(raw, picks=0, **topoargs)
    ica.plot_properties(epochs, picks=1, dB=False, plot_std=1.5, **topoargs)
    ica.plot_properties(epochs, picks=1, image_args={'sigma': 1.5},
                        topomap_args={'res': 10, 'colorbar': True},
                        psd_args={'fmax': 65.}, plot_std=False,
                        figsize=[4.5, 4.5], reject=reject)
    plt.close('all')

    # Invalid argument types/values must raise.
    pytest.raises(TypeError, ica.plot_properties, epochs, dB=list('abc'))
    pytest.raises(TypeError, ica.plot_properties, ica)
    pytest.raises(TypeError, ica.plot_properties, [0.2])
    pytest.raises(TypeError, plot_ica_properties, epochs, epochs)
    pytest.raises(TypeError, ica.plot_properties, epochs,
                  psd_args='not dict')
    pytest.raises(ValueError, ica.plot_properties, epochs, plot_std=[])

    # Plotting into user-supplied axes.
    fig, ax = plt.subplots(2, 3)
    ax = ax.ravel()[:-1]
    ica.plot_properties(epochs, picks=1, axes=ax, **topoargs)
    fig = ica.plot_properties(raw, picks=[0, 1], **topoargs)
    assert_equal(len(fig), 2)
    pytest.raises(TypeError, plot_ica_properties, epochs, ica, picks=[0, 1],
                  axes=ax)
    pytest.raises(ValueError, ica.plot_properties, epochs, axes='not axes')
    plt.close('all')

    # Test merging grads.
    pick_names = raw.ch_names[:15:2] + raw.ch_names[1:15:2]
    raw = _get_raw(preload=True).pick_channels(pick_names)
    raw.info.normalize_proj()
    ica = ICA(random_state=0, max_iter=1)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(raw)
    ica.plot_properties(raw)
    plt.close('all')

    # Test handling of zeros
    raw._data[:] = 0
    with pytest.warns(None):  # Usually UserWarning: Infinite value .* for epo
        ica.plot_properties(raw)
    ica = ICA(random_state=0, max_iter=1)
    epochs.pick_channels(pick_names)
    with pytest.warns(UserWarning, match='did not converge'):
        ica.fit(epochs)
    epochs._data[0] = 0
    with pytest.warns(None):  # Usually UserWarning: Infinite value .* for epo
        ica.plot_properties(epochs)
    plt.close('all')
@requires_sklearn
def test_plot_ica_sources():
    """Test plotting of ICA panel."""
    raw = read_raw_fif(raw_fname).crop(0, 1).load_data()
    picks = _get_picks(raw)
    epochs = _get_epochs()
    raw.pick_channels([raw.ch_names[k] for k in picks])
    ica_picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
                           ecg=False, eog=False, exclude='bads')
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=ica_picks)
    ica.exclude = [1]
    fig = ica.plot_sources(raw)
    fig.canvas.key_press_event('escape')
    # Sadly close_event isn't called on Agg backend and the test always passes.
    assert_array_equal(ica.exclude, [1])
    plt.close('all')

    # dtype can change int->np.int after load, test it explicitly
    ica.n_components_ = np.int64(ica.n_components_)
    fig = ica.plot_sources(raw)
    # also test mouse clicks
    data_ax = fig.axes[0]
    assert len(plt.get_fignums()) == 1
    _fake_click(fig, data_ax, [-0.1, 0.9])  # click on y-label
    assert len(plt.get_fignums()) == 2
    ica.exclude = [1]
    ica.plot_sources(raw)

    # test with annotations
    orig_annot = raw.annotations
    raw.set_annotations(Annotations([0.2], [0.1], 'Test'))
    fig = ica.plot_sources(raw)
    assert len(fig.axes[0].collections) == 1
    assert len(fig.axes[1].collections) == 1
    raw.set_annotations(orig_annot)

    # Instances whose channel set diverges from the fit must be rejected.
    raw.info['bads'] = ['MEG 0113']
    with pytest.raises(RuntimeError, match="Raw doesn't match fitted data"):
        ica.plot_sources(inst=raw)
    ica.plot_sources(epochs)
    epochs.info['bads'] = ['MEG 0113']
    with pytest.raises(RuntimeError, match="Epochs don't match fitted data"):
        ica.plot_sources(inst=epochs)
    epochs.info['bads'] = []
    ica.plot_sources(epochs.average())
    evoked = epochs.average()
    fig = ica.plot_sources(evoked)
    # Test a click
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax,
                [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax,
                [ax.get_xlim()[0], ax.get_ylim()[1]], 'data')
    # plot with bad channels excluded
    ica.exclude = [0]
    ica.plot_sources(evoked)
    ica.labels_ = dict(eog=[0])
    ica.labels_['eog/0/crazy-channel'] = [0]
    ica.plot_sources(evoked)  # now with labels
    with pytest.raises(ValueError, match='must be of Raw or Epochs type'):
        ica.plot_sources('meeow')
    plt.close('all')
@requires_sklearn
def test_plot_ica_overlay():
    """Test plotting of ICA cleaning."""
    raw = _get_raw(preload=True)
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3, random_state=0)
    # can't use info.normalize_proj here because of how and when ICA and Epochs
    # objects do picking of Raw data
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # don't test raw, needs preload ...
    with pytest.warns(RuntimeWarning, match='projection'):
        ecg_epochs = create_ecg_epochs(raw, picks=picks)
    ica.plot_overlay(ecg_epochs.average())
    with pytest.warns(RuntimeWarning, match='projection'):
        eog_epochs = create_eog_epochs(raw, picks=picks)
    ica.plot_overlay(eog_epochs.average())
    # Invalid inputs must raise.
    pytest.raises(TypeError, ica.plot_overlay, raw[:2, :3][0])
    pytest.raises(TypeError, ica.plot_overlay, raw, exclude=2)
    ica.plot_overlay(raw)
    plt.close('all')

    # smoke test for CTF
    raw = read_raw_fif(raw_ctf_fname)
    raw.apply_gradient_compensation(3)
    picks = pick_types(raw.info, meg=True, ref_meg=False)
    ica = ICA(n_components=2, max_pca_components=3, n_pca_components=3)
    ica.fit(raw, picks=picks)
    with pytest.warns(RuntimeWarning, match='longer than'):
        ecg_epochs = create_ecg_epochs(raw)
    ica.plot_overlay(ecg_epochs.average())
    plt.close('all')
@requires_sklearn
def test_plot_ica_scores():
    """Test plotting of ICA scores."""
    raw = _get_raw()
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    # Labels can be addressed by full key or by prefix ('eog', 'ecg').
    ica.labels_ = dict()
    ica.labels_['eog/0/foo'] = 0
    ica.labels_['eog'] = 0
    ica.labels_['ecg'] = 1
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1])
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='foo')
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='eog')
    ica.plot_scores([0.3, 0.2], axhline=[0.1, -0.1], labels='ecg')
    # Mismatched label/score lengths and too-short score lists must raise.
    pytest.raises(
        ValueError,
        ica.plot_scores,
        [0.3, 0.2], axhline=[0.1, -0.1], labels=['one', 'one-too-many'])
    pytest.raises(ValueError, ica.plot_scores, [0.2])
    plt.close('all')
@requires_sklearn
def test_plot_instance_components():
    """Test plotting of components as instances of raw and epochs."""
    raw = _get_raw()
    picks = _get_picks(raw)
    ica = ICA(noise_cov=read_cov(cov_fname), n_components=2,
              max_pca_components=3, n_pca_components=3)
    with pytest.warns(RuntimeWarning, match='projection'):
        ica.fit(raw, picks=picks)
    ica.exclude = [0]
    fig = ica.plot_sources(raw, title='Components')
    # Smoke-test all keyboard shortcuts of the browser figure.
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]],
                'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
    epochs = _get_epochs()
    fig = ica.plot_sources(epochs, title='Components')
    for key in ['down', 'up', 'right', 'left', 'o', '-', '+', '=', 'pageup',
                'pagedown', 'home', 'end', 'f11', 'b']:
        fig.canvas.key_press_event(key)
    # Test a click
    ax = fig.get_axes()[0]
    line = ax.lines[0]
    _fake_click(fig, ax, [line.get_xdata()[0], line.get_ydata()[0]], 'data')
    _fake_click(fig, ax, [-0.1, 0.9])  # click on y-label
    fig.canvas.key_press_event('escape')
    plt.close('all')
run_tests_if_main()  # run this module's tests when executed as a script
| 36.710983 | 79 | 0.648717 |
79561ea07bc351c7afa874fd926b9c238d8ae3aa | 1,383 | py | Python | src/compas_blender/artists/polyhedronartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | src/compas_blender/artists/polyhedronartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | src/compas_blender/artists/polyhedronartist.py | duchaoyu/compas | d484500d68d44fd6e227c3bbee20a2edde6e6c96 | [
"MIT"
] | null | null | null | from typing import Optional
from typing import Any
from typing import Union
import bpy
import compas_blender
from compas.geometry import Polyhedron
from compas.artists import ShapeArtist
from .artist import BlenderArtist
class PolyhedronArtist(BlenderArtist, ShapeArtist):
    """Artist for drawing polyhedron shapes.

    Parameters
    ----------
    polyhedron : :class:`compas.geometry.Polyhedron`
        A COMPAS polyhedron.
    collection: str or :class:`bpy.types.Collection`
        The name of the collection the object belongs to.
    """

    def __init__(self,
                 polyhedron: Polyhedron,
                 collection: Optional[Union[str, bpy.types.Collection]] = None,
                 **kwargs: Any):
        # Default the Blender collection to one named after the polyhedron.
        super().__init__(shape=polyhedron, collection=collection or polyhedron.name, **kwargs)

    def draw(self, color=None):
        """Draw the polyhedron associated with the artist.

        Parameters
        ----------
        color : tuple of float, optional
            The RGB color of the polyhedron.  Falls back to the artist's
            own ``color`` attribute when omitted.

        Returns
        -------
        list
            The objects created in Blender (a single mesh object).
        """
        color = color or self.color
        vertices, faces = self.shape.to_vertices_and_faces()
        obj = compas_blender.draw_mesh(vertices, faces, name=self.shape.name, color=color, collection=self.collection)
        return [obj]
| 29.425532 | 118 | 0.646421 |
79561ead8af94890505d02e8a13ad94d1533d837 | 818 | py | Python | venv/lib/python3.6/site-packages/sqlalchemy/dialects/sqlite/__init__.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/sqlalchemy/dialects/sqlite/__init__.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | 1 | 2021-06-01T23:32:38.000Z | 2021-06-01T23:32:38.000Z | venv/lib/python3.6/site-packages/sqlalchemy/dialects/sqlite/__init__.py | aitoehigie/britecore_flask | eef1873dbe6b2cc21f770bc6dec783007ae4493b | [
"MIT"
] | null | null | null | # sqlite/__init__.py
# Copyright (C) 2005-2018 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from . import base, pysqlite, pysqlcipher # noqa
from sqlalchemy.dialects.sqlite.base import (
BLOB,
BOOLEAN,
CHAR,
DATE,
DATETIME,
DECIMAL,
FLOAT,
INTEGER,
REAL,
NUMERIC,
SMALLINT,
TEXT,
TIME,
TIMESTAMP,
VARCHAR,
)
# default dialect
# pysqlite (the stdlib sqlite3 adapter) is used when no driver is named
# in the database URL.
base.dialect = dialect = pysqlite.dialect

# Public names re-exported by this dialect package.
__all__ = (
    "BLOB",
    "BOOLEAN",
    "CHAR",
    "DATE",
    "DATETIME",
    "DECIMAL",
    "FLOAT",
    "INTEGER",
    "NUMERIC",
    "SMALLINT",
    "TEXT",
    "TIME",
    "TIMESTAMP",
    "VARCHAR",
    "REAL",
    "dialect",
)
| 16.36 | 69 | 0.607579 |
79561f1db9bd9b61e263cd76c06177505345ccb9 | 1,187 | py | Python | scripts/normalize_genes_by_ags.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 20 | 2017-11-02T13:36:16.000Z | 2021-07-23T12:44:28.000Z | scripts/normalize_genes_by_ags.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 30 | 2018-02-22T18:25:02.000Z | 2019-11-06T15:03:34.000Z | scripts/normalize_genes_by_ags.py | dcdanko/MetaSUB_CAP | db5672b0206afb3ffe3204b0577a4a5f84b9bcd4 | [
"MIT"
] | 9 | 2018-04-26T22:12:08.000Z | 2020-08-06T01:04:54.000Z | #! /usr/bin/env python3
import click
'''
Normalize the output of humann2 using
Average Genome Size from microbe census
Gene output from humann2 is already in reads per kilobase (RPK)
In a previous step we normalize by sample depth to get
reads per kilobase millions of reads (RPKM)
Produces output in reads per kilobase per genome (RPKMG)
The source paper for microbe census describes this normalization
in detail
'''
def mcParse(mcFile):
    """Extract the average genome size (AGS) from a MicrobeCensus report.

    Scans the report line by line for the first line containing
    'average_genome_size' and returns its second whitespace-separated
    field as a float.  Returns -1 when no such line is present.
    """
    with open(mcFile) as handle:
        for record in handle:
            if 'average_genome_size' not in record:
                continue
            fields = record.strip().split()
            return float(fields[1])
    return -1
@click.command()
@click.argument('humann2_genes')
@click.argument('microbe_census')
def main(humann2_genes, microbe_census):
    """Normalize a humann2 RPKM gene table to RPKMG.

    HUMANN2_GENES is a two-column (group<TAB>value) humann2 table;
    MICROBE_CENSUS is the MicrobeCensus report supplying the average
    genome size (AGS).  The normalized table is written to stdout.
    """
    # AGS is reported in bp; convert to megabases for readable output.
    ags = mcParse(microbe_census) / (1000 * 1000)
    if ags <= 0:
        # mcParse returns -1 when the AGS line is missing; dividing by a
        # non-positive AGS would silently emit nonsense values.
        raise ValueError(
            'no average_genome_size found in {}'.format(microbe_census))
    print('# Normalized RPKMG\tAGS={}M'.format(ags))
    with open(humann2_genes) as hgs:
        for line in hgs:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines (line[0] used to IndexError)
            if line.startswith('#'):
                print(line)  # pass comment/header lines through unchanged
                continue
            grp, val = line.split('\t')
            nval = float(val) / ags
            print('{}\t{}'.format(grp, nval))
if __name__ == '__main__':
    main()  # CLI entry point (argument parsing handled by click)
| 24.729167 | 64 | 0.63353 |
79561f35f7c7633cfa9033565596b4d8b67fb10e | 44,180 | py | Python | qa/rpc-tests/p2p-compactblocks.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | null | null | null | qa/rpc-tests/p2p-compactblocks.py | alexandergaldones/bitcoin | 6206252e5073c1cde2e313f2e5a3ca17582c5823 | [
"MIT"
] | 1 | 2017-03-10T16:37:46.000Z | 2017-03-10T16:37:46.000Z | qa/rpc-tests/p2p-compactblocks.py | PETER-ITPE/bitcoin_pjt | 53c300fb525ab3e21206d47d8353f5246b4f24d1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) 2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test compact blocks (BIP 152).
Version 1 compact blocks are pre-segwit (txids)
Version 2 compact blocks are post-segwit (wtxids)
"""
from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to bitcoind, and store responses.
class TestNode(SingleNodeConnCB):
    """P2P peer that records messages from bitcoind for later inspection."""

    def __init__(self):
        SingleNodeConnCB.__init__(self)
        # Last-seen message of each relevant type (None/[] until received).
        self.last_sendcmpct = []
        self.last_headers = None
        self.last_inv = None
        self.last_cmpctblock = None
        self.block_announced = False
        self.last_getdata = None
        self.last_getheaders = None
        self.last_getblocktxn = None
        self.last_block = None
        self.last_blocktxn = None
        # Store the hashes of blocks we've seen announced.
        # This is for synchronizing the p2p message traffic,
        # so we can eg wait until a particular block is announced.
        self.set_announced_blockhashes = set()

    def on_sendcmpct(self, conn, message):
        self.last_sendcmpct.append(message)

    def on_block(self, conn, message):
        self.last_block = message

    def on_cmpctblock(self, conn, message):
        # A compact block counts as a block announcement.
        self.last_cmpctblock = message
        self.block_announced = True
        self.last_cmpctblock.header_and_shortids.header.calc_sha256()
        self.set_announced_blockhashes.add(self.last_cmpctblock.header_and_shortids.header.sha256)

    def on_headers(self, conn, message):
        # Every header in a headers message counts as an announcement.
        self.last_headers = message
        self.block_announced = True
        for x in self.last_headers.headers:
            x.calc_sha256()
            self.set_announced_blockhashes.add(x.sha256)

    def on_inv(self, conn, message):
        self.last_inv = message
        for x in self.last_inv.inv:
            if x.type == 2:  # block inventory entries count as announcements
                self.block_announced = True
                self.set_announced_blockhashes.add(x.hash)

    def on_getdata(self, conn, message):
        self.last_getdata = message

    def on_getheaders(self, conn, message):
        self.last_getheaders = message

    def on_getblocktxn(self, conn, message):
        self.last_getblocktxn = message

    def on_blocktxn(self, conn, message):
        self.last_blocktxn = message

    # Requires caller to hold mininode_lock
    def received_block_announcement(self):
        return self.block_announced

    def clear_block_announcement(self):
        # Reset announcement state so the next announcement can be awaited.
        with mininode_lock:
            self.block_announced = False
            self.last_inv = None
            self.last_headers = None
            self.last_cmpctblock = None

    def get_headers(self, locator, hashstop):
        """Send a getheaders message with the given locator/hashstop."""
        msg = msg_getheaders()
        msg.locator.vHave = locator
        msg.hashstop = hashstop
        self.connection.send_message(msg)

    def send_header_for_blocks(self, new_blocks):
        """Announce the given blocks to the node via a headers message."""
        headers_message = msg_headers()
        headers_message.headers = [CBlockHeader(b) for b in new_blocks]
        self.send_message(headers_message)

    def request_headers_and_sync(self, locator, hashstop=0):
        """Request headers and block until an announcement arrives."""
        self.clear_block_announcement()
        self.get_headers(locator, hashstop)
        assert(wait_until(self.received_block_announcement, timeout=30))
        assert(self.received_block_announcement())
        self.clear_block_announcement()

    # Block until a block announcement for a particular block hash is
    # received.
    def wait_for_block_announcement(self, block_hash, timeout=30):
        def received_hash():
            return (block_hash in self.set_announced_blockhashes)
        return wait_until(received_hash, timeout=timeout)
class CompactBlocksTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
# Node0 = pre-segwit, node1 = segwit-aware
self.num_nodes = 2
self.utxos = []
    def setup_network(self):
        self.nodes = []
        # Start up node0 to be a version 1, pre-segwit node.
        self.nodes = start_nodes(self.num_nodes, self.options.tmpdir,
                [["-debug", "-logtimemicros=1", "-bip9params=segwit:0:0"],
                 ["-debug", "-logtimemicros", "-txindex"]])
        # Connect the two nodes so blocks and transactions relay between them.
        connect_nodes(self.nodes[0], 1)
    def build_block_on_tip(self, node, segwit=False):
        # Create (and solve) a coinbase-only block extending the node's tip.
        height = node.getblockcount()
        tip = node.getbestblockhash()
        # Use mediantime+1 so the new block's timestamp is valid.
        mtp = node.getblockheader(tip)['mediantime']
        block = create_block(int(tip, 16), create_coinbase(height + 1), mtp + 1)
        block.nVersion = 4
        if segwit:
            add_witness_commitment(block)
        block.solve()
        return block
    # Create 10 more anyone-can-spend utxo's for testing.
    def make_utxos(self):
        # Doesn't matter which node we use, just use node0.
        block = self.build_block_on_tip(self.nodes[0])
        self.test_node.send_and_ping(msg_block(block))
        assert(int(self.nodes[0].getbestblockhash(), 16) == block.sha256)
        # Mature the coinbase so it is spendable.
        self.nodes[0].generate(100)
        # Split the coinbase value into 10 OP_TRUE outputs.
        total_value = block.vtx[0].vout[0].nValue
        out_value = total_value // 10
        tx = CTransaction()
        tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
        for i in range(10):
            tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
        tx.rehash()
        block2 = self.build_block_on_tip(self.nodes[0])
        block2.vtx.append(tx)
        block2.hashMerkleRoot = block2.calc_merkle_root()
        block2.solve()
        self.test_node.send_and_ping(msg_block(block2))
        assert_equal(int(self.nodes[0].getbestblockhash(), 16), block2.sha256)
        # Record [txid, vout index, value] for each new utxo.
        self.utxos.extend([[tx.sha256, i, out_value] for i in range(10)])
        return
# Test "sendcmpct" (between peers preferring the same version):
# - No compact block announcements unless sendcmpct is sent.
# - If sendcmpct is sent with version > preferred_version, the message is ignored.
# - If sendcmpct is sent with boolean 0, then block announcements are not
# made with compact blocks.
# - If sendcmpct is then sent with boolean 1, then new block announcements
# are made with compact blocks.
# If old_node is passed in, request compact blocks with version=preferred-1
# and verify that it receives block announcements via compact block.
def test_sendcmpct(self, node, test_node, preferred_version, old_node=None):
# Make sure we get a SENDCMPCT message from our peer
def received_sendcmpct():
return (len(test_node.last_sendcmpct) > 0)
got_message = wait_until(received_sendcmpct, timeout=30)
assert(received_sendcmpct())
assert(got_message)
with mininode_lock:
# Check that the first version received is the preferred one
assert_equal(test_node.last_sendcmpct[0].version, preferred_version)
# And that we receive versions down to 1.
assert_equal(test_node.last_sendcmpct[-1].version, 1)
test_node.last_sendcmpct = []
tip = int(node.getbestblockhash(), 16)
def check_announcement_of_new_block(node, peer, predicate):
peer.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
peer.wait_for_block_announcement(block_hash, timeout=30)
assert(peer.block_announced)
assert(got_message)
with mininode_lock:
assert predicate(peer), (
"block_hash={!r}, cmpctblock={!r}, inv={!r}".format(
block_hash, peer.last_cmpctblock, peer.last_inv))
# We shouldn't get any block announcements via cmpctblock yet.
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Try one more time, this time after requesting headers.
test_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_inv is not None)
# Test a few ways of using sendcmpct that should NOT
# result in compact block announcements.
# Before each test, sync the headers chain.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with too-high version
sendcmpct = msg_sendcmpct()
sendcmpct.version = preferred_version+1
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Now try a SENDCMPCT message with valid version, but announce=False
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None)
# Headers sync before next test.
test_node.request_headers_and_sync(locator=[tip])
# Finally, try a SENDCMPCT message with announce=True
sendcmpct.version = preferred_version
sendcmpct.announce = True
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time (no headers sync should be needed!)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after turning on sendheaders
test_node.send_and_ping(msg_sendheaders())
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Try one more time, after sending a version-1, announce=false message.
sendcmpct.version = preferred_version-1
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is not None)
# Now turn off announcements
sendcmpct.version = preferred_version
sendcmpct.announce = False
test_node.send_and_ping(sendcmpct)
check_announcement_of_new_block(node, test_node, lambda p: p.last_cmpctblock is None and p.last_headers is not None)
if old_node is not None:
# Verify that a peer using an older protocol version can receive
# announcements from this node.
sendcmpct.version = preferred_version-1
sendcmpct.announce = True
old_node.send_and_ping(sendcmpct)
# Header sync
old_node.request_headers_and_sync(locator=[tip])
check_announcement_of_new_block(node, old_node, lambda p: p.last_cmpctblock is not None)
    # This test actually causes bitcoind to (reasonably!) disconnect us, so do this last.
    def test_invalid_cmpctblock_message(self):
        self.nodes[0].generate(101)
        block = self.build_block_on_tip(self.nodes[0])
        cmpct_block = P2PHeaderAndShortIDs()
        cmpct_block.header = CBlockHeader(block)
        cmpct_block.prefilled_txn_length = 1
        # This index will be too high
        prefilled_txn = PrefilledTransaction(1, block.vtx[0])
        cmpct_block.prefilled_txn = [prefilled_txn]
        self.test_node.send_and_ping(msg_cmpctblock(cmpct_block))
        # The invalid compact block must not have advanced the tip.
        assert(int(self.nodes[0].getbestblockhash(), 16) == block.hashPrevBlock)
# Compare the generated shortids to what we expect based on BIP 152, given
# bitcoind's choice of nonce.
def test_compactblock_construction(self, node, test_node, version, use_witness_address):
# Generate a bunch of transactions.
node.generate(101)
num_transactions = 25
address = node.getnewaddress()
if use_witness_address:
# Want at least one segwit spend, so move all funds to
# a witness address.
address = node.addwitnessaddress(address)
value_to_send = node.getbalance()
node.sendtoaddress(address, satoshi_round(value_to_send-Decimal(0.1)))
node.generate(1)
segwit_tx_generated = False
for i in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
if not tx.wit.is_null():
segwit_tx_generated = True
if use_witness_address:
assert(segwit_tx_generated) # check that our test is not broken
# Wait until we've seen the block announcement for the resulting tip
tip = int(node.getbestblockhash(), 16)
assert(test_node.wait_for_block_announcement(tip))
# Make sure we will receive a fast-announce compact block
self.request_cb_announcements(test_node, node, version)
# Now mine a block, and look at the resulting compact block.
test_node.clear_block_announcement()
block_hash = int(node.generate(1)[0], 16)
# Store the raw block in our internal format.
block = FromHex(CBlock(), node.getblock("%02x" % block_hash, False))
[tx.calc_sha256() for tx in block.vtx]
block.rehash()
# Wait until the block was announced (via compact blocks)
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert(test_node.last_cmpctblock is not None)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
# Now fetch the compact block using a normal non-announce getdata
with mininode_lock:
test_node.clear_block_announcement()
inv = CInv(4, block_hash) # 4 == "CompactBlock"
test_node.send_message(msg_getdata([inv]))
wait_until(test_node.received_block_announcement, timeout=30)
assert(test_node.received_block_announcement())
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
assert(test_node.last_cmpctblock is not None)
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_cmpctblock.header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
    def check_compactblock_construction_from_block(self, version, header_and_shortids, block_hash, block):
        # Verify a received compact block against the full block per BIP 152.
        # Check that we got the right block!
        header_and_shortids.header.calc_sha256()
        assert_equal(header_and_shortids.header.sha256, block_hash)
        # Make sure the prefilled_txn appears to have included the coinbase
        assert(len(header_and_shortids.prefilled_txn) >= 1)
        assert_equal(header_and_shortids.prefilled_txn[0].index, 0)
        # Check that all prefilled_txn entries match what's in the block.
        for entry in header_and_shortids.prefilled_txn:
            entry.tx.calc_sha256()
            # This checks the non-witness parts of the tx agree
            assert_equal(entry.tx.sha256, block.vtx[entry.index].sha256)
            # And this checks the witness
            wtxid = entry.tx.calc_sha256(True)
            if version == 2:
                assert_equal(wtxid, block.vtx[entry.index].calc_sha256(True))
            else:
                # Shouldn't have received a witness
                assert(entry.tx.wit.is_null())
        # Check that the cmpctblock message announced all the transactions.
        assert_equal(len(header_and_shortids.prefilled_txn) + len(header_and_shortids.shortids), len(block.vtx))
        # And now check that all the shortids are as expected as well.
        # Determine the siphash keys to use.
        [k0, k1] = header_and_shortids.get_siphash_keys()
        # Walk the block's transactions, consuming either a prefilled entry
        # or a shortid for each one (both lists are in block order).
        index = 0
        while index < len(block.vtx):
            if (len(header_and_shortids.prefilled_txn) > 0 and
                    header_and_shortids.prefilled_txn[0].index == index):
                # Already checked prefilled transactions above
                header_and_shortids.prefilled_txn.pop(0)
            else:
                # Shortids are computed from wtxids for version 2 peers.
                tx_hash = block.vtx[index].sha256
                if version == 2:
                    tx_hash = block.vtx[index].calc_sha256(True)
                shortid = calculate_shortid(k0, k1, tx_hash)
                assert_equal(shortid, header_and_shortids.shortids[0])
                header_and_shortids.shortids.pop(0)
            index += 1
    # Test that bitcoind requests compact blocks when we announce new blocks
    # via header or inv, and that responding to getblocktxn causes the block
    # to be successfully reconstructed.
    # Post-segwit: upgraded nodes would only make this request of cb-version-2,
    # NODE_WITNESS peers.  Unupgraded nodes would still make this request of
    # any cb-version-1-supporting peer.
    def test_compactblock_requests(self, node, test_node, version, segwit):
        # Try announcing a block with an inv or header, expect a compactblock
        # request
        for announce in ["inv", "header"]:
            block = self.build_block_on_tip(node, segwit=segwit)
            with mininode_lock:
                test_node.last_getdata = None
            if announce == "inv":
                test_node.send_message(msg_inv([CInv(2, block.sha256)]))
                # Node should ask for headers first when announced via inv.
                success = wait_until(lambda: test_node.last_getheaders is not None, timeout=30)
                assert(success)
                test_node.send_header_for_blocks([block])
            else:
                test_node.send_header_for_blocks([block])
            success = wait_until(lambda: test_node.last_getdata is not None, timeout=30)
            assert(success)
            assert_equal(len(test_node.last_getdata.inv), 1)
            # Type 4 == MSG_CMPCT_BLOCK.
            assert_equal(test_node.last_getdata.inv[0].type, 4)
            assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
            # Send back a compactblock message that omits the coinbase
            comp_block = HeaderAndShortIDs()
            comp_block.header = CBlockHeader(block)
            comp_block.nonce = 0
            [k0, k1] = comp_block.get_siphash_keys()
            coinbase_hash = block.vtx[0].sha256
            if version == 2:
                coinbase_hash = block.vtx[0].calc_sha256(True)
            comp_block.shortids = [
                    calculate_shortid(k0, k1, coinbase_hash) ]
            test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
            # Tip must not advance: the node is missing the coinbase.
            assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
            # Expect a getblocktxn message.
            with mininode_lock:
                assert(test_node.last_getblocktxn is not None)
                absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
            assert_equal(absolute_indexes, [0])  # should be a coinbase request
            # Send the coinbase, and verify that the tip advances.
            if version == 2:
                msg = msg_witness_blocktxn()
            else:
                msg = msg_blocktxn()
            msg.block_transactions.blockhash = block.sha256
            msg.block_transactions.transactions = [block.vtx[0]]
            test_node.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    # Create a chain of transactions from given utxo, and add to a new block.
    def build_block_with_transactions(self, node, utxo, num_transactions):
        block = self.build_block_on_tip(node)
        for i in range(num_transactions):
            # Each tx spends the previous one's output, paying a 1000-sat fee.
            tx = CTransaction()
            tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
            tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE])))
            tx.rehash()
            utxo = [tx.sha256, 0, tx.vout[0].nValue]
            block.vtx.append(tx)
        block.hashMerkleRoot = block.calc_merkle_root()
        block.solve()
        return block
    # Test that we only receive getblocktxn requests for transactions that the
    # node needs, and that responding to them causes the block to be
    # reconstructed.
    def test_getblocktxn_requests(self, node, test_node, version):
        # Version-2 compact blocks carry witness data.
        with_witness = (version==2)
        def test_getblocktxn_response(compact_block, peer, expected_result):
            # Send a compact block and check which indexes get requested back.
            msg = msg_cmpctblock(compact_block.to_p2p())
            peer.send_and_ping(msg)
            with mininode_lock:
                assert(peer.last_getblocktxn is not None)
                absolute_indexes = peer.last_getblocktxn.block_txn_request.to_absolute()
            assert_equal(absolute_indexes, expected_result)
        def test_tip_after_message(node, peer, msg, tip):
            peer.send_and_ping(msg)
            assert_equal(int(node.getbestblockhash(), 16), tip)
        # First try announcing compactblocks that won't reconstruct, and verify
        # that we receive getblocktxn messages back.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [1, 2, 3, 4, 5])
        msg_bt = msg_blocktxn()
        if with_witness:
            msg_bt = msg_witness_blocktxn() # serialize with witnesses
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[1:])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Now try interspersing the prefilled transactions
        comp_block.initialize_from_block(block, prefill_list=[0, 1, 5], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [2, 3, 4])
        msg_bt.block_transactions = BlockTransactions(block.sha256, block.vtx[2:5])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)
        # Now try giving one transaction ahead of time.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 5)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        test_node.send_and_ping(msg_tx(block.vtx[1]))
        assert(block.vtx[1].hash in node.getrawmempool())
        # Prefill 4 out of the 6 transactions, and verify that only the one
        # that was not in the mempool is requested.
        comp_block.initialize_from_block(block, prefill_list=[0, 2, 3, 4], use_witness=with_witness)
        test_getblocktxn_response(comp_block, test_node, [5])
        msg_bt.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]])
        test_tip_after_message(node, test_node, msg_bt, block.sha256)
        # Now provide all transactions to the node before the block is
        # announced and verify reconstruction happens immediately.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        for tx in block.vtx[1:]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:]:
            assert(tx.hash in mempool)
        # Clear out last request.
        with mininode_lock:
            test_node.last_getblocktxn = None
        # Send compact block
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=with_witness)
        test_tip_after_message(node, test_node, msg_cmpctblock(comp_block.to_p2p()), block.sha256)
        with mininode_lock:
            # Shouldn't have gotten a request for any transaction
            assert(test_node.last_getblocktxn is None)
    # Incorrectly responding to a getblocktxn shouldn't cause the block to be
    # permanently failed.
    def test_incorrect_blocktxn_response(self, node, test_node, version):
        if (len(self.utxos) == 0):
            self.make_utxos()
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 10)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Relay the first 5 transactions from the block in advance
        for tx in block.vtx[1:6]:
            test_node.send_message(msg_tx(tx))
        test_node.sync_with_ping()
        # Make sure all transactions were accepted.
        mempool = node.getrawmempool()
        for tx in block.vtx[1:6]:
            assert(tx.hash in mempool)
        # Send compact block
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block, prefill_list=[0], use_witness=(version == 2))
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        absolute_indexes = []
        with mininode_lock:
            assert(test_node.last_getblocktxn is not None)
            absolute_indexes = test_node.last_getblocktxn.block_txn_request.to_absolute()
        # The five txs not relayed in advance should be requested.
        assert_equal(absolute_indexes, [6, 7, 8, 9, 10])
        # Now give an incorrect response.
        # Note that it's possible for bitcoind to be smart enough to know we're
        # lying, since it could check to see if the shortid matches what we're
        # sending, and eg disconnect us for misbehavior.  If that behavior
        # change were made, we could just modify this test by having a
        # different peer provide the block further down, so that we're still
        # verifying that the block isn't marked bad permanently. This is good
        # enough for now.
        msg = msg_blocktxn()
        if version==2:
            msg = msg_witness_blocktxn()
        # Deliberately wrong: repeats vtx[5] and skips vtx[6].
        msg.block_transactions = BlockTransactions(block.sha256, [block.vtx[5]] + block.vtx[7:])
        test_node.send_and_ping(msg)
        # Tip should not have updated
        assert_equal(int(node.getbestblockhash(), 16), block.hashPrevBlock)
        # We should receive a getdata request
        success = wait_until(lambda: test_node.last_getdata is not None, timeout=10)
        assert(success)
        assert_equal(len(test_node.last_getdata.inv), 1)
        assert(test_node.last_getdata.inv[0].type == 2 or test_node.last_getdata.inv[0].type == 2|MSG_WITNESS_FLAG)
        assert_equal(test_node.last_getdata.inv[0].hash, block.sha256)
        # Deliver the block
        if version==2:
            test_node.send_and_ping(msg_witness_block(block))
        else:
            test_node.send_and_ping(msg_block(block))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    def test_getblocktxn_handler(self, node, test_node, version):
        # bitcoind will not send blocktxn responses for blocks whose height is
        # more than 10 blocks deep.
        MAX_GETBLOCKTXN_DEPTH = 10
        chain_height = node.getblockcount()
        current_height = chain_height
        while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH):
            block_hash = node.getblockhash(current_height)
            block = FromHex(CBlock(), node.getblock(block_hash, False))
            # Request a random subset of the block's transactions.
            msg = msg_getblocktxn()
            msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [])
            num_to_request = random.randint(1, len(block.vtx))
            msg.block_txn_request.from_absolute(sorted(random.sample(range(len(block.vtx)), num_to_request)))
            test_node.send_message(msg)
            success = wait_until(lambda: test_node.last_blocktxn is not None, timeout=10)
            assert(success)
            [tx.calc_sha256() for tx in block.vtx]
            with mininode_lock:
                assert_equal(test_node.last_blocktxn.block_transactions.blockhash, int(block_hash, 16))
                all_indices = msg.block_txn_request.to_absolute()
                for index in all_indices:
                    tx = test_node.last_blocktxn.block_transactions.transactions.pop(0)
                    tx.calc_sha256()
                    assert_equal(tx.sha256, block.vtx[index].sha256)
                    if version == 1:
                        # Witnesses should have been stripped
                        assert(tx.wit.is_null())
                    else:
                        # Check that the witness matches
                        assert_equal(tx.calc_sha256(True), block.vtx[index].calc_sha256(True))
                test_node.last_blocktxn = None
            current_height -= 1
        # Next request should send a full block response, as we're past the
        # allowed depth for a blocktxn response.
        block_hash = node.getblockhash(current_height)
        msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), [0])
        with mininode_lock:
            test_node.last_block = None
            test_node.last_blocktxn = None
        test_node.send_and_ping(msg)
        with mininode_lock:
            # A full block is returned instead of a blocktxn message.
            test_node.last_block.block.calc_sha256()
            assert_equal(test_node.last_block.block.sha256, int(block_hash, 16))
            assert_equal(test_node.last_blocktxn, None)
    def test_compactblocks_not_at_tip(self, node, test_node):
        # Test that requesting old compactblocks doesn't work.
        MAX_CMPCTBLOCK_DEPTH = 5
        new_blocks = []
        for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
            test_node.clear_block_announcement()
            new_blocks.append(node.generate(1)[0])
            wait_until(test_node.received_block_announcement, timeout=30)
        # At depth MAX_CMPCTBLOCK_DEPTH, the compact block is still served.
        test_node.clear_block_announcement()
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        success = wait_until(lambda: test_node.last_cmpctblock is not None, timeout=30)
        assert(success)
        test_node.clear_block_announcement()
        node.generate(1)
        wait_until(test_node.received_block_announcement, timeout=30)
        test_node.clear_block_announcement()
        with mininode_lock:
            test_node.last_block = None
        # One block deeper: expect a full block instead of a compact block.
        test_node.send_message(msg_getdata([CInv(4, int(new_blocks[0], 16))]))
        success = wait_until(lambda: test_node.last_block is not None, timeout=30)
        assert(success)
        with mininode_lock:
            test_node.last_block.block.calc_sha256()
            assert_equal(test_node.last_block.block.sha256, int(new_blocks[0], 16))
        # Generate an old compactblock, and verify that it's not accepted.
        cur_height = node.getblockcount()
        hashPrevBlock = int(node.getblockhash(cur_height-5), 16)
        block = self.build_block_on_tip(node)
        block.hashPrevBlock = hashPrevBlock
        block.solve()
        comp_block = HeaderAndShortIDs()
        comp_block.initialize_from_block(block)
        test_node.send_and_ping(msg_cmpctblock(comp_block.to_p2p()))
        tips = node.getchaintips()
        found = False
        for x in tips:
            if x["hash"] == block.hash:
                # Header accepted but block body never fetched.
                assert_equal(x["status"], "headers-only")
                found = True
                break
        assert(found)
        # Requesting this block via getblocktxn should silently fail
        # (to avoid fingerprinting attacks).
        msg = msg_getblocktxn()
        msg.block_txn_request = BlockTransactionsRequest(block.sha256, [0])
        with mininode_lock:
            test_node.last_blocktxn = None
        test_node.send_and_ping(msg)
        with mininode_lock:
            assert(test_node.last_blocktxn is None)
    def activate_segwit(self, node):
        # Mine three BIP9 retarget periods (144 blocks each on regtest) so the
        # segwit deployment reaches ACTIVE.
        node.generate(144*3)
        assert_equal(get_bip9_status(node, "segwit")["status"], 'active')
    def test_end_to_end_block_relay(self, node, listeners):
        # Submit a block to the node and verify every listener hears about it
        # via a compact block announcement.
        utxo = self.utxos.pop(0)
        block = self.build_block_with_transactions(node, utxo, 10)
        [l.clear_block_announcement() for l in listeners]
        # ToHex() won't serialize with witness, but this block has no witnesses
        # anyway. TODO: repeat this test with witness tx's to a segwit node.
        node.submitblock(ToHex(block))
        for l in listeners:
            wait_until(lambda: l.received_block_announcement(), timeout=30)
        with mininode_lock:
            for l in listeners:
                assert(l.last_cmpctblock is not None)
                l.last_cmpctblock.header_and_shortids.header.calc_sha256()
                assert_equal(l.last_cmpctblock.header_and_shortids.header.sha256, block.sha256)
# Test that we don't get disconnected if we relay a compact block with valid header,
# but invalid transactions.
def test_invalid_tx_in_compactblock(self, node, test_node, use_segwit):
assert(len(self.utxos))
utxo = self.utxos[0]
block = self.build_block_with_transactions(node, utxo, 5)
del block.vtx[3]
block.hashMerkleRoot = block.calc_merkle_root()
if use_segwit:
# If we're testing with segwit, also drop the coinbase witness,
# but include the witness commitment.
add_witness_commitment(block)
block.vtx[0].wit.vtxinwit = []
block.solve()
# Now send the compact block with all transactions prefilled, and
# verify that we don't get disconnected.
comp_block = HeaderAndShortIDs()
comp_block.initialize_from_block(block, prefill_list=[0, 1, 2, 3, 4], use_witness=use_segwit)
msg = msg_cmpctblock(comp_block.to_p2p())
test_node.send_and_ping(msg)
# Check that the tip didn't advance
assert(int(node.getbestblockhash(), 16) is not block.sha256)
test_node.sync_with_ping()
    # Helper for enabling cb announcements
    # Send the sendcmpct request and sync headers
    def request_cb_announcements(self, peer, node, version):
        tip = node.getbestblockhash()
        peer.get_headers(locator=[int(tip, 16)], hashstop=0)
        # announce=True asks the node to push cmpctblock announcements.
        msg = msg_sendcmpct()
        msg.version = version
        msg.announce = True
        peer.send_and_ping(msg)
    def test_compactblock_reconstruction_multiple_peers(self, node, stalling_peer, delivery_peer):
        # Verify reconstruction succeeds when one peer announces a compact
        # block but a different peer supplies the data.
        assert(len(self.utxos))
        def announce_cmpct_block(node, peer):
            # Announce a new block via compact block from `peer`; the node
            # should come back with a getblocktxn request.
            utxo = self.utxos.pop(0)
            block = self.build_block_with_transactions(node, utxo, 5)
            cmpct_block = HeaderAndShortIDs()
            cmpct_block.initialize_from_block(block)
            msg = msg_cmpctblock(cmpct_block.to_p2p())
            peer.send_and_ping(msg)
            with mininode_lock:
                assert(peer.last_getblocktxn is not None)
            return block, cmpct_block
        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
        for tx in block.vtx[1:]:
            delivery_peer.send_message(msg_tx(tx))
        delivery_peer.sync_with_ping()
        mempool = node.getrawmempool()
        for tx in block.vtx[1:]:
            assert(tx.hash in mempool)
        # With all txs in the mempool, the second announcement reconstructs.
        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
        self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
        # Now test that delivering an invalid compact block won't break relay
        block, cmpct_block = announce_cmpct_block(node, stalling_peer)
        for tx in block.vtx[1:]:
            delivery_peer.send_message(msg_tx(tx))
        delivery_peer.sync_with_ping()
        # Corrupt the coinbase's witness so reconstruction fails validation.
        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit = [ CTxInWitness() ]
        cmpct_block.prefilled_txn[0].tx.wit.vtxinwit[0].scriptWitness.stack = [ser_uint256(0)]
        cmpct_block.use_witness = True
        delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
        assert(int(node.getbestblockhash(), 16) != block.sha256)
        # The stalling peer can still complete the block via blocktxn.
        msg = msg_blocktxn()
        msg.block_transactions.blockhash = block.sha256
        msg.block_transactions.transactions = block.vtx[1:]
        stalling_peer.send_and_ping(msg)
        assert_equal(int(node.getbestblockhash(), 16), block.sha256)
    def run_test(self):
        """Drive all sub-tests, first pre-segwit then post-segwit activation."""
        # Setup the p2p connections and start up the network thread.
        self.test_node = TestNode()
        self.segwit_node = TestNode()
        self.old_node = TestNode()  # version 1 peer <--> segwit node
        connections = []
        connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], self.test_node))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                    self.segwit_node, services=NODE_NETWORK|NODE_WITNESS))
        connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1],
                    self.old_node, services=NODE_NETWORK))
        self.test_node.add_connection(connections[0])
        self.segwit_node.add_connection(connections[1])
        self.old_node.add_connection(connections[2])
        NetworkThread().start()  # Start up network handling in another thread
        # Test logic begins here
        self.test_node.wait_for_verack()
        # We will need UTXOs to construct transactions in later tests.
        self.make_utxos()
        print("Running tests, pre-segwit activation:")
        print("\tTesting SENDCMPCT p2p message... ")
        self.test_sendcmpct(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_sendcmpct(self.nodes[1], self.segwit_node, 2, old_node=self.old_node)
        sync_blocks(self.nodes)
        print("\tTesting compactblock construction...")
        self.test_compactblock_construction(self.nodes[0], self.test_node, 1, False)
        sync_blocks(self.nodes)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, False)
        sync_blocks(self.nodes)
        print("\tTesting compactblock requests... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1, False)
        sync_blocks(self.nodes)
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, False)
        sync_blocks(self.nodes)
        print("\tTesting getblocktxn requests...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)
        print("\tTesting getblocktxn handler...")
        self.test_getblocktxn_handler(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
        sync_blocks(self.nodes)
        print("\tTesting compactblock requests/announcements not at chain tip...")
        self.test_compactblocks_not_at_tip(self.nodes[0], self.test_node)
        sync_blocks(self.nodes)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.segwit_node)
        self.test_compactblocks_not_at_tip(self.nodes[1], self.old_node)
        sync_blocks(self.nodes)
        print("\tTesting handling of incorrect blocktxn responses...")
        self.test_incorrect_blocktxn_response(self.nodes[0], self.test_node, 1)
        sync_blocks(self.nodes)
        self.test_incorrect_blocktxn_response(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)
        # End-to-end block relay tests
        print("\tTesting end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(self.nodes[0], [self.segwit_node, self.test_node, self.old_node])
        self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
        print("\tTesting handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, False)
        print("\tTesting reconstructing compact blocks from all peers...")
        self.test_compactblock_reconstruction_multiple_peers(self.nodes[1], self.segwit_node, self.old_node)
        sync_blocks(self.nodes)
        # Advance to segwit activation
        print ("\nAdvancing to segwit activation\n")
        self.activate_segwit(self.nodes[1])
        print ("Running tests, post-segwit activation...")
        print("\tTesting compactblock construction...")
        self.test_compactblock_construction(self.nodes[1], self.old_node, 1, True)
        self.test_compactblock_construction(self.nodes[1], self.segwit_node, 2, True)
        sync_blocks(self.nodes)
        print("\tTesting compactblock requests (unupgraded node)... ")
        self.test_compactblock_requests(self.nodes[0], self.test_node, 1, True)
        print("\tTesting getblocktxn requests (unupgraded node)...")
        self.test_getblocktxn_requests(self.nodes[0], self.test_node, 1)
        # Need to manually sync node0 and node1, because post-segwit activation,
        # node1 will not download blocks from node0.
        print("\tSyncing nodes...")
        assert(self.nodes[0].getbestblockhash() != self.nodes[1].getbestblockhash())
        while (self.nodes[0].getblockcount() > self.nodes[1].getblockcount()):
            block_hash = self.nodes[0].getblockhash(self.nodes[1].getblockcount()+1)
            self.nodes[1].submitblock(self.nodes[0].getblock(block_hash, False))
        assert_equal(self.nodes[0].getbestblockhash(), self.nodes[1].getbestblockhash())
        print("\tTesting compactblock requests (segwit node)... ")
        self.test_compactblock_requests(self.nodes[1], self.segwit_node, 2, True)
        print("\tTesting getblocktxn requests (segwit node)...")
        self.test_getblocktxn_requests(self.nodes[1], self.segwit_node, 2)
        sync_blocks(self.nodes)
        print("\tTesting getblocktxn handler (segwit node should return witnesses)...")
        self.test_getblocktxn_handler(self.nodes[1], self.segwit_node, 2)
        self.test_getblocktxn_handler(self.nodes[1], self.old_node, 1)
        # Test that if we submitblock to node1, we'll get a compact block
        # announcement to all peers.
        # (Post-segwit activation, blocks won't propagate from node0 to node1
        # automatically, so don't bother testing a block announced to node0.)
        print("\tTesting end-to-end block relay...")
        self.request_cb_announcements(self.test_node, self.nodes[0], 1)
        self.request_cb_announcements(self.old_node, self.nodes[1], 1)
        self.request_cb_announcements(self.segwit_node, self.nodes[1], 2)
        self.test_end_to_end_block_relay(self.nodes[1], [self.segwit_node, self.test_node, self.old_node])
        print("\tTesting handling of invalid compact blocks...")
        self.test_invalid_tx_in_compactblock(self.nodes[0], self.test_node, False)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.segwit_node, True)
        self.test_invalid_tx_in_compactblock(self.nodes[1], self.old_node, True)
        print("\tTesting invalid index in cmpctblock message...")
        self.test_invalid_cmpctblock_message()
# Script entry point: run the test when invoked directly.
if __name__ == '__main__':
    CompactBlocksTest().main()
| 45.593395 | 124 | 0.665912 |
7956207ffed6ff3498a0f513901172daffcb39cd | 4,075 | py | Python | neutronclient/neutron/v2_0/qos/qos.py | teresa-ho/stx-python-neutronclient | 35ea6c2c96cbf98755a82cb7c19138648552b778 | [
"Apache-2.0"
] | null | null | null | neutronclient/neutron/v2_0/qos/qos.py | teresa-ho/stx-python-neutronclient | 35ea6c2c96cbf98755a82cb7c19138648552b778 | [
"Apache-2.0"
] | null | null | null | neutronclient/neutron/v2_0/qos/qos.py | teresa-ho/stx-python-neutronclient | 35ea6c2c96cbf98755a82cb7c19138648552b778 | [
"Apache-2.0"
] | null | null | null | # Copyright 2012 OpenStack Foundation.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Copyright (c) 2013-2014 Wind River Systems, Inc.
#
#
import logging
from neutronclient.common import constants
from neutronclient.neutron import v2_0 as neutronV20
class UpdateQoSMixin(object):
    """Shared CLI argument handling for the QoS create/update commands."""

    def add_arguments_qos(self, parser):
        """Register the QoS-related options on an argparse parser."""
        parser.add_argument('--name', metavar='NAME',
                            help='Name of QoS policy')
        parser.add_argument('--description', metavar='DESCRIPTION',
                            help="Description of QoS policy", required=False)
        parser.add_argument('--dscp', metavar="POLICY",
                            help='Set of policies for dscp',
                            nargs='+', required=False)
        parser.add_argument('--ratelimit', metavar="POLICY",
                            help='Set of policies for ratelimit',
                            nargs='+', required=False)
        parser.add_argument('--scheduler', metavar="POLICY",
                            help='Set of policies for scheduler',
                            nargs='+', required=False)

    def _args2body_policies(self, qos, policy_type, policies):
        """Parse 'key=value' policy strings into qos['policies'][policy_type].

        Splits on the first '=' only, so policy values may themselves
        contain '=' characters (a bare split('=') silently truncated
        such values to the text before the second '=').
        """
        qos['policies'][policy_type] = {}
        for parg in policies:
            args = parg.split('=', 1)
            qos['policies'][policy_type][args[0]] = args[1]

    def args2body_qos(self, parsed_args, qos):
        """Translate parsed CLI arguments into the QoS request body `qos`."""
        if parsed_args.name:
            qos['name'] = parsed_args.name
        if parsed_args.description:
            qos['description'] = parsed_args.description
        # Always present, even when no per-type policies were given.
        qos['policies'] = {}
        if parsed_args.dscp:
            self._args2body_policies(qos, constants.TYPE_QOS_DSCP,
                                     parsed_args.dscp)
        if parsed_args.ratelimit:
            self._args2body_policies(qos, constants.TYPE_QOS_RATELIMIT,
                                     parsed_args.ratelimit)
        if parsed_args.scheduler:
            self._args2body_policies(qos, constants.TYPE_QOS_SCHEDULER,
                                     parsed_args.scheduler)
class ListQoS(neutronV20.ListCommand):
    """List QoS policies."""
    # Collection resource name used by the neutron client plumbing.
    resource = 'qos'
    log = logging.getLogger(__name__ + '.ListQoS')
    # Columns rendered in the `qos-list` table output.
    list_columns = [
        'id', 'name', 'description'
    ]
class ShowQoS(neutronV20.ShowCommand):
    """Show QoS policy."""
    resource = 'qos'
    # Allow lookup by policy name as well as by UUID.
    allow_names = True
    log = logging.getLogger(__name__ + '.ShowQoS')
class DeleteQoS(neutronV20.DeleteCommand):
    """Delete QoS policy."""
    resource = 'qos'
    # Allow lookup by policy name as well as by UUID.
    allow_names = True
    log = logging.getLogger(__name__ + '.DeleteQoS')
class UpdateQoS(neutronV20.UpdateCommand, UpdateQoSMixin):
    """Update QoS policy."""
    resource = 'qos'
    log = logging.getLogger(__name__ + '.UpdateQoS')
    def add_known_arguments(self, parser):
        # Delegate CLI option registration to the shared mixin.
        self.add_arguments_qos(parser)
    def args2body(self, parsed_args):
        # Build the update request body from the parsed CLI options.
        body = {self.resource: {}}
        self.args2body_qos(parsed_args, body[self.resource])
        return body
class CreateQoS(neutronV20.CreateCommand, UpdateQoSMixin):
    """Create QoS policy."""
    resource = 'qos'
    log = logging.getLogger(__name__ + '.CreateQoS')
    def add_known_arguments(self, parser):
        # Delegate CLI option registration to the shared mixin.
        self.add_arguments_qos(parser)
    def args2body(self, parsed_args):
        # Build the create request body; unlike update, create may carry
        # an explicit tenant_id supplied by the base command's flags.
        body = {self.resource: {}}
        if parsed_args.tenant_id:
            body[self.resource]['tenant_id'] = parsed_args.tenant_id
        self.args2body_qos(parsed_args, body[self.resource])
        return body
| 32.6 | 78 | 0.617178 |
795620c2e6681f8e5be0df43a1a86977636ea1e9 | 1,056 | py | Python | examples/notify.py | dduong42/aiopg | 3fb3256319de766384b4867c7cc6710397bd1a8c | [
"BSD-2-Clause"
] | 1 | 2021-07-07T15:00:32.000Z | 2021-07-07T15:00:32.000Z | examples/notify.py | dduong42/aiopg | 3fb3256319de766384b4867c7cc6710397bd1a8c | [
"BSD-2-Clause"
] | null | null | null | examples/notify.py | dduong42/aiopg | 3fb3256319de766384b4867c7cc6710397bd1a8c | [
"BSD-2-Clause"
] | null | null | null | import asyncio
import aiopg
dsn = 'dbname=aiopg user=aiopg password=passwd host=127.0.0.1'
async def notify(conn):
    # Emit five numbered NOTIFY payloads on "channel", then the sentinel
    # payload 'finish' so the listener coroutine knows when to stop.
    async with conn.cursor() as cur:
        for i in range(5):
            msg = "message {}".format(i)
            print('Send ->', msg)
            await cur.execute("NOTIFY channel, %s", (msg,))
        await cur.execute("NOTIFY channel, 'finish'")
async def listen(conn):
    # Subscribe to "channel" and print payloads as they arrive; returns
    # once the 'finish' sentinel sent by notify() is received.
    async with conn.cursor() as cur:
        await cur.execute("LISTEN channel")
        while True:
            msg = await conn.notifies.get()
            if msg.payload == 'finish':
                return
            else:
                print('Receive <-', msg.payload)
async def main():
    # Run the listener and the notifier concurrently, each on its own
    # pooled connection (LISTEN/NOTIFY requires two distinct sessions).
    async with aiopg.create_pool(dsn) as pool:
        async with pool.acquire() as conn1:
            listener = listen(conn1)
            async with pool.acquire() as conn2:
                notifier = notify(conn2)
                await asyncio.gather(listener, notifier)
    print("ALL DONE")
# Drive the demo to completion on the default event loop.
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
| 25.756098 | 62 | 0.574811 |
79562137f33349d4816b7205ab979a8f53068c08 | 1,980 | py | Python | images/spark/outbound-relay/http_sink.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 11 | 2020-10-13T05:27:59.000Z | 2021-09-23T02:56:32.000Z | images/spark/outbound-relay/http_sink.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 48 | 2020-10-15T09:53:36.000Z | 2021-07-05T15:33:24.000Z | images/spark/outbound-relay/http_sink.py | splunk/deep-learning-toolkit | 84f9c978d9859a96f6ba566737a5c7102738d13c | [
"Apache-2.0"
] | 4 | 2020-12-04T08:51:35.000Z | 2022-03-27T09:42:20.000Z | from waitress import serve
from flask import Flask, request
from queue import Empty
import logging
import threading
import http
class HTTPSink(object):
    """HTTP server from which Splunk pulls queued chunks out of the relay.

    Serves two endpoints on port 8890 (via waitress, on a daemon thread):
      * GET  /ping -- liveness probe, always 200 OK.
      * POST /pull -- hands out the next queued chunk, or a status code
        describing why no chunk is currently available.
    """

    def __init__(self, queue, status):
        self.queue = queue    # queue.Queue of outbound chunks
        self.status = status  # shared source state (error message / completion flag)
        # Running totals for logging; guarded by self.lock since waitress
        # may serve requests from multiple threads.
        self.total_bytes = 0
        self.chunk_count = 0
        self.lock = threading.Lock()
        self.thread = threading.Thread(target=self.run, args=())
        self.thread.daemon = True
        self.thread.start()

    def run(self):
        """Build the Flask app and serve it forever (daemon-thread body)."""
        logging.info("HTTPSink: starting ...")
        app = Flask(__name__)
        app.add_url_rule('/ping', view_func=self.ping_handler, methods=["GET"])
        app.add_url_rule('/pull', view_func=self.pull_handler, methods=["POST"])
        serve(
            app,
            host="0.0.0.0",
            port=8890,
            channel_timeout=100000,
            # threads=concurrent_algo_executions,
        )

    def ping_handler(self):
        """Liveness probe: always report OK."""
        return '', http.HTTPStatus.OK

    def pull_handler(self):
        """Return the next queued chunk to the caller.

        Returns:
            (chunk, 200)            when a chunk is available;
            (error message, 500)    when the source reported an error;
            ('', 410 GONE)          when the source has sent all its data;
            ('', 204 NO CONTENT)    when the queue is just momentarily empty.
        """
        try:
            chunk = self.queue.get_nowait()
        except Empty:
            msg = self.status.source_error_message
            if msg:
                logging.info("HTTPSink: informed Splunk that there is an error: %s" % msg)
                return msg, http.HTTPStatus.INTERNAL_SERVER_ERROR
            if self.status.source_sent_all_data:
                logging.info("HTTPSink: informed Splunk that there won't be more data")
                return '', http.HTTPStatus.GONE
            return '', http.HTTPStatus.NO_CONTENT
        chunk_size = len(chunk)
        # `with` releases the lock even if the bookkeeping raises; the old
        # explicit acquire()/release() pair could leave the lock held.
        with self.lock:
            self.total_bytes += chunk_size
            self.chunk_count += 1
            chunk_count = self.chunk_count
        logging.info("HTTPSink: will send chunk %s (%s bytes) to Splunk" % (
            chunk_count,
            chunk_size,
        ))
        self.queue.task_done()
        return chunk, 200
| 30.461538 | 90 | 0.591414 |
795621452f953d45596d63123d768e81b03978d3 | 1,116 | py | Python | LectureNote/06.NonLinearDataStructure/14.Tree/hchang/#14/52.py | minssoj/Study_turtleCoding | 5a1ab3c099311e6417f2a07e87132840fc3c7aed | [
"MIT"
] | null | null | null | LectureNote/06.NonLinearDataStructure/14.Tree/hchang/#14/52.py | minssoj/Study_turtleCoding | 5a1ab3c099311e6417f2a07e87132840fc3c7aed | [
"MIT"
] | null | null | null | LectureNote/06.NonLinearDataStructure/14.Tree/hchang/#14/52.py | minssoj/Study_turtleCoding | 5a1ab3c099311e6417f2a07e87132840fc3c7aed | [
"MIT"
] | 2 | 2021-12-13T08:02:31.000Z | 2021-12-18T08:36:23.000Z | # 이진탐색 트리(BST)가 주어졌을 때, L 이상 R 이하의 값을 지닌 노드의 합을 구하여라.
from BinaryTree import TreeNode
from time import time
# Placeholder marker for absent children in the level-order encoding.
null = "null"
tree = [10,5,15,3,7,null,18]
# Range bounds: sum node values v with L <= v <= R.
L = 7
R = 15
root=TreeNode(tree)
root.print_tree()
# Approach 1: inorder traversal, then filter values into the range.
st = time()
lst = root.inorder()
print(lst)
answer = 0
for i in lst:
    value = i.val
    if type(value) is int and L <= value <= R:
        answer += value
print(answer)
print(time() -st)
# Approach 2: breadth-first traversal, then filter again.
lst = root.breadth_first_search_list()
print(lst)
answer = 0
for i in lst:
    value = i.val
    if type(value) is int and 7 <= value <= 15:
        answer += value
print(answer) # does the work twice.
st = time()
def search_sum(root, L, R, task=None):
    """Sum the values of BST nodes whose value lies in [L, R] (inclusive).

    Iterative DFS that prunes using the BST ordering: the left child is
    visited only while the current value is still >= L, and the right
    child only while it is still <= R.  Non-integer node values (the
    "null" placeholders) are skipped.

    Args:
        root: the root node (or None); nodes expose .val, .left, .right.
        L, R: inclusive bounds of the range.
        task: optional explicit work stack; defaults to a fresh list.
              (The previous signature used the mutable default `task=[]`,
              a latent shared-state bug across calls.)

    Returns:
        int: the sum of in-range integer node values.
    """
    if task is None:
        task = []
    task.append(root)
    answer = 0
    while task:
        root = task.pop()
        if not root: continue
        if type(root.val) is int and L <= root.val <= R:
            answer += root.val
        if type(root.val) is int and L <= root.val:
            task.append(root.left)
        if type(root.val) is int and root.val <= R:
            task.append(root.right)
    return answer
# Approach 3: pruned DFS -- only visits subtrees that can intersect [L, R].
print(search_sum(root, L, R))
print(time() -st)
79562157af423b0fdbb28ceaf93d353102cb7b59 | 27,411 | py | Python | tensorboard/uploader/uploader_main.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | 7 | 2020-04-04T16:25:42.000Z | 2021-10-02T18:26:56.000Z | tensorboard/uploader/uploader_main.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | null | null | null | tensorboard/uploader/uploader_main.py | tjgq/tensorboard | 751c961b90183115e4ab0ae3975d50146c0705b9 | [
"Apache-2.0"
] | 4 | 2020-08-08T18:08:44.000Z | 2021-05-13T05:22:40.000Z | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Main program for the TensorBoard.dev uploader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import json
import os
import sys
import textwrap
from absl import app
from absl import logging
from absl.flags import argparse_flags
import grpc
import six
from tensorboard.uploader import dev_creds
from tensorboard.uploader.proto import experiment_pb2
from tensorboard.uploader.proto import export_service_pb2_grpc
from tensorboard.uploader.proto import write_service_pb2_grpc
from tensorboard.uploader import auth
from tensorboard.uploader import exporter as exporter_lib
from tensorboard.uploader import server_info as server_info_lib
from tensorboard.uploader import uploader as uploader_lib
from tensorboard.uploader import util
from tensorboard.uploader.proto import server_info_pb2
from tensorboard import program
from tensorboard.plugins import base_plugin
# Temporary integration point for absl compatibility; will go away once
# migrated to TensorBoard subcommand.
_FLAGS = None
# Terms-of-service notice shown once at first login (see _prompt_for_user_ack).
_MESSAGE_TOS = u"""\
Your use of this service is subject to Google's Terms of Service
<https://policies.google.com/terms> and Privacy Policy
<https://policies.google.com/privacy>, and TensorBoard.dev's Terms of Service
<https://tensorboard.dev/policy/terms/>.
This notice will not be shown again while you are logged into the uploader.
To log out, run `tensorboard dev auth revoke`.
"""
# argparse `set_defaults` keys used to record which subcommand was chosen.
_SUBCOMMAND_FLAG = "_uploader__subcommand"
_SUBCOMMAND_KEY_UPLOAD = "UPLOAD"
_SUBCOMMAND_KEY_DELETE = "DELETE"
_SUBCOMMAND_KEY_LIST = "LIST"
_SUBCOMMAND_KEY_EXPORT = "EXPORT"
_SUBCOMMAND_KEY_UPDATE_METADATA = "UPDATEMETADATA"
_SUBCOMMAND_KEY_AUTH = "AUTH"
_AUTH_SUBCOMMAND_FLAG = "_uploader__subcommand_auth"
_AUTH_SUBCOMMAND_KEY_REVOKE = "REVOKE"
# Frontend origin used when `--origin` is not supplied.
_DEFAULT_ORIGIN = "https://tensorboard.dev"
# Size limits for input fields not bounded at a wire level. "Chars" in this
# context refers to Unicode code points as stipulated by https://aip.dev/210.
_EXPERIMENT_NAME_MAX_CHARS = 100
_EXPERIMENT_DESCRIPTION_MAX_CHARS = 600
def _prompt_for_user_ack(intent):
    """Show the intent's consent message and exit unless the user agrees.

    Writes the uploader banner, the intent-specific acknowledgement text,
    and the Terms of Service notice to stderr, then reads a yes/no answer
    from stdin.  Any answer other than "y"/"yes" (case-insensitive)
    terminates the process with exit status 0.
    """
    banner = "\n***** TensorBoard Uploader *****\n"
    message = "\n".join((banner, intent.get_ack_message_body(), _MESSAGE_TOS))
    sys.stderr.write(message)
    sys.stderr.write("\n")
    answer = six.moves.input("Continue? (yes/NO) ")
    if answer.lower() not in ("y", "yes"):
        sys.exit(0)
    sys.stderr.write("\n")
def _define_flags(parser):
    """Configures flags on the provided argument parser.
    Integration point for `tensorboard.program`'s subcommand system.
    Args:
      parser: An `argparse.ArgumentParser` to be mutated.
    """
    subparsers = parser.add_subparsers()
    # Global flags shared by every subcommand.
    parser.add_argument(
        "--origin",
        type=str,
        default="",
        help="Experimental. Origin for TensorBoard.dev service to which "
        "to connect. If not set, defaults to %r." % _DEFAULT_ORIGIN,
    )
    parser.add_argument(
        "--api_endpoint",
        type=str,
        default="",
        help="Experimental. Direct URL for the API server accepting "
        "write requests. If set, will skip initial server handshake "
        "unless `--origin` is also set.",
    )
    parser.add_argument(
        "--grpc_creds_type",
        type=str,
        default="ssl",
        choices=("local", "ssl", "ssl_dev"),
        help="The type of credentials to use for the gRPC client",
    )
    parser.add_argument(
        "--auth_force_console",
        action="store_true",
        help="Set to true to force authentication flow to use the "
        "--console rather than a browser redirect to localhost.",
    )
    # `upload` subcommand.
    upload = subparsers.add_parser(
        "upload", help="upload an experiment to TensorBoard.dev"
    )
    upload.set_defaults(**{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_UPLOAD})
    upload.add_argument(
        "--logdir",
        metavar="PATH",
        type=str,
        default=None,
        help="Directory containing the logs to process",
    )
    upload.add_argument(
        "--name",
        type=str,
        default=None,
        help="Title of the experiment. Max 100 characters.",
    )
    upload.add_argument(
        "--description",
        type=str,
        default=None,
        help="Experiment description. Markdown format. Max 600 characters.",
    )
    upload.add_argument(
        "--plugins",
        type=str,
        nargs="*",
        default=[],
        help="List of plugins for which data should be uploaded. If "
        "unspecified then data will be uploaded for all plugins supported by "
        "the server.",
    )
    # `update-metadata` subcommand.
    update_metadata = subparsers.add_parser(
        "update-metadata",
        help="change the name, description, or other user "
        "metadata associated with an experiment.",
    )
    update_metadata.set_defaults(
        **{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_UPDATE_METADATA}
    )
    update_metadata.add_argument(
        "--experiment_id",
        metavar="EXPERIMENT_ID",
        type=str,
        default=None,
        help="ID of the experiment on which to modify the metadata.",
    )
    update_metadata.add_argument(
        "--name",
        type=str,
        default=None,
        help="Title of the experiment. Max 100 characters.",
    )
    update_metadata.add_argument(
        "--description",
        type=str,
        default=None,
        help="Experiment description. Markdown format. Max 600 characters.",
    )
    # `delete` subcommand.
    delete = subparsers.add_parser(
        "delete",
        help="permanently delete an experiment",
        inherited_absl_flags=None,
    )
    delete.set_defaults(**{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_DELETE})
    # We would really like to call this next flag `--experiment` rather
    # than `--experiment_id`, but this is broken inside Google due to a
    # long-standing Python bug: <https://bugs.python.org/issue14365>
    # (Some Google-internal dependencies define `--experimental_*` flags.)
    # This isn't exactly a principled fix, but it gets the job done.
    delete.add_argument(
        "--experiment_id",
        metavar="EXPERIMENT_ID",
        type=str,
        default=None,
        help="ID of an experiment to delete permanently",
    )
    # `list` subcommand.
    list_parser = subparsers.add_parser(
        "list", help="list previously uploaded experiments"
    )
    list_parser.set_defaults(**{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_LIST})
    # `export` subcommand.
    export = subparsers.add_parser(
        "export", help="download all your experiment data"
    )
    export.set_defaults(**{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_EXPORT})
    export.add_argument(
        "--outdir",
        metavar="OUTPUT_PATH",
        type=str,
        default=None,
        help="Directory into which to download all experiment data; "
        "must not yet exist",
    )
    # `auth` subcommand, with its own nested `revoke` subcommand.
    auth_parser = subparsers.add_parser("auth", help="log in, log out")
    auth_parser.set_defaults(**{_SUBCOMMAND_FLAG: _SUBCOMMAND_KEY_AUTH})
    auth_subparsers = auth_parser.add_subparsers()
    auth_revoke = auth_subparsers.add_parser(
        "revoke", help="revoke all existing credentials and log out"
    )
    auth_revoke.set_defaults(
        **{_AUTH_SUBCOMMAND_FLAG: _AUTH_SUBCOMMAND_KEY_REVOKE}
    )
def _parse_flags(argv=("",)):
    """Integration point for `absl.app`.
    Exits if flag values are invalid.
    Args:
      argv: CLI arguments, as with `sys.argv`, where the first argument is taken
        to be the name of the program being executed.
    Returns:
      Either argv[:1] if argv was non-empty, or [''] otherwise, as a mechanism
      for absl.app.run() compatibility.
    """
    parser = argparse_flags.ArgumentParser(
        prog="uploader",
        description=("Upload your TensorBoard experiments to TensorBoard.dev"),
    )
    _define_flags(parser)
    arg0 = argv[0] if argv else ""
    # Side effect: stash the parsed namespace in the module-level _FLAGS,
    # which `main` later consumes (temporary absl-compatibility shim).
    global _FLAGS
    _FLAGS = parser.parse_args(argv[1:])
    return [arg0]
def _run(flags):
    """Runs the main uploader program given parsed flags.
    Handles credential setup, the frontend handshake, and gRPC channel
    construction, then delegates the actual work to the chosen intent.
    Args:
      flags: An `argparse.Namespace`.
    """
    logging.set_stderrthreshold(logging.WARNING)
    intent = _get_intent(flags)
    store = auth.CredentialsStore()
    # `auth revoke` is handled entirely locally: clear credentials and stop
    # before any network setup.
    if isinstance(intent, _AuthRevokeIntent):
        store.clear()
        sys.stderr.write("Logged out of uploader.\n")
        sys.stderr.flush()
        return
    # TODO(b/141723268): maybe reconfirm Google Account prior to reuse.
    credentials = store.read_credentials()
    if not credentials:
        # First use: show the ToS notice and run the OAuth installed-app flow.
        _prompt_for_user_ack(intent)
        client_config = json.loads(auth.OAUTH_CLIENT_CONFIG)
        flow = auth.build_installed_app_flow(client_config)
        credentials = flow.run(force_console=flags.auth_force_console)
        sys.stderr.write("\n")  # Extra newline after auth flow messages.
        store.write_credentials(credentials)
    # Select transport-level credentials for the gRPC channel.
    channel_options = None
    if flags.grpc_creds_type == "local":
        channel_creds = grpc.local_channel_credentials()
    elif flags.grpc_creds_type == "ssl":
        channel_creds = grpc.ssl_channel_credentials()
    elif flags.grpc_creds_type == "ssl_dev":
        channel_creds = grpc.ssl_channel_credentials(dev_creds.DEV_SSL_CERT)
        channel_options = [("grpc.ssl_target_name_override", "localhost")]
    else:
        msg = "Invalid --grpc_creds_type %s" % flags.grpc_creds_type
        raise base_plugin.FlagsError(msg)
    try:
        server_info = _get_server_info(flags)
    except server_info_lib.CommunicationError as e:
        _die(str(e))
    _handle_server_info(server_info)
    logging.info("Received server info: <%r>", server_info)
    if not server_info.api_server.endpoint:
        logging.error("Server info response: %s", server_info)
        _die("Internal error: frontend did not specify an API server")
    # Layer the user's ID-token credentials on top of the channel creds.
    composite_channel_creds = grpc.composite_channel_credentials(
        channel_creds, auth.id_token_call_credentials(credentials)
    )
    # TODO(@nfelt): In the `_UploadIntent` case, consider waiting until
    # logdir exists to open channel.
    channel = grpc.secure_channel(
        server_info.api_server.endpoint,
        composite_channel_creds,
        options=channel_options,
    )
    with channel:
        intent.execute(server_info, channel)
@six.add_metaclass(abc.ABCMeta)
class _Intent(object):
    """A description of the user's intent in invoking this program.
    Each valid set of CLI flags corresponds to one intent: e.g., "upload
    data from this logdir", or "delete the experiment with that ID".
    """
    @abc.abstractmethod
    def get_ack_message_body(self):
        """Gets the message to show when executing this intent at first login.
        This need not include the header (program name) or Terms of Service
        notice.
        Returns:
          A Unicode string, potentially spanning multiple lines.
        """
        pass
    @abc.abstractmethod
    def execute(self, server_info, channel):
        """Carries out this intent with the specified gRPC channel.
        Args:
          server_info: A `server_info_pb2.ServerInfoResponse` value.
          channel: A connected gRPC channel whose server provides the TensorBoard
            reader and writer services.
        """
        pass
class _AuthRevokeIntent(_Intent):
    """The user intends to revoke credentials.
    This intent is special-cased by `_run`, which clears the credential
    store and returns before either abstract method could be invoked;
    both therefore raise if ever reached.
    """
    def get_ack_message_body(self):
        """Must not be called."""
        raise AssertionError("No user ack needed to revoke credentials")
    def execute(self, server_info, channel):
        """Execute handled specially by `main`.
        Must not be called.
        """
        raise AssertionError(
            "_AuthRevokeIntent should not be directly executed"
        )
class _DeleteExperimentIntent(_Intent):
    """The user intends to delete an experiment."""
    _MESSAGE_TEMPLATE = textwrap.dedent(
        u"""\
        This will delete the experiment on https://tensorboard.dev with the
        following experiment ID:
        {experiment_id}
        You have chosen to delete an experiment. All experiments uploaded
        to TensorBoard.dev are publicly visible. Do not upload sensitive
        data.
        """
    )
    def __init__(self, experiment_id):
        self.experiment_id = experiment_id
    def get_ack_message_body(self):
        return self._MESSAGE_TEMPLATE.format(experiment_id=self.experiment_id)
    def execute(self, server_info, channel):
        # Issue the delete RPC, translating known service errors into
        # user-facing messages via `_die` (which exits the process).
        api_client = write_service_pb2_grpc.TensorBoardWriterServiceStub(
            channel
        )
        experiment_id = self.experiment_id
        if not experiment_id:
            raise base_plugin.FlagsError(
                "Must specify a non-empty experiment ID to delete."
            )
        try:
            uploader_lib.delete_experiment(api_client, experiment_id)
        except uploader_lib.ExperimentNotFoundError:
            _die(
                "No such experiment %s. Either it never existed or it has "
                "already been deleted." % experiment_id
            )
        except uploader_lib.PermissionDeniedError:
            _die(
                "Cannot delete experiment %s because it is owned by a "
                "different user." % experiment_id
            )
        except grpc.RpcError as e:
            _die("Internal error deleting experiment: %s" % e)
        print("Deleted experiment %s." % experiment_id)
class _UpdateMetadataIntent(_Intent):
    """The user intends to update the metadata for an experiment."""
    _MESSAGE_TEMPLATE = textwrap.dedent(
        u"""\
        This will modify the metadata associated with the experiment on
        https://tensorboard.dev with the following experiment ID:
        {experiment_id}
        You have chosen to modify an experiment. All experiments uploaded
        to TensorBoard.dev are publicly visible. Do not upload sensitive
        data.
        """
    )
    def __init__(self, experiment_id, name=None, description=None):
        self.experiment_id = experiment_id
        self.name = name          # None means "leave the name unchanged"
        self.description = description  # None means "leave unchanged"
    def get_ack_message_body(self):
        return self._MESSAGE_TEMPLATE.format(experiment_id=self.experiment_id)
    def execute(self, server_info, channel):
        # Validate client-side limits first, then issue the update RPC,
        # translating known service errors into user-facing messages.
        api_client = write_service_pb2_grpc.TensorBoardWriterServiceStub(
            channel
        )
        experiment_id = self.experiment_id
        _die_if_bad_experiment_name(self.name)
        _die_if_bad_experiment_description(self.description)
        if not experiment_id:
            raise base_plugin.FlagsError(
                "Must specify a non-empty experiment ID to modify."
            )
        try:
            uploader_lib.update_experiment_metadata(
                api_client,
                experiment_id,
                name=self.name,
                description=self.description,
            )
        except uploader_lib.ExperimentNotFoundError:
            _die(
                "No such experiment %s. Either it never existed or it has "
                "already been deleted." % experiment_id
            )
        except uploader_lib.PermissionDeniedError:
            _die(
                "Cannot modify experiment %s because it is owned by a "
                "different user." % experiment_id
            )
        except uploader_lib.InvalidArgumentError as e:
            _die("Server cannot modify experiment as requested: %s" % e)
        except grpc.RpcError as e:
            _die("Internal error modifying experiment: %s" % e)
        logging.info("Modified experiment %s.", experiment_id)
        if self.name is not None:
            logging.info("Set name to %r", self.name)
        if self.description is not None:
            # NOTE(review): `%r` of `repr(...)` double-quotes the value in
            # the log output; looks unintentional -- confirm before changing.
            logging.info("Set description to %r", repr(self.description))
class _ListIntent(_Intent):
    """The user intends to list all their experiments."""
    _MESSAGE = textwrap.dedent(
        u"""\
        This will list all experiments that you've uploaded to
        https://tensorboard.dev. TensorBoard.dev experiments are visible
        to everyone. Do not upload sensitive data.
        """
    )
    def get_ack_message_body(self):
        return self._MESSAGE
    def execute(self, server_info, channel):
        # Stream the user's experiments and print a URL plus a small
        # metadata table for each; counts go to stderr, data to stdout.
        api_client = export_service_pb2_grpc.TensorBoardExporterServiceStub(
            channel
        )
        # Request only the fields we actually render below.
        fieldmask = experiment_pb2.ExperimentMask(
            create_time=True,
            update_time=True,
            num_scalars=True,
            num_runs=True,
            num_tags=True,
        )
        gen = exporter_lib.list_experiments(api_client, fieldmask=fieldmask)
        count = 0
        for experiment in gen:
            count += 1
            experiment_id = experiment.experiment_id
            url = server_info_lib.experiment_url(server_info, experiment_id)
            print(url)
            data = [
                ("Name", experiment.name or "[No Name]"),
                ("Description", experiment.description or "[No Description]"),
                ("Id", experiment.experiment_id),
                ("Created", util.format_time(experiment.create_time)),
                ("Updated", util.format_time(experiment.update_time)),
                ("Scalars", str(experiment.num_scalars)),
                ("Runs", str(experiment.num_runs)),
                ("Tags", str(experiment.num_tags)),
            ]
            for (name, value) in data:
                print("\t%s %s" % (name.ljust(12), value))
            sys.stdout.flush()
        if not count:
            sys.stderr.write(
                "No experiments. Use `tensorboard dev upload` to get started.\n"
            )
        else:
            sys.stderr.write("Total: %d experiment(s)\n" % count)
        sys.stderr.flush()
def _die_if_bad_experiment_name(name):
if name and len(name) > _EXPERIMENT_NAME_MAX_CHARS:
_die(
"Experiment name is too long. Limit is "
"%s characters.\n"
"%r was provided." % (_EXPERIMENT_NAME_MAX_CHARS, name)
)
def _die_if_bad_experiment_description(description):
if description and len(description) > _EXPERIMENT_DESCRIPTION_MAX_CHARS:
_die(
"Experiment description is too long. Limit is %s characters.\n"
"%r was provided."
% (_EXPERIMENT_DESCRIPTION_MAX_CHARS, description)
)
class _UploadIntent(_Intent):
    """The user intends to upload an experiment from the given logdir."""
    _MESSAGE_TEMPLATE = textwrap.dedent(
        u"""\
        This will upload your TensorBoard logs to https://tensorboard.dev/ from
        the following directory:
        {logdir}
        This TensorBoard will be visible to everyone. Do not upload sensitive
        data.
        """
    )
    def __init__(self, logdir, name=None, description=None):
        self.logdir = logdir
        self.name = name
        self.description = description
    def get_ack_message_body(self):
        return self._MESSAGE_TEMPLATE.format(logdir=self.logdir)
    def execute(self, server_info, channel):
        # Create the experiment, print its public URL, then stream data
        # until interrupted (Ctrl-C) or the experiment disappears.
        api_client = write_service_pb2_grpc.TensorBoardWriterServiceStub(
            channel
        )
        _die_if_bad_experiment_name(self.name)
        _die_if_bad_experiment_description(self.description)
        uploader = uploader_lib.TensorBoardUploader(
            api_client,
            self.logdir,
            allowed_plugins=server_info_lib.allowed_plugins(server_info),
            name=self.name,
            description=self.description,
        )
        experiment_id = uploader.create_experiment()
        url = server_info_lib.experiment_url(server_info, experiment_id)
        print(
            "Upload started and will continue reading any new data as it's added"
        )
        print("to the logdir. To stop uploading, press Ctrl-C.")
        print("View your TensorBoard live at: %s" % url)
        try:
            uploader.start_uploading()
        except uploader_lib.ExperimentNotFoundError:
            print("Experiment was deleted; uploading has been cancelled")
            return
        except KeyboardInterrupt:
            print()
            print("Upload stopped. View your TensorBoard at %s" % url)
            return
        # TODO(@nfelt): make it possible for the upload cycle to end once we
        # detect that no more runs are active, so this code can be reached.
        print("Done! View your TensorBoard at %s" % url)
class _ExportIntent(_Intent):
    """The user intends to download all their experiment data."""
    _MESSAGE_TEMPLATE = textwrap.dedent(
        u"""\
        This will download all your experiment data from https://tensorboard.dev
        and save it to the following directory:
        {output_dir}
        Downloading your experiment data does not delete it from the
        service. All experiments uploaded to TensorBoard.dev are publicly
        visible. Do not upload sensitive data.
        """
    )
    def __init__(self, output_dir):
        self.output_dir = output_dir
    def get_ack_message_body(self):
        return self._MESSAGE_TEMPLATE.format(output_dir=self.output_dir)
    def execute(self, server_info, channel):
        # Download every experiment into `outdir`, which must not already
        # exist; a server-side timeout aborts the loop with a help message.
        api_client = export_service_pb2_grpc.TensorBoardExporterServiceStub(
            channel
        )
        outdir = self.output_dir
        try:
            exporter = exporter_lib.TensorBoardExporter(api_client, outdir)
        except exporter_lib.OutputDirectoryExistsError:
            msg = "Output directory already exists: %r" % outdir
            raise base_plugin.FlagsError(msg)
        num_experiments = 0
        try:
            for experiment_id in exporter.export():
                num_experiments += 1
                print("Downloaded experiment %s" % experiment_id)
        except exporter_lib.GrpcTimeoutException as e:
            print(
                "\nUploader has failed because of a timeout error. Please reach "
                "out via e-mail to tensorboard.dev-support@google.com to get help "
                "completing your export of experiment %s." % e.experiment_id
            )
        print(
            "Done. Downloaded %d experiments to: %s" % (num_experiments, outdir)
        )
def _get_intent(flags):
    """Determines what the program should do (upload, delete, ...).
    Dispatches on the subcommand key recorded by `_define_flags` via
    argparse `set_defaults`, validating the flags each intent requires.
    Args:
      flags: An `argparse.Namespace` with the parsed flags.
    Returns:
      An `_Intent` instance.
    Raises:
      base_plugin.FlagsError: If the command-line `flags` do not correctly
        specify an intent.
    """
    cmd = getattr(flags, _SUBCOMMAND_FLAG, None)
    if cmd is None:
        raise base_plugin.FlagsError("Must specify subcommand (try --help).")
    if cmd == _SUBCOMMAND_KEY_UPLOAD:
        if flags.logdir:
            return _UploadIntent(
                os.path.expanduser(flags.logdir),
                name=flags.name,
                description=flags.description,
            )
        else:
            raise base_plugin.FlagsError(
                "Must specify directory to upload via `--logdir`."
            )
    if cmd == _SUBCOMMAND_KEY_UPDATE_METADATA:
        # update-metadata requires an ID plus at least one field to change.
        if flags.experiment_id:
            if flags.name is not None or flags.description is not None:
                return _UpdateMetadataIntent(
                    flags.experiment_id,
                    name=flags.name,
                    description=flags.description,
                )
            else:
                raise base_plugin.FlagsError(
                    "Must specify either `--name` or `--description`."
                )
        else:
            raise base_plugin.FlagsError(
                "Must specify experiment to modify via `--experiment_id`."
            )
    elif cmd == _SUBCOMMAND_KEY_DELETE:
        if flags.experiment_id:
            return _DeleteExperimentIntent(flags.experiment_id)
        else:
            raise base_plugin.FlagsError(
                "Must specify experiment to delete via `--experiment_id`."
            )
    elif cmd == _SUBCOMMAND_KEY_LIST:
        return _ListIntent()
    elif cmd == _SUBCOMMAND_KEY_EXPORT:
        if flags.outdir:
            return _ExportIntent(flags.outdir)
        else:
            raise base_plugin.FlagsError(
                "Must specify output directory via `--outdir`."
            )
    elif cmd == _SUBCOMMAND_KEY_AUTH:
        # `auth` has its own nested subcommand dispatch.
        auth_cmd = getattr(flags, _AUTH_SUBCOMMAND_FLAG, None)
        if auth_cmd is None:
            raise base_plugin.FlagsError("Must specify a subcommand to `auth`.")
        if auth_cmd == _AUTH_SUBCOMMAND_KEY_REVOKE:
            return _AuthRevokeIntent()
        else:
            raise AssertionError("Unknown auth subcommand %r" % (auth_cmd,))
    else:
        raise AssertionError("Unknown subcommand %r" % (cmd,))
def _get_server_info(flags):
    """Determine the server configuration to use for this invocation.

    When the user pinned `--api_endpoint` without overriding `--origin`,
    the frontend handshake is skipped entirely and a local server-info
    value is synthesized.  Otherwise the handshake runs against the
    origin, and an explicit `--api_endpoint` then overrides the
    advertised API server -- but only if the handshake actually named one.
    """
    origin = flags.origin or _DEFAULT_ORIGIN
    plugins = getattr(flags, "plugins", [])
    if flags.api_endpoint and not flags.origin:
        return server_info_lib.create_server_info(
            origin, flags.api_endpoint, plugins
        )
    info = server_info_lib.fetch_server_info(origin, plugins)
    if flags.api_endpoint and info.api_server.endpoint:
        info.api_server.endpoint = flags.api_endpoint
    return info
def _handle_server_info(info):
    # Surface the server's compatibility verdict: warnings are printed,
    # hard errors terminate the process, and anything else (OK/unknown)
    # only prints optional informational details.
    compat = info.compatibility
    if compat.verdict == server_info_pb2.VERDICT_WARN:
        sys.stderr.write("Warning [from server]: %s\n" % compat.details)
        sys.stderr.flush()
    elif compat.verdict == server_info_pb2.VERDICT_ERROR:
        _die("Error [from server]: %s" % compat.details)
    else:
        # OK or unknown; assume OK.
        if compat.details:
            sys.stderr.write("%s\n" % compat.details)
            sys.stderr.flush()
def _die(message):
sys.stderr.write("%s\n" % (message,))
sys.stderr.flush()
sys.exit(1)
def main(unused_argv):
    """absl.app entry point: hand the module-level parsed flags to `_run`."""
    global _FLAGS
    flags = _FLAGS
    # Prevent accidental use of `_FLAGS` until migration to TensorBoard
    # subcommand is complete, at which point `_FLAGS` goes away.
    del _FLAGS
    return _run(flags)
class UploaderSubcommand(program.TensorBoardSubcommand):
    """Integration point with `tensorboard` CLI."""
    def name(self):
        # Subcommand name: invoked as `tensorboard dev ...`.
        return "dev"
    def define_flags(self, parser):
        # Reuse the standalone flag definitions.
        _define_flags(parser)
    def run(self, flags):
        return _run(flags)
    def help(self):
        # One-line summary shown in `tensorboard --help`.
        return "upload data to TensorBoard.dev"
# Standalone entry point; flag parsing is delegated to `_parse_flags`.
if __name__ == "__main__":
    app.run(main, flags_parser=_parse_flags)
| 34.008685 | 83 | 0.646967 |
79562180133e5c5e7a90489257b58192dda3ec57 | 41,039 | py | Python | angr/storage/paged_memory.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2021-07-07T11:18:34.000Z | 2021-07-07T11:18:34.000Z | angr/storage/paged_memory.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | null | null | null | angr/storage/paged_memory.py | mariusmue/angr | f8304c4b1f0097a721a6692b02a45cabaae137c5 | [
"BSD-2-Clause"
] | 1 | 2022-02-10T02:29:38.000Z | 2022-02-10T02:29:38.000Z | import cooldict
import claripy
import cffi
import cle
from sortedcontainers import SortedDict
from ..errors import SimMemoryError, SimSegfaultError
from .. import sim_options as options
from .memory_object import SimMemoryObject
from claripy.ast.bv import BV
_ffi = cffi.FFI()
import logging
l = logging.getLogger("angr.storage.paged_memory")
class BasePage(object):
    """
    Page object, allowing for more flexibility than just a raw dict.

    Abstract base class: concrete subclasses (TreePage, ListPage below)
    provide the actual storage by implementing the "Abstract functions"
    section.
    """

    # Permission bits (same values as POSIX mmap-style protections).
    PROT_READ = 1
    PROT_WRITE = 2
    PROT_EXEC = 4

    def __init__(self, page_addr, page_size, permissions=None, executable=False):
        """
        Create a new page object. Carries permissions information.
        Permissions default to RW unless `executable` is True,
        in which case permissions default to RWX.

        :param int page_addr: The base address of the page.
        :param int page_size: The size of the page.
        :param bool executable: Whether the page is executable. Typically,
                                this will depend on whether the binary has an
                                executable stack.
        :param claripy.AST permissions: A 3-bit bitvector setting specific permissions
                                for EXEC, READ, and WRITE.
        """
        self._page_addr = page_addr
        self._page_size = page_size

        if permissions is None:
            perms = Page.PROT_READ|Page.PROT_WRITE
            if executable:
                perms |= Page.PROT_EXEC
            self.permissions = claripy.BVV(perms, 3) # 3 bits is enough for PROT_EXEC, PROT_WRITE, PROT_READ, PROT_NONE
        else:
            self.permissions = permissions

    @property
    def concrete_permissions(self):
        # Symbolic permissions are treated as fully permissive (RWX == 7).
        if self.permissions.symbolic:
            return 7
        else:
            return self.permissions.args[0]

    def contains(self, state, idx):
        """Return True if a stored memory object covers address `idx`."""
        m = self.load_mo(state, idx)
        return m is not None and m.includes(idx)

    def _resolve_range(self, mo):
        """Clamp memory object `mo`'s address range to this page's bounds."""
        start = max(mo.base, self._page_addr)
        end = min(mo.last_addr + 1, self._page_addr + self._page_size)
        if end <= start:
            l.warning("Nothing left of the memory object to store in SimPage.")
        return start, end

    def store_mo(self, state, new_mo, overwrite=True): #pylint:disable=unused-argument
        """
        Stores a memory object.

        :param new_mo: the memory object
        :param overwrite: whether to overwrite objects already in memory (if false, just fill in the holes)
        """
        start, end = self._resolve_range(new_mo)
        if overwrite:
            self.store_overwrite(state, new_mo, start, end)
        else:
            self.store_underwrite(state, new_mo, start, end)

    def copy(self):
        # Note: this constructs the module-level default `Page` class, not
        # necessarily type(self); subclass state is carried via _copy_args().
        return Page(
            self._page_addr, self._page_size,
            permissions=self.permissions,
            **self._copy_args()
        )

    #
    # Abstract functions
    #

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: a tuple of the object
        """
        raise NotImplementedError()

    def keys(self):
        """Return the absolute addresses occupied by stored memory objects."""
        raise NotImplementedError()

    def replace_mo(self, state, old_mo, new_mo):
        """Replace every occurrence of `old_mo` in this page with `new_mo`."""
        raise NotImplementedError()

    def store_overwrite(self, state, new_mo, start, end):
        """Store `new_mo` over [start, end), clobbering any existing objects."""
        raise NotImplementedError()

    def store_underwrite(self, state, new_mo, start, end):
        """Store `new_mo` over [start, end), filling only unoccupied holes."""
        raise NotImplementedError()

    def load_slice(self, state, start, end): #pylint:disable=unused-argument
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        raise NotImplementedError()

    def _copy_args(self):
        """Return subclass-specific kwargs needed to reconstruct this page."""
        raise NotImplementedError()
class TreePage(BasePage):
    """
    Page object, implemented with a sorted dict. Who knows what's underneath!

    Keys of `_storage` are start addresses; each value is the memory object
    beginning at (or spanning) that address.
    """

    def __init__(self, *args, **kwargs):
        storage = kwargs.pop("storage", None)
        super(TreePage, self).__init__(*args, **kwargs)
        self._storage = SortedDict() if storage is None else storage

    def keys(self):
        if len(self._storage) == 0:
            return set()
        else:
            # NOTE: .itervalues() is Python-2-only (consistent with the rest
            # of this module, which also uses xrange/long).
            return set.union(*(set(range(*self._resolve_range(mo))) for mo in self._storage.itervalues()))

    def replace_mo(self, state, old_mo, new_mo):
        # Only entries pointing at the exact same object (identity) are swapped.
        start, end = self._resolve_range(old_mo)
        for key in self._storage.irange(start, end-1):
            val = self._storage[key]
            if val is old_mo:
                #assert new_mo.includes(a)
                self._storage[key] = new_mo

    def store_overwrite(self, state, new_mo, start, end):
        # iterate over each item we might overwrite
        # track our mutations separately since we're in the process of iterating
        deletes = []
        updates = { start: new_mo }

        for key in self._storage.irange(maximum=end-1, reverse=True):
            old_mo = self._storage[key]

            # make sure we aren't overwriting all of an item that overlaps the end boundary
            if end < self._page_addr + self._page_size and end not in updates and old_mo.includes(end):
                updates[end] = old_mo

            # we can't set a minimum on the range because we need to do the above for
            # the first object before start too
            if key < start:
                break

            # delete any key that falls within the range
            deletes.append(key)

        #assert all(m.includes(i) for i,m in updates.items())

        # perform mutations
        for key in deletes:
            del self._storage[key]

        self._storage.update(updates)

    def store_underwrite(self, state, new_mo, start, end):
        # track the point that we need to write up to
        last_missing = end - 1
        # track also updates since we can't update while iterating
        updates = {}

        for key in self._storage.irange(maximum=end-1, reverse=True):
            mo = self._storage[key]

            # if the mo stops before the hole we're tracking, fill the gap
            # between them with new_mo
            if mo.base <= last_missing and not mo.includes(last_missing):
                updates[max(mo.last_addr+1, start)] = new_mo
                last_missing = mo.base - 1

            # we can't set a minimum on the range because we need to do the above for
            # the first object before start too
            if last_missing < start:
                break

        # if there are no memory objects <= start, we won't have filled start yet
        if last_missing >= start:
            updates[start] = new_mo

        #assert all(m.includes(i) for i,m in updates.items())

        self._storage.update(updates)

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: a tuple of the object
        """
        try:
            # The covering object, if any, starts at the greatest key <= page_idx.
            key = next(self._storage.irange(maximum=page_idx, reverse=True))
        except StopIteration:
            return None
        else:
            return self._storage[key]

    def load_slice(self, state, start, end):
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        keys = list(self._storage.irange(start, end-1))
        if not keys or keys[0] != start:
            # An object starting before `start` may still overlap the slice.
            try:
                key = next(self._storage.irange(maximum=start, reverse=True))
            except StopIteration:
                pass
            else:
                if self._storage[key].includes(start):
                    keys.insert(0, key)
        return [(key, self._storage[key]) for key in keys]

    def _copy_args(self):
        return { 'storage': self._storage.copy() }
class ListPage(BasePage):
    """
    Page object, implemented with a list.

    `_storage` holds one slot per byte offset in the page; `_sinkhole`, when
    set, is a memory object backing every offset whose slot is None.
    """

    def __init__(self, *args, **kwargs):
        storage = kwargs.pop("storage", None)
        self._sinkhole = kwargs.pop("sinkhole", None)

        super(ListPage, self).__init__(*args, **kwargs)
        self._storage = [ None ] * self._page_size if storage is None else storage

    def keys(self):
        if self._sinkhole is not None:
            # The sinkhole backs the whole page.
            return range(self._page_addr, self._page_addr + self._page_size)
        else:
            return [ self._page_addr + i for i,v in enumerate(self._storage) if v is not None ]

    def replace_mo(self, state, old_mo, new_mo):
        if self._sinkhole is old_mo:
            self._sinkhole = new_mo
        else:
            start, end = self._resolve_range(old_mo)
            for i in range(start, end):
                if self._storage[i-self._page_addr] is old_mo:
                    self._storage[i-self._page_addr] = new_mo

    def store_overwrite(self, state, new_mo, start, end):
        if start == self._page_addr and end == self._page_addr + self._page_size:
            # A full-page write collapses to a sinkhole and clears the slots.
            self._sinkhole = new_mo
            self._storage = [ None ] * self._page_size
        else:
            for i in range(start, end):
                self._storage[i-self._page_addr] = new_mo

    def store_underwrite(self, state, new_mo, start, end):
        if start == self._page_addr and end == self._page_addr + self._page_size:
            # NOTE(review): unlike store_overwrite, existing slots are kept;
            # the new sinkhole only backs offsets that are still None.
            self._sinkhole = new_mo
        else:
            for i in range(start, end):
                if self._storage[i-self._page_addr] is None:
                    self._storage[i-self._page_addr] = new_mo

    def load_mo(self, state, page_idx):
        """
        Loads a memory object from memory.

        :param page_idx: the index into the page
        :returns: a tuple of the object
        """
        mo = self._storage[page_idx-self._page_addr]
        return self._sinkhole if mo is None else mo

    def load_slice(self, state, start, end):
        """
        Return the memory objects overlapping with the provided slice.

        :param start: the start address
        :param end: the end address (non-inclusive)
        :returns: tuples of (starting_addr, memory_object)
        """
        items = [ ]
        if start > self._page_addr + self._page_size or end < self._page_addr:
            l.warning("Calling load_slice on the wrong page.")
            return items

        for addr in range(max(start, self._page_addr), min(end, self._page_addr + self._page_size)):
            i = addr - self._page_addr
            mo = self._storage[i]
            if mo is None:
                mo = self._sinkhole
            # Coalesce runs of consecutive addresses backed by the same object.
            if mo is not None and (not items or items[-1][1] is not mo):
                items.append((addr, mo))
        return items

    def _copy_args(self):
        return { 'storage': list(self._storage), 'sinkhole': self._sinkhole }
# The concrete page implementation used throughout paged memory; the
# list-backed variant is the default (TreePage is the alternative).
Page = ListPage
#pylint:disable=unidiomatic-typecheck
class SimPagedMemory(object):
"""
Represents paged memory.
"""
def __init__(self, memory_backer=None, permissions_backer=None, pages=None, initialized=None, name_mapping=None, hash_mapping=None, page_size=None, symbolic_addrs=None, check_permissions=False):
self._cowed = set()
self._memory_backer = { } if memory_backer is None else memory_backer
self._permissions_backer = permissions_backer # saved for copying
self._executable_pages = False if permissions_backer is None else permissions_backer[0]
self._permission_map = { } if permissions_backer is None else permissions_backer[1]
self._pages = { } if pages is None else pages
self._initialized = set() if initialized is None else initialized
self._page_size = 0x1000 if page_size is None else page_size
self._symbolic_addrs = dict() if symbolic_addrs is None else symbolic_addrs
self.state = None
self._preapproved_stack = xrange(0)
self._check_perms = check_permissions
# reverse mapping
self._name_mapping = cooldict.BranchingDict() if name_mapping is None else name_mapping
self._hash_mapping = cooldict.BranchingDict() if hash_mapping is None else hash_mapping
self._updated_mappings = set()
def __getstate__(self):
return {
'_memory_backer': self._memory_backer,
'_permissions_backer': self._permissions_backer,
'_executable_pages': self._executable_pages,
'_permission_map': self._permission_map,
'_pages': self._pages,
'_initialized': self._initialized,
'_page_size': self._page_size,
'state': None,
'_name_mapping': self._name_mapping,
'_hash_mapping': self._hash_mapping,
'_symbolic_addrs': self._symbolic_addrs,
'_preapproved_stack': self._preapproved_stack,
'_check_perms': self._check_perms
}
def __setstate__(self, s):
self._cowed = set()
self.__dict__.update(s)
def branch(self):
new_name_mapping = self._name_mapping.branch() if options.REVERSE_MEMORY_NAME_MAP in self.state.options else self._name_mapping
new_hash_mapping = self._hash_mapping.branch() if options.REVERSE_MEMORY_HASH_MAP in self.state.options else self._hash_mapping
new_pages = dict(self._pages)
self._cowed = set()
m = SimPagedMemory(memory_backer=self._memory_backer,
permissions_backer=self._permissions_backer,
pages=new_pages,
initialized=set(self._initialized),
page_size=self._page_size,
name_mapping=new_name_mapping,
hash_mapping=new_hash_mapping,
symbolic_addrs=dict(self._symbolic_addrs),
check_permissions=self._check_perms)
m._preapproved_stack = self._preapproved_stack
return m
def __getitem__(self, addr):
page_num = addr / self._page_size
page_idx = addr
#print "GET", addr, page_num, page_idx
try:
v = self._get_page(page_num).load_mo(self.state, page_idx)
return v
except KeyError:
raise KeyError(addr)
def __setitem__(self, addr, v):
page_num = addr / self._page_size
page_idx = addr
#print "SET", addr, page_num, page_idx
self._get_page(page_num, write=True, create=True)[page_idx] = v
self._update_mappings(addr, v.object)
#print "...",id(self._pages[page_num])
def __delitem__(self, addr):
raise Exception("For performance reasons, deletion is not supported. Contact Yan if this needs to change.")
# Specifically, the above is for two reasons:
#
# 1. deleting stuff out of memory doesn't make sense
# 2. if the page throws a key error, the backer dict is accessed. Thus, deleting things would simply
# change them back to what they were in the backer dict
@property
def allow_segv(self):
return self._check_perms and not self.state.scratch.priv and options.STRICT_PAGE_ACCESS in self.state.options
@property
def byte_width(self):
return self.state.arch.byte_width if self.state is not None else 8
def load_objects(self, addr, num_bytes, ret_on_segv=False):
"""
Load memory objects from paged memory.
:param addr: Address to start loading.
:param num_bytes: Number of bytes to load.
:param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise
a SimSegfaultError will be raised.
:return: list of tuples of (addr, memory_object)
:rtype: tuple
"""
result = [ ]
end = addr + num_bytes
for page_addr in self._containing_pages(addr, end):
try:
#print "Getting page %x" % (page_addr / self._page_size)
page = self._get_page(page_addr / self._page_size)
#print "... got it"
except KeyError:
#print "... missing"
#print "... SEGV"
# missing page
if self.allow_segv:
if ret_on_segv:
break
raise SimSegfaultError(addr, 'read-miss')
else:
continue
if self.allow_segv and not page.concrete_permissions & Page.PROT_READ:
#print "... SEGV"
if ret_on_segv:
break
raise SimSegfaultError(addr, 'non-readable')
result.extend(page.load_slice(self.state, addr, end))
return result
#
# Page management
#
def _create_page(self, page_num, permissions=None):
return Page(
page_num*self._page_size, self._page_size,
executable=self._executable_pages, permissions=permissions
)
def _initialize_page(self, n, new_page):
if n in self._initialized:
return False
self._initialized.add(n)
new_page_addr = n*self._page_size
initialized = False
if self.state is not None:
self.state.scratch.push_priv(True)
if self._memory_backer is None:
pass
elif isinstance(self._memory_backer, cle.Clemory):
# first, find the right clemory backer
for addr, backer in self._memory_backer.cbackers if self.byte_width == 8 else ((x, y) for x, _, y in self._memory_backer.stride_repr):
start_backer = new_page_addr - addr
if isinstance(start_backer, BV):
continue
if start_backer < 0 and abs(start_backer) >= self._page_size:
continue
if start_backer >= len(backer):
continue
# find permission backer associated with the address
# fall back to read-write if we can't find any...
flags = Page.PROT_READ | Page.PROT_WRITE
for start, end in self._permission_map:
if start <= new_page_addr < end:
flags = self._permission_map[(start, end)]
break
snip_start = max(0, start_backer)
write_start = max(new_page_addr, addr + snip_start)
write_size = self._page_size - write_start%self._page_size
if self.byte_width == 8:
snip = _ffi.buffer(backer)[snip_start:snip_start+write_size]
mo = SimMemoryObject(claripy.BVV(snip), write_start, byte_width=self.byte_width)
self._apply_object_to_page(n*self._page_size, mo, page=new_page)
else:
for i, byte in enumerate(backer):
mo = SimMemoryObject(claripy.BVV(byte, self.byte_width), write_start + i, byte_width=self.byte_width)
self._apply_object_to_page(n*self._page_size, mo, page=new_page)
new_page.permissions = claripy.BVV(flags, 3)
initialized = True
elif len(self._memory_backer) <= self._page_size:
for i in self._memory_backer:
if new_page_addr <= i and i <= new_page_addr + self._page_size:
if isinstance(self._memory_backer[i], claripy.ast.Base):
backer = self._memory_backer[i]
elif isinstance(self._memory_backer[i], bytes):
backer = claripy.BVV(self._memory_backer[i])
else:
backer = claripy.BVV(self._memory_backer[i], self.byte_width)
mo = SimMemoryObject(backer, i, byte_width=self.byte_width)
self._apply_object_to_page(n*self._page_size, mo, page=new_page)
initialized = True
elif len(self._memory_backer) > self._page_size:
for i in range(self._page_size):
try:
if isinstance(self._memory_backer[i], claripy.ast.Base):
backer = self._memory_backer[i]
elif isinstance(self._memory_backer[i], bytes):
backer = claripy.BVV(self._memory_backer[i])
else:
backer = claripy.BVV(self._memory_backer[i], self.byte_width)
mo = SimMemoryObject(backer, new_page_addr+i, byte_width=self.byte_width)
self._apply_object_to_page(n*self._page_size, mo, page=new_page)
initialized = True
except KeyError:
pass
if self.state is not None:
self.state.scratch.pop_priv()
return initialized
def _get_page(self, page_num, write=False, create=False, initialize=True):
page_addr = page_num * self._page_size
try:
page = self._pages[page_num]
except KeyError:
if not (initialize or create or page_addr in self._preapproved_stack):
raise
page = self._create_page(page_num)
self._symbolic_addrs[page_num] = set()
if initialize:
initialized = self._initialize_page(page_num, page)
if not initialized and not create and page_addr not in self._preapproved_stack:
raise
self._pages[page_num] = page
self._cowed.add(page_num)
return page
if write and page_num not in self._cowed:
page = page.copy()
self._symbolic_addrs[page_num] = set(self._symbolic_addrs[page_num])
self._cowed.add(page_num)
self._pages[page_num] = page
return page
def __contains__(self, addr):
try:
return self.__getitem__(addr) is not None
except KeyError:
return False
def contains_no_backer(self, addr):
"""
Tests if the address is contained in any page of paged memory, without considering memory backers.
:param int addr: The address to test.
:return: True if the address is included in one of the pages, False otherwise.
:rtype: bool
"""
for i, p in self._pages.iteritems():
if i * self._page_size <= addr < (i + 1) * self._page_size:
return addr - (i * self._page_size) in p.keys()
return False
def keys(self):
sofar = set()
sofar.update(self._memory_backer.keys())
for i, p in self._pages.items():
sofar.update([k + i * self._page_size for k in p.keys()])
return sofar
def __len__(self):
return len(self.keys())
def changed_bytes(self, other):
return self.__changed_bytes(other)
def __changed_bytes(self, other):
"""
Gets the set of changed bytes between `self` and `other`.
:type other: SimPagedMemory
:returns: A set of differing bytes.
"""
if self._page_size != other._page_size:
raise SimMemoryError("SimPagedMemory page sizes differ. This is asking for disaster.")
our_pages = set(self._pages.keys())
their_pages = set(other._pages.keys())
their_additions = their_pages - our_pages
our_additions = our_pages - their_pages
common_pages = our_pages & their_pages
candidates = set()
for p in their_additions:
candidates.update(other._pages[p].keys())
for p in our_additions:
candidates.update(self._pages[p].keys())
for p in common_pages:
our_page = self._pages[p]
their_page = other._pages[p]
if our_page is their_page:
continue
our_keys = set(our_page.keys())
their_keys = set(their_page.keys())
changes = (our_keys - their_keys) | (their_keys - our_keys) | {
i for i in (our_keys & their_keys) if our_page.load_mo(self.state, i) is not their_page.load_mo(self.state, i)
}
candidates.update(changes)
#both_changed = our_changes & their_changes
#ours_changed_only = our_changes - both_changed
#theirs_changed_only = their_changes - both_changed
#both_deleted = their_deletions & our_deletions
#ours_deleted_only = our_deletions - both_deleted
#theirs_deleted_only = their_deletions - both_deleted
differences = set()
for c in candidates:
if c not in self and c in other:
differences.add(c)
elif c in self and c not in other:
differences.add(c)
else:
if type(self[c]) is not SimMemoryObject:
self[c] = SimMemoryObject(self.state.se.BVV(ord(self[c]), self.byte_width), c, byte_width=self.byte_width)
if type(other[c]) is not SimMemoryObject:
other[c] = SimMemoryObject(self.state.se.BVV(ord(other[c]), self.byte_width), c, byte_width=self.byte_width)
if c in self and self[c] != other[c]:
# Try to see if the bytes are equal
self_byte = self[c].bytes_at(c, 1)
other_byte = other[c].bytes_at(c, 1)
if self_byte is not other_byte:
#l.debug("%s: offset %x, two different bytes %s %s from %s %s", self.id, c,
# self_byte, other_byte,
# self[c].object.model, other[c].object.model)
differences.add(c)
else:
# this means the byte is in neither memory
pass
return differences
#
# Memory object management
#
def _apply_object_to_page(self, page_base, mo, page=None, overwrite=True):
"""
Writes a memory object to a `page`
:param page_base: The base address of the page.
:param mo: The memory object.
:param page: (optional) the page to use.
:param overwrite: (optional) If False, only write to currently-empty memory.
"""
page_num = page_base / self._page_size
try:
page = self._get_page(page_num,
write=True,
create=not self.allow_segv) if page is None else page
except KeyError:
if self.allow_segv:
raise SimSegfaultError(mo.base, 'write-miss')
else:
raise
if self.allow_segv and not page.concrete_permissions & Page.PROT_WRITE:
raise SimSegfaultError(mo.base, 'non-writable')
page.store_mo(self.state, mo, overwrite=overwrite)
return True
def _containing_pages(self, mo_start, mo_end):
page_start = mo_start - mo_start%self._page_size
page_end = mo_end + (self._page_size - mo_end%self._page_size) if mo_end % self._page_size else mo_end
return [ b for b in range(page_start, page_end, self._page_size) ]
def _containing_pages_mo(self, mo):
mo_start = mo.base
mo_end = mo.base + mo.length
return self._containing_pages(mo_start, mo_end)
def store_memory_object(self, mo, overwrite=True):
"""
This function optimizes a large store by storing a single reference to the :class:`SimMemoryObject` instead of
one for each byte.
:param memory_object: the memory object to store
"""
for p in self._containing_pages_mo(mo):
self._apply_object_to_page(p, mo, overwrite=overwrite)
self._update_range_mappings(mo.base, mo.object, mo.length)
def replace_memory_object(self, old, new_content):
"""
Replaces the memory object `old` with a new memory object containing `new_content`.
:param old: A SimMemoryObject (i.e., one from :func:`memory_objects_for_hash()` or :func:`
memory_objects_for_name()`).
:param new_content: The content (claripy expression) for the new memory object.
:returns: the new memory object
"""
if old.object.size() != new_content.size():
raise SimMemoryError("memory objects can only be replaced by the same length content")
new = SimMemoryObject(new_content, old.base, byte_width=self.byte_width)
for p in self._containing_pages_mo(old):
self._get_page(p/self._page_size, write=True).replace_mo(self.state, old, new)
if isinstance(new.object, claripy.ast.BV):
for b in range(old.base, old.base+old.length):
self._update_mappings(b, new.object)
return new
def replace_all(self, old, new):
"""
Replaces all instances of expression `old` with expression `new`.
:param old: A claripy expression. Must contain at least one named variable (to make it possible to use the
name index for speedup).
:param new: The new variable to replace it with.
"""
if options.REVERSE_MEMORY_NAME_MAP not in self.state.options:
raise SimMemoryError("replace_all is not doable without a reverse name mapping. Please add "
"sim_options.REVERSE_MEMORY_NAME_MAP to the state options")
if not isinstance(old, claripy.ast.BV) or not isinstance(new, claripy.ast.BV):
raise SimMemoryError("old and new arguments to replace_all() must be claripy.BV objects")
if len(old.variables) == 0:
raise SimMemoryError("old argument to replace_all() must have at least one named variable")
# Compute an intersection between sets of memory objects for each unique variable name. The eventual memory
# object set contains all memory objects that we should update.
memory_objects = None
for v in old.variables:
if memory_objects is None:
memory_objects = self.memory_objects_for_name(v)
elif len(memory_objects) == 0:
# It's a set and it's already empty
# there is no way for it to go back...
break
else:
memory_objects &= self.memory_objects_for_name(v)
replaced_objects_cache = { }
for mo in memory_objects:
replaced_object = None
if mo.object in replaced_objects_cache:
if mo.object is not replaced_objects_cache[mo.object]:
replaced_object = replaced_objects_cache[mo.object]
else:
replaced_object = mo.object.replace(old, new)
replaced_objects_cache[mo.object] = replaced_object
if mo.object is replaced_object:
# The replace does not really occur
replaced_object = None
if replaced_object is not None:
self.replace_memory_object(mo, replaced_object)
#
# Mapping bullshit
#
def _mark_updated_mapping(self, d, m):
if m in self._updated_mappings:
return
if options.REVERSE_MEMORY_HASH_MAP not in self.state.options and d is self._hash_mapping:
#print "ABORTING FROM HASH"
return
if options.REVERSE_MEMORY_NAME_MAP not in self.state.options and d is self._name_mapping:
#print "ABORTING FROM NAME"
return
#print m
#SimSymbolicMemory.wtf += 1
#print SimSymbolicMemory.wtf
try:
d[m] = set(d[m])
except KeyError:
d[m] = set()
self._updated_mappings.add(m)
def _update_range_mappings(self, actual_addr, cnt, size):
if self.state is None or not \
(options.REVERSE_MEMORY_NAME_MAP in self.state.options or
options.REVERSE_MEMORY_HASH_MAP in self.state.options or
options.MEMORY_SYMBOLIC_BYTES_MAP in self.state.options):
return
for i in range(actual_addr, actual_addr+size):
self._update_mappings(i, cnt)
def _update_mappings(self, actual_addr, cnt):
if options.MEMORY_SYMBOLIC_BYTES_MAP in self.state.options:
page_num = actual_addr / self._page_size
page_idx = actual_addr
if self.state.se.symbolic(cnt):
self._symbolic_addrs[page_num].add(page_idx)
else:
self._symbolic_addrs[page_num].discard(page_idx)
if not (options.REVERSE_MEMORY_NAME_MAP in self.state.options or
options.REVERSE_MEMORY_HASH_MAP in self.state.options):
return
if (options.REVERSE_MEMORY_HASH_MAP not in self.state.options) and \
len(self.state.se.variables(cnt)) == 0:
return
l.debug("Updating mappings at address 0x%x", actual_addr)
try:
l.debug("... removing old mappings")
# remove this address for the old variables
old_obj = self[actual_addr]
if isinstance(old_obj, SimMemoryObject):
old_obj = old_obj.object
if isinstance(old_obj, claripy.ast.BV):
if options.REVERSE_MEMORY_NAME_MAP in self.state.options:
var_set = self.state.se.variables(old_obj)
for v in var_set:
self._mark_updated_mapping(self._name_mapping, v)
self._name_mapping[v].discard(actual_addr)
if len(self._name_mapping[v]) == 0:
self._name_mapping.pop(v, None)
if options.REVERSE_MEMORY_HASH_MAP in self.state.options:
h = hash(old_obj)
self._mark_updated_mapping(self._hash_mapping, h)
self._hash_mapping[h].discard(actual_addr)
if len(self._hash_mapping[h]) == 0:
self._hash_mapping.pop(h, None)
except KeyError:
pass
l.debug("... adding new mappings")
if options.REVERSE_MEMORY_NAME_MAP in self.state.options:
# add the new variables to the mapping
var_set = self.state.se.variables(cnt)
for v in var_set:
self._mark_updated_mapping(self._name_mapping, v)
if v not in self._name_mapping:
self._name_mapping[v] = set()
self._name_mapping[v].add(actual_addr)
if options.REVERSE_MEMORY_HASH_MAP in self.state.options:
# add the new variables to the hash->addrs mapping
h = hash(cnt)
self._mark_updated_mapping(self._hash_mapping, h)
if h not in self._hash_mapping:
self._hash_mapping[h] = set()
self._hash_mapping[h].add(actual_addr)
def get_symbolic_addrs(self):
symbolic_addrs = set()
for page in self._symbolic_addrs:
symbolic_addrs.update(self._symbolic_addrs[page])
return symbolic_addrs
def addrs_for_name(self, n):
"""
Returns addresses that contain expressions that contain a variable named `n`.
"""
if n not in self._name_mapping:
return
self._mark_updated_mapping(self._name_mapping, n)
to_discard = set()
for e in self._name_mapping[n]:
try:
if n in self[e].object.variables: yield e
else: to_discard.add(e)
except KeyError:
to_discard.add(e)
self._name_mapping[n] -= to_discard
def addrs_for_hash(self, h):
"""
Returns addresses that contain expressions that contain a variable with the hash of `h`.
"""
if h not in self._hash_mapping:
return
self._mark_updated_mapping(self._hash_mapping, h)
to_discard = set()
for e in self._hash_mapping[h]:
try:
if h == hash(self[e].object): yield e
else: to_discard.add(e)
except KeyError:
to_discard.add(e)
self._hash_mapping[h] -= to_discard
def memory_objects_for_name(self, n):
"""
Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the name of
`n`.
This is useful for replacing those values in one fell swoop with :func:`replace_memory_object()`, even if
they have been partially overwritten.
"""
return set([ self[i] for i in self.addrs_for_name(n)])
def memory_objects_for_hash(self, n):
"""
Returns a set of :class:`SimMemoryObjects` that contain expressions that contain a variable with the hash
`h`.
"""
return set([ self[i] for i in self.addrs_for_hash(n)])
def permissions(self, addr, permissions=None):
"""
Returns the permissions for a page at address `addr`.
If optional argument permissions is given, set page permissions to that prior to returning permissions.
"""
if self.state.se.symbolic(addr):
raise SimMemoryError("page permissions cannot currently be looked up for symbolic addresses")
if isinstance(addr, claripy.ast.bv.BV):
addr = self.state.se.eval(addr)
page_num = addr / self._page_size
try:
page = self._get_page(page_num)
except KeyError:
raise SimMemoryError("page does not exist at given address")
# Set permissions for the page
if permissions is not None:
if isinstance(permissions, (int, long)):
permissions = claripy.BVV(permissions, 3)
if not isinstance(permissions,claripy.ast.bv.BV):
raise SimMemoryError("Unknown permissions argument type of {0}.".format(type(permissions)))
page.permissions = permissions
return page.permissions
def map_region(self, addr, length, permissions, init_zero=False):
if o.TRACK_MEMORY_MAPPING not in self.state.options:
return
if self.state.se.symbolic(addr):
raise SimMemoryError("cannot map region with a symbolic address")
if isinstance(addr, claripy.ast.bv.BV):
addr = self.state.se.max_int(addr)
base_page_num = addr / self._page_size
# round length
pages = length / self._page_size
if length % self._page_size > 0:
pages += 1
# this check should not be performed when constructing a CFG
if self.state.mode != 'fastpath':
for page in xrange(pages):
page_id = base_page_num + page
if page_id * self._page_size in self:
err = "map_page received address and length combination which contained mapped page"
l.warning(err)
raise SimMemoryError(err)
if isinstance(permissions, (int, long)):
permissions = claripy.BVV(permissions, 3)
for page in xrange(pages):
page_id = base_page_num + page
self._pages[page_id] = self._create_page(page_id, permissions=permissions)
self._symbolic_addrs[page_id] = set()
if init_zero:
if self.state is not None:
self.state.scratch.push_priv(True)
mo = SimMemoryObject(claripy.BVV(0, self._page_size * self.byte_width), page_id*self._page_size, byte_width=self.byte_width)
self._apply_object_to_page(page_id*self._page_size, mo, page=self._pages[page_id])
if self.state is not None:
self.state.scratch.pop_priv()
def unmap_region(self, addr, length):
if o.TRACK_MEMORY_MAPPING not in self.state.options:
return
if self.state.se.symbolic(addr):
raise SimMemoryError("cannot unmap region with a symbolic address")
if isinstance(addr, claripy.ast.bv.BV):
addr = self.state.se.max_int(addr)
base_page_num = addr / self._page_size
pages = length / self._page_size
if length % self._page_size > 0:
pages += 1
# this check should not be performed when constructing a CFG
if self.state.mode != 'fastpath':
for page in xrange(pages):
if base_page_num + page not in self._pages:
l.warning("unmap_region received address and length combination is not mapped")
return
for page in xrange(pages):
del self._pages[base_page_num + page]
del self._symbolic_addrs[base_page_num + page]
from .. import sim_options as o
| 38.534272 | 198 | 0.596798 |
795621ad66c8670b5b95d1930daff7584b423e96 | 1,138 | py | Python | vcorelib/math/__init__.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | 1 | 2022-03-31T09:26:04.000Z | 2022-03-31T09:26:04.000Z | vcorelib/math/__init__.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | 2 | 2022-03-31T09:35:06.000Z | 2022-03-31T09:38:07.000Z | vcorelib/math/__init__.py | vkottler/vcorelib | 97c3b92932d5b2f8c6d9cdca55f34bf167980a21 | [
"MIT"
] | null | null | null | """
Math utilities.
"""
# built-in
import typing
class UnitSystem(typing.NamedTuple):
    """
    A pairing of prefixes defining a unit, and the amount that indicates the
    multiplicative step-size between them.
    """

    # Ordered unit prefixes, smallest to largest.
    prefixes: typing.Sequence[str]
    # Multiplicative step between adjacent prefixes.
    divisor: int


SI_UNITS = UnitSystem(["n", "u", "m", "", "k", "M", "G", "T"], 1000)
KIBI_UNITS = UnitSystem(
    ["", "Ki", "Mi", "Gi", "Ti", "Pi", "Ei", "Zi", "Yi"], 1024
)


def unit_traverse(
    val: int,
    unit: UnitSystem = SI_UNITS,
    max_prefix: int = 3,
    iteration: int = 0,
) -> typing.Tuple[int, int, str]:
    """
    Given an initial value, traverse a unit system to get the largest
    representative unit prefix. Also return a fractional component, in units
    of the next-smallest prefix.
    """
    step = unit.divisor
    whole = val
    remainder = 0

    # Never walk past the configured ceiling or the largest available prefix.
    limit = min(len(unit.prefixes) - 1, max_prefix)
    while whole >= step and iteration < limit:
        # Peel one divisor off at a time, keeping the last remainder.
        whole, remainder = divmod(whole, step)
        iteration += 1

    return whole, remainder, unit.prefixes[iteration]
795622962a2846cc34c80885fed9ff7cbed3d3de | 7,676 | py | Python | ripper/ripper.py | bdunford/ripper | ec5cb3f652ad5dee8b78f0b7d24f79f78442cb02 | [
"MIT"
] | null | null | null | ripper/ripper.py | bdunford/ripper | ec5cb3f652ad5dee8b78f0b7d24f79f78442cb02 | [
"MIT"
] | null | null | null | ripper/ripper.py | bdunford/ripper | ec5cb3f652ad5dee8b78f0b7d24f79f78442cb02 | [
"MIT"
] | null | null | null | import os
import sys
import traceback
import uuid
import time
import copy
from .requester import Requester
from .resources import Resources, Resource
from .writer import Writer
from .window import Window
from .asset import Asset
from .threads import Threader
from .pages import Pages, Page
from .stripper import Stripper
from urlparse import urlparse, urljoin
from threading import *
# Empty attribute-bag class: instances accept arbitrary attribute assignment.
class Generic(object):
    pass
#TODO NEED TO TREAT DATA LIKE SCRIPTS and replace references
class Ripper(object):
    """Download a web page (and optionally its top-level linked pages) plus all
    referenced assets, rewriting references so the local copy works offline.

    Python 2 codebase: relies on ``urlparse`` and on ``filter``/``map``
    returning lists. Collaborators (Requester, Resources, Writer, Asset,
    Threader, Pages, Stripper) are project-local modules imported at file
    scope.
    """

    def __init__(self, logger=None, threads=4):
        # Callable invoked as logger(ripper, item) after each page/asset.
        self.logger = logger
        # Worker-thread count handed to Threader for asset downloads.
        self.threads = threads
        # Per-mime-category counters of downloaded assets (see _update_stats).
        self.stats = {}
        # Every Asset processed so far, including deep-copied "existing" hits.
        self.assets = []
        # Errors collected from the Threader after it finishes.
        self.errors = []
        # Page objects discovered on the index page.
        self.pages = []
        # Prevents nested _get_page calls from starting a second Threader.
        self.is_threading = False
        self.extension = ".html"
        # Progress counters; mutated under self.lock from worker threads.
        self.totals = {"pages":1,"assets":0, "pages_down":0, "assets_down":0}
        self.lock = Lock()

    def rip(self,url,base, top_level_pages=True):
        """Rip ``url`` into directory ``base``.

        Writes ``index.html`` plus, when ``top_level_pages`` is true, one file
        per page linked from the index. Raises ValueError when the root URL
        cannot be fetched.
        """
        self.url = url
        self.base = base
        self.index_file = os.path.join(base,"index" + self.extension)
        # Reset per-run state so the instance can be reused.
        self.stats = {}
        self.assets = []
        self.pages = []
        index = self._get_page(self.url);
        self._update_totals("pages_down",1)
        if not index:
            raise ValueError('Could not access website. Is the URL correct?')
        self.pages = self._get_page_links(self.url,index)
        if top_level_pages:
            self._update_totals("pages",len(self.pages))
            for p in self.pages:
                if not p.exists:
                    if p.replace_reference:
                        content = self._get_page(p.url);
                        if not content:
                            # Unreachable page: write a stub so links resolve.
                            content = "<html></html>"
                        else:
                            content = self._update_page_links(content,self.extension)
                            # Links discovered on the sub-page itself.
                            pages = self._get_page_links(self.url,content)
                            content = self._update_page_links(content,self.extension)
                            # Neutralize links to pages we are not downloading.
                            content = self._remove_page_links(content,pages)
                            content = self._remove_trackers(content)
                        p.downloaded = True
                        Writer.write(content,os.path.join(self.base,p.name + self.extension))
                        self.logger(self,p)
                        self._update_totals("pages_down",1)
            index = self._update_page_links(index,self.extension)
        else:
            # Not mirroring sub-pages: dead-end every page link on the index.
            index = self._remove_page_links(index,self.pages,False)
        index = self._remove_trackers(index)
        Writer.write(index,self.index_file)
        self.logger(self,Page(urlparse(self.url).path,self.url,"index",False))

    def _get_page(self,url,relative_assets=False):
        """Fetch ``url``, download its assets (threaded on the first call),
        and return the content with asset references rewritten, or False."""
        requester = Requester()
        page = requester.get_source(url)
        if page:
            content = page.text
            # Marker ties assets downloaded in this call to this page.
            marker = str(uuid.uuid4())
            threader = False
            if not self.is_threading:
                # Only the outermost _get_page owns the thread pool; nested
                # calls (e.g. for CSS sub-resources) download serially.
                threader = Threader(self.threads,self._log_asset)
                threader.start(True)
                self.is_threading = True
            resources = Resources(page)
            self._update_totals("assets",self._get_total_assets_to_download(resources))
            for r in resources:
                if threader:
                    threader.add((self._get_asset,{"resource" : r, "marker" : marker}))
                else:
                    asset = self._get_asset(r,marker)
                    self._log_asset(asset)
            if threader:
                threader.finish()
                if threader.errors:
                    self.errors += threader.errors
            return self._update_page_assets(content,marker,relative_assets)
        else:
            return False

    def _get_page_links(self,url,content):
        """Return Page objects found in ``content``; links that point at an
        already-downloaded asset keep their original reference."""
        pages = []
        for p in Pages(url,content):
            if p.reference in map(lambda x: x.url, self.assets):
                p.replace_reference = False
            pages.append(p)
        return pages

    def _get_asset(self,resource,marker):
        """Download ``resource`` to disk (or reuse a previously downloaded
        copy) and return the Asset describing it."""
        requester = Requester()
        asset_exists = self._find_asset_by_reference(resource,marker)
        if asset_exists:
            return asset_exists
        else:
            asset = Asset(self.base,resource,marker)
            if asset.mime:
                # Binary assets stream straight to disk; textual assets (css,
                # etc.) recurse through _get_page so their own references get
                # rewritten — scripts keep absolute asset URLs.
                x = requester.get_stream(asset.source.url) if asset.mime.stream else self._get_page(asset.source.url,False if asset.mime.category == "scripts" else True)
                if x:
                    Writer.write(x,asset.path)
                    asset.downloaded = True
            return asset

    def _find_asset_by_reference(self,resource,marker):
        """Return a deep copy of an already-seen matching asset re-tagged with
        this page's ``marker``, or False when the asset is new."""
        find = self._check_asset_exists(resource)
        if find:
            asset = copy.deepcopy(find)
            asset.resource = resource
            asset.marker = marker
            asset.existing = True
            return asset
        else:
            return False

    def _check_asset_exists(self,resource):
        """Return the first stored asset matching ``resource`` by reference or
        URL, or False. (py2 ``filter`` returns a list here.)"""
        find = filter(lambda a: a.source.reference == resource.reference or a.source.url == resource.url,list(self.assets))
        if len(find) > 0:
            return find[0]
        else:
            return False

    def _log_asset(self,asset):
        """Record a processed asset: update counters, remember it, and notify
        the logger. Also used as the Threader's completion callback."""
        if asset.downloaded:
            self._update_stats(asset.mime)
            if not asset.existing:
                self._update_totals("assets_down",1)
        self.assets.append(asset)
        self.logger(self,asset)

    def _update_page_assets(self,content,marker,relative_assets):
        """Rewrite references of this page's downloaded assets to their local
        (relative or absolute) URLs."""
        for asset in self.assets:
            if asset.downloaded == True and asset.marker == marker:
                content = self._find_and_replace(content,asset.source.reference,asset.relative_url if relative_assets else asset.url)
        return content

    def _update_page_links(self,content,extension):
        """Rewrite quoted page links to their local ``name + extension``
        filenames (handles both single- and double-quoted attributes)."""
        for page in self.pages:
            if page.replace_reference:
                wrap = "{0}{1}{0}"
                content = self._find_and_replace(content,wrap.format('"',page.reference),wrap.format('"',page.name + extension))
                content = self._find_and_replace(content,wrap.format("'",page.reference),wrap.format("'",page.name + extension))
        return content

    def _remove_page_links(self,content,pages,check_pages=True):
        """Replace links to pages that were not mirrored with fragment links.

        With ``check_pages`` true only links absent from ``self.pages`` are
        neutralized; with it false every link in ``pages`` is.
        """
        for page in pages:
            if len(filter(lambda p: p.name == page.name,self.pages)) == 0 or not check_pages:
                if page.replace_reference:
                    wrap = "{0}{1}{0}"
                    # NOTE(review): wrap repeats slot {0}, so format('"#', name)
                    # yields '"#name"#' — the trailing '#' looks unintended;
                    # confirm against produced HTML.
                    content = self._find_and_replace(content,wrap.format('"',page.reference),wrap.format('"#',page.name))
                    content = self._find_and_replace(content,wrap.format("'",page.reference),wrap.format("'#",page.name))
        return content

    def _remove_trackers(self, content):
        """Strip tracking snippets via the project Stripper."""
        return Stripper.Strip(content)

    def _find_and_replace(self,text,find,replace):
        """Plain substring replacement helper."""
        text = text.replace(find,replace)
        return text

    def _update_stats(self,mime):
        """Increment the download counter for ``mime.category``."""
        if mime.category in self.stats:
            self.stats[mime.category] += 1
        else:
            self.stats[mime.category] = 1

    def _update_totals(self,key,value):
        """Thread-safely add ``value`` to the progress counter ``key``."""
        with self.lock:
            self.totals[key] += value

    def _get_total_assets_to_download(self,resources):
        """Count resources expected to need downloading.

        NOTE(review): this counts resources that ALREADY exist (predicate is
        the opposite of the commented-out variant below) — looks inverted;
        confirm intended totals semantics. The TODO predates this review.
        """
        #TODO this needs to compare both the reference and the url just like the _find_asset_by_reference
        return len(list(filter(lambda r: self._check_asset_exists(r),list(resources))))
        #return len(list(filter(lambda r: r.reference not in map(lambda a: a.source.reference,list(self.assets)),list(resources._resources))))
| 35.702326 | 169 | 0.593538 |
795622ea1a9d0a97a116f53672563e473090fdf2 | 42 | py | Python | vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/rosgraph_msgs/msg/__init__.py | slicht-uri/Sandshark-Beta-Lab- | 6cff36b227b49b776d13187c307e648d2a52bdae | [
"MIT"
] | null | null | null | vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/rosgraph_msgs/msg/__init__.py | slicht-uri/Sandshark-Beta-Lab- | 6cff36b227b49b776d13187c307e648d2a52bdae | [
"MIT"
] | null | null | null | vehicle/adapt-sysroot/ros_install_isolated/lib/python2.7/dist-packages/rosgraph_msgs/msg/__init__.py | slicht-uri/Sandshark-Beta-Lab- | 6cff36b227b49b776d13187c307e648d2a52bdae | [
"MIT"
] | 1 | 2019-10-18T06:25:14.000Z | 2019-10-18T06:25:14.000Z | from ._Log import *
from ._Clock import *
| 14 | 21 | 0.714286 |
795622f2437de0e09bc095361e60acafc6679d96 | 387 | py | Python | aocpo_backend/aocpo/asgi.py | CoderChen01/aocpo | 279bfae910a30be762e1954df1a53a6217a6e300 | [
"Apache-2.0"
] | 7 | 2020-02-17T12:20:26.000Z | 2021-03-15T01:02:34.000Z | aocpo_backend/aocpo/asgi.py | CoderChen01/aocpo | 279bfae910a30be762e1954df1a53a6217a6e300 | [
"Apache-2.0"
] | 3 | 2020-04-19T03:01:41.000Z | 2020-04-19T03:02:09.000Z | aocpo_backend/aocpo/asgi.py | CoderChen01/aocpo | 279bfae910a30be762e1954df1a53a6217a6e300 | [
"Apache-2.0"
] | null | null | null | """
ASGI config for aocpo project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
# Point Django at the project settings unless the environment already does.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'aocpo.settings')
# Module-level ASGI callable that servers (uvicorn/daphne) import and serve.
application = get_asgi_application()
| 22.764706 | 78 | 0.782946 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.