Dataset schema (⌀ marks a nullable column):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40–40 |
| size | int64 | 1–1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3–239 |
| max_stars_repo_name | string | length 5–130 |
| max_stars_repo_head_hexsha | string | length 40–78 |
| max_stars_repo_licenses | list | length 1–10 |
| max_stars_count ⌀ | int64 | 1–191k |
| max_stars_repo_stars_event_min_datetime ⌀ | string | length 24–24 |
| max_stars_repo_stars_event_max_datetime ⌀ | string | length 24–24 |
| max_issues_repo_path | string | length 3–239 |
| max_issues_repo_name | string | length 5–130 |
| max_issues_repo_head_hexsha | string | length 40–78 |
| max_issues_repo_licenses | list | length 1–10 |
| max_issues_count ⌀ | int64 | 1–67k |
| max_issues_repo_issues_event_min_datetime ⌀ | string | length 24–24 |
| max_issues_repo_issues_event_max_datetime ⌀ | string | length 24–24 |
| max_forks_repo_path | string | length 3–239 |
| max_forks_repo_name | string | length 5–130 |
| max_forks_repo_head_hexsha | string | length 40–78 |
| max_forks_repo_licenses | list | length 1–10 |
| max_forks_count ⌀ | int64 | 1–105k |
| max_forks_repo_forks_event_min_datetime ⌀ | string | length 24–24 |
| max_forks_repo_forks_event_max_datetime ⌀ | string | length 24–24 |
| content | string | length 1–1.03M |
| avg_line_length | float64 | 1–958k |
| max_line_length | int64 | 1–1.03M |
| alphanum_fraction | float64 | 0–1 |
---
hexsha: 4a12c438c7a83754218601ba458d96ccb0a07ee3 | size: 496 | ext: py | lang: Python
max_stars: farms/migrations/0004_address_state.py | FlowFX/sturdy-potato @ d33c6695f71cf8e6517dbd5a4d80c17df38734a8 | ["MIT"] | count: 12 | 2017-04-22T11:15:01.000Z – 2021-08-10T15:09:12.000Z
max_issues: farms/migrations/0004_address_state.py | FlowFX/sturdy-potato @ d33c6695f71cf8e6517dbd5a4d80c17df38734a8 | ["MIT"] | count: 59 | 2017-03-10T18:23:31.000Z – 2018-02-01T15:16:30.000Z
max_forks: farms/migrations/0004_address_state.py | FlowFX/sturdy-potato @ d33c6695f71cf8e6517dbd5a4d80c17df38734a8 | ["MIT"] | count: 1 | 2020-07-10T09:26:11.000Z – 2020-07-10T09:26:11.000Z

content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-12 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('farms', '0003_address_municipality'),
]
operations = [
migrations.AddField(
model_name='address',
name='state',
field=models.CharField(default='', max_length=255),
preserve_default=False,
),
]
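A quick, hedged sketch of applying this migration outside of `manage.py` (assumes a configured Django project with the `farms` app in `INSTALLED_APPS`; the settings module name is illustrative):

```python
import os

import django
from django.core.management import call_command

# Point Django at the project settings before setup(); the module name
# here is an assumption for illustration.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "project.settings")
django.setup()

# Equivalent to: python manage.py migrate farms 0004_address_state
call_command("migrate", "farms", "0004_address_state")
```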
avg_line_length: 22.545455 | max_line_length: 63 | alphanum_fraction: 0.612903

---
hexsha: 4a12c4c90bb0643bdc8e066da85866e4583d03e4 | size: 279 | ext: py | lang: Python
max_stars: tests/test_examples.py | agonopol/sent-sim @ bb1467226a2c9db41f46582f3a3658de95af0b32 | ["BSD-3-Clause"] | count: null | dates: null
max_issues: tests/test_examples.py | agonopol/sent-sim @ bb1467226a2c9db41f46582f3a3658de95af0b32 | ["BSD-3-Clause"] | count: null | dates: null
max_forks: tests/test_examples.py | agonopol/sent-sim @ bb1467226a2c9db41f46582f3a3658de95af0b32 | ["BSD-3-Clause"] | count: null | dates: null

content:
from sentsim import similarity
def test_similarity():
assert 1 == similarity("this is a sentence", "this is a sentence")
assert 1 > similarity("this is a sentence", "this is also a sentence")
assert 0.8 < similarity("this is a sentence", "this is also a sentence")
avg_line_length: 34.875 | max_line_length: 76 | alphanum_fraction: 0.698925

---
hexsha: 4a12c57b83a5a0f87b68db3fc33552eba8fa96da | size: 8,286 | ext: py | lang: Python
max_stars: util/run_pro2cmake.py | chentoz/occQt @ 9738c26a18ac7757201342a69f95483d435a39fa | ["MIT"] | count: null | dates: null
max_issues: util/run_pro2cmake.py | chentoz/occQt @ 9738c26a18ac7757201342a69f95483d435a39fa | ["MIT"] | count: null | dates: null
max_forks: util/run_pro2cmake.py | chentoz/occQt @ 9738c26a18ac7757201342a69f95483d435a39fa | ["MIT"] | count: null | dates: null

content:
#!/usr/bin/env python3
#############################################################################
##
## Copyright (C) 2018 The Qt Company Ltd.
## Contact: https://www.qt.io/licensing/
##
## This file is part of the plugins of the Qt Toolkit.
##
## $QT_BEGIN_LICENSE:GPL-EXCEPT$
## Commercial License Usage
## Licensees holding valid commercial Qt licenses may use this file in
## accordance with the commercial license agreement provided with the
## Software or, alternatively, in accordance with the terms contained in
## a written agreement between you and The Qt Company. For licensing terms
## and conditions see https://www.qt.io/terms-conditions. For further
## information use the contact form at https://www.qt.io/contact-us.
##
## GNU General Public License Usage
## Alternatively, this file may be used under the terms of the GNU
## General Public License version 3 as published by the Free Software
## Foundation with exceptions as appearing in the file LICENSE.GPL3-EXCEPT
## included in the packaging of this file. Please review the following
## information to ensure the GNU General Public License requirements will
## be met: https://www.gnu.org/licenses/gpl-3.0.html.
##
## $QT_END_LICENSE$
##
#############################################################################
import glob
import os
import subprocess
import concurrent.futures
import sys
import typing
import argparse
from argparse import ArgumentParser
def parse_command_line() -> argparse.Namespace:
parser = ArgumentParser(
description="Run pro2cmake on all .pro files recursively in given path. "
"You can pass additional arguments to the pro2cmake calls by appending "
"-- --foo --bar"
)
parser.add_argument(
"--only-existing",
dest="only_existing",
action="store_true",
help="Run pro2cmake only on .pro files that already have a CMakeLists.txt.",
)
parser.add_argument(
"--only-missing",
dest="only_missing",
action="store_true",
help="Run pro2cmake only on .pro files that do not have a CMakeLists.txt.",
)
parser.add_argument(
"--only-qtbase-main-modules",
dest="only_qtbase_main_modules",
action="store_true",
help="Run pro2cmake only on the main modules in qtbase.",
)
parser.add_argument(
"--skip-subdirs-projects",
dest="skip_subdirs_projects",
action="store_true",
help="Don't run pro2cmake on TEMPLATE=subdirs projects.",
)
parser.add_argument(
"--is-example",
dest="is_example",
action="store_true",
help="Run pro2cmake with --is-example flag.",
)
parser.add_argument(
"--count", dest="count", help="How many projects should be converted.", type=int
)
parser.add_argument(
"--offset",
dest="offset",
help="From the list of found projects, from which project should conversion begin.",
type=int,
)
parser.add_argument(
"path", metavar="<path>", type=str, help="The path where to look for .pro files."
)
args, unknown = parser.parse_known_args()
# Error out when the unknown arguments do not start with a "--",
# which implies passing through arguments to pro2cmake.
if len(unknown) > 0 and unknown[0] != "--":
parser.error("unrecognized arguments: {}".format(" ".join(unknown)))
else:
args.pro2cmake_args = unknown[1:]
return args
def find_all_pro_files(base_path: str, args: argparse.Namespace):
def sorter(pro_file: str) -> str:
""" Sorter that tries to prioritize main pro files in a directory. """
pro_file_without_suffix = pro_file.rsplit("/", 1)[-1][:-4]
dir_name = os.path.dirname(pro_file)
if dir_name == ".":
dir_name = os.path.basename(os.getcwd())
if dir_name.endswith(pro_file_without_suffix):
return dir_name
return dir_name + "/__" + pro_file
all_files = []
previous_dir_name: typing.Optional[str] = None
print("Finding .pro files.")
glob_result = glob.glob(os.path.join(base_path, "**/*.pro"), recursive=True)
def cmake_lists_exists_filter(path):
path_dir_name = os.path.dirname(path)
if os.path.exists(os.path.join(path_dir_name, "CMakeLists.txt")):
return True
return False
def cmake_lists_missing_filter(path):
return not cmake_lists_exists_filter(path)
def qtbase_main_modules_filter(path):
main_modules = [
"corelib",
"network",
"gui",
"widgets",
"testlib",
"printsupport",
"opengl",
"sql",
"dbus",
"concurrent",
"xml",
]
path_suffixes = [f"src/{m}/{m}.pro" for m in main_modules]
for path_suffix in path_suffixes:
if path.endswith(path_suffix):
return True
return False
filter_result = glob_result
filter_func = None
if args.only_existing:
filter_func = cmake_lists_exists_filter
elif args.only_missing:
filter_func = cmake_lists_missing_filter
elif args.only_qtbase_main_modules:
filter_func = qtbase_main_modules_filter
if filter_func:
print("Filtering.")
filter_result = [p for p in filter_result if filter_func(p)]
for pro_file in sorted(filter_result, key=sorter):
dir_name = os.path.dirname(pro_file)
if dir_name == previous_dir_name:
print("Skipping:", pro_file)
else:
all_files.append(pro_file)
previous_dir_name = dir_name
return all_files
def run(all_files: typing.List[str], pro2cmake: str, args: argparse.Namespace) -> typing.List[str]:
failed_files = []
files_count = len(all_files)
workers = os.cpu_count() or 1
if args.only_qtbase_main_modules:
# qtbase main modules take longer than usual to process.
workers = 2
with concurrent.futures.ThreadPoolExecutor(max_workers=workers, initargs=(10,)) as pool:
print("Firing up thread pool executor.")
def _process_a_file(data: typing.Tuple[str, int, int]) -> typing.Tuple[int, str, str]:
filename, index, total = data
pro2cmake_args = []
if sys.platform == "win32":
pro2cmake_args.append(sys.executable)
pro2cmake_args.append(pro2cmake)
if args.is_example:
pro2cmake_args.append("--is-example")
if args.skip_subdirs_projects:
pro2cmake_args.append("--skip-subdirs-project")
pro2cmake_args.append(os.path.basename(filename))
if args.pro2cmake_args:
pro2cmake_args += args.pro2cmake_args
result = subprocess.run(
pro2cmake_args,
cwd=os.path.dirname(filename),
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
)
stdout = f"Converted[{index}/{total}]: {filename}\n"
return result.returncode, filename, stdout + result.stdout.decode()
for return_code, filename, stdout in pool.map(
_process_a_file,
zip(all_files, range(1, files_count + 1), (files_count for _ in all_files)),
):
if return_code:
failed_files.append(filename)
print(stdout)
return failed_files
def main() -> None:
args = parse_command_line()
script_path = os.path.dirname(os.path.abspath(__file__))
pro2cmake = os.path.join(script_path, "pro2cmake.py")
base_path = args.path
all_files = find_all_pro_files(base_path, args)
if args.offset:
all_files = all_files[args.offset :]
if args.count:
all_files = all_files[: args.count]
files_count = len(all_files)
failed_files = run(all_files, pro2cmake, args)
if len(all_files) == 0:
print("No files found.")
if failed_files:
print(
f"The following files were not successfully "
f"converted ({len(failed_files)} of {files_count}):"
)
for f in failed_files:
print(f' "{f}"')
if __name__ == "__main__":
main()
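What the sorter in `find_all_pro_files` buys is easy to miss: it guarantees the "main" .pro file of a directory sorts before its siblings, which are then skipped as duplicates of the same directory. A small standalone check of that ordering, with invented paths:

```python
import os

def sorter(pro_file: str) -> str:
    """Abridged from find_all_pro_files above (the "." cwd branch omitted)."""
    pro_file_without_suffix = pro_file.rsplit("/", 1)[-1][:-4]
    dir_name = os.path.dirname(pro_file)
    if dir_name.endswith(pro_file_without_suffix):
        return dir_name          # main project: bare directory name
    return dir_name + "/__" + pro_file  # sibling: sorts after the main one

files = ["src/corelib/extra.pro", "src/corelib/corelib.pro"]
print(sorted(files, key=sorter))
# ['src/corelib/corelib.pro', 'src/corelib/extra.pro']
```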
avg_line_length: 33.546559 | max_line_length: 99 | alphanum_fraction: 0.619961

---
hexsha: 4a12c61de2ec97afa270afd248661adbf3ce5f46 | size: 5,605 | ext: py | lang: Python
max_stars: ingestion/tests/integration/hive/test_hive_crud.py | inspire99/OpenMetadata @ a650aea59a41ab48a9e203af091ae13a1fdf46c2 | ["Apache-2.0"] | count: null | dates: null
max_issues: ingestion/tests/integration/hive/test_hive_crud.py | inspire99/OpenMetadata @ a650aea59a41ab48a9e203af091ae13a1fdf46c2 | ["Apache-2.0"] | count: null | dates: null
max_forks: ingestion/tests/integration/hive/test_hive_crud.py | inspire99/OpenMetadata @ a650aea59a41ab48a9e203af091ae13a1fdf46c2 | ["Apache-2.0"] | count: null | dates: null

content:
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import pytest
import requests
import socket
from metadata.ingestion.ometa.openmetadata_rest import MetadataServerConfig, OpenMetadataAPIClient
from sqlalchemy.engine import create_engine
from sqlalchemy.inspection import inspect
from metadata.generated.schema.api.data.createDatabase import (
CreateDatabaseEntityRequest,
)
from metadata.generated.schema.api.data.createTable import CreateTableEntityRequest
from metadata.generated.schema.api.services.createDatabaseService import (
CreateDatabaseServiceEntityRequest,
)
from metadata.generated.schema.entity.data.table import Column
from metadata.generated.schema.type.entityReference import EntityReference
from urllib.parse import urlparse
def is_responsive(url):
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return True
        return False
    except requests.exceptions.ConnectionError:
        # The builtin ConnectionError does not catch requests' ConnectionError,
        # so the except clause must name the requests exception explicitly.
        return False
def is_port_open(url):
url_parts = urlparse(url)
hostname = url_parts.hostname
port = url_parts.port
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((hostname, port))
return True
except socket.error:
return False
finally:
s.close()
def sleep(timeout_s):
print(f"sleeping for {timeout_s} seconds")
n = len(str(timeout_s))
for i in range(timeout_s, 0, -1):
print(f"{i:>{n}}", end="\r", flush=True)
time.sleep(1)
print(f"{'':>{n}}", end="\n", flush=True)
def status(r):
if r.status_code == 200 or r.status_code == 201:
return 1
else:
return 0
def create_delete_table(client):
databases = client.list_databases()
columns = [
Column(name="id", dataType="INT", dataLength=1),
Column(name="name", dataType="VARCHAR", dataLength=1),
]
table = CreateTableEntityRequest(
name="test1", columns=columns, database=databases[0].id
)
created_table = client.create_or_update_table(table)
if table.name.__root__ == created_table.name.__root__:
requests.delete(
"http://localhost:8585/api/v1/tables/{}".format(created_table.id.__root__)
)
return 1
else:
requests.delete(
"http://localhost:8585/api/v1/tables/{}".format(created_table.id.__root__)
)
return 0
def create_delete_database(client):
data = {
"jdbc": {"connectionUrl": "hive://localhost/default", "driverClass": "jdbc"},
"name": "temp_local_hive",
"serviceType": "Hive",
"description": "local hive env",
}
create_hive_service = CreateDatabaseServiceEntityRequest(**data)
hive_service = client.create_database_service(create_hive_service)
create_database_request = CreateDatabaseEntityRequest(
name="dwh", service=EntityReference(id=hive_service.id, type="databaseService")
)
created_database = client.create_database(create_database_request)
resp = create_delete_table(client)
print(resp)
client.delete_database(created_database.id.__root__)
client.delete_database_service(hive_service.id.__root__)
return resp
@pytest.fixture(scope="session")
def hive_service(docker_ip, docker_services):
"""Ensure that Docker service is up and responsive."""
port = docker_services.port_for("hive-server", 10000)
print(f"HIVE is running on port {port}")
timeout_s = 120
sleep(timeout_s)
url = "hive://localhost:10000/"
docker_services.wait_until_responsive(
timeout=timeout_s, pause=0.1, check=lambda: is_port_open(url)
)
engine = create_engine(url)
inspector = inspect(engine)
return inspector
def test_check_schema(hive_service):
inspector = hive_service
schemas = []
for schema in inspector.get_schema_names():
schemas.append(schema)
if "default" in schemas:
assert 1
else:
assert 0
def test_read_tables(hive_service):
inspector = hive_service
check_tables = [
"metadata_array_struct_test",
"metadata_struct_test",
"metadata_test_table",
"test_check",
]
tables = []
for schema in inspector.get_schema_names():
for table in inspector.get_table_names(schema):
tables.append(table)
if set(tables) == set(check_tables):
assert 1
else:
assert 0
def test_check_table():
is_responsive("http://localhost:8585/api/v1/health-check")
metadata_config = MetadataServerConfig.parse_obj(
{"api_endpoint": "http://localhost:8585/api", "auth_provider_type": "no-auth"}
)
client = OpenMetadataAPIClient(metadata_config)
databases = client.list_databases()
if len(databases) > 0:
assert create_delete_table(client)
else:
assert create_delete_database(client)
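The `docker_ip` and `docker_services` fixtures consumed by `hive_service` come from the pytest-docker plugin. A hedged sketch of the conftest override that tells the plugin where the Hive compose file lives (the path below is an assumption and would need to match the repository layout):

```python
import pytest

@pytest.fixture(scope="session")
def docker_compose_file(pytestconfig):
    # pytest-docker resolves its services from this fixture.
    return str(pytestconfig.rootpath / "tests" / "integration" / "hive" / "docker-compose.yml")
```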
avg_line_length: 32.970588 | max_line_length: 98 | alphanum_fraction: 0.694737

---
hexsha: 4a12c67f40a18535274b517b880d3de3b075acdc | size: 2,025 | ext: py | lang: Python
max_stars: talleres_inov_docente/figures/plot_rbf_svm_parameters.py | jfcaballero/Tutorial-sobre-scikit-learn-abreviado @ 1e2aa1f9132c277162135a5463068801edab8d15 | ["CC0-1.0"] | count: 311 | 2017-03-01T12:55:03.000Z – 2022-03-05T08:51:48.000Z
max_issues: talleres_inov_docente/figures/plot_rbf_svm_parameters.py | jfcaballero/Tutorial-sobre-scikit-learn-abreviado @ 1e2aa1f9132c277162135a5463068801edab8d15 | ["CC0-1.0"] | count: 21 | 2017-05-04T19:29:22.000Z – 2018-06-19T15:37:21.000Z
max_forks: talleres_inov_docente/figures/plot_rbf_svm_parameters.py | jfcaballero/Tutorial-sobre-scikit-learn-abreviado @ 1e2aa1f9132c277162135a5463068801edab8d15 | ["CC0-1.0"] | count: 249 | 2017-03-02T13:22:44.000Z – 2022-02-14T20:34:45.000Z

content:
import matplotlib.pyplot as plt
import numpy as np
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
from .plot_2d_separator import plot_2d_separator
def make_handcrafted_dataset():
# a carefully hand-designed dataset lol
X, y = make_blobs(centers=2, random_state=4, n_samples=30)
y[np.array([7, 27])] = 0
    mask = np.ones(len(X), dtype=bool)  # np.bool was removed in modern NumPy; the builtin bool works everywhere
mask[np.array([0, 1, 5, 26])] = 0
X, y = X[mask], y[mask]
return X, y
def plot_rbf_svm_parameters():
X, y = make_handcrafted_dataset()
    fig, axes = plt.subplots(1, 4, figsize=(15, 3))  # four axes, otherwise zip() silently drops C=100
    for ax, C in zip(axes, [1e0, 5, 10, 100]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(kernel='rbf', C=C).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("C = %f" % C)
fig, axes = plt.subplots(1, 4, figsize=(15, 3))
for ax, gamma in zip(axes, [0.1, .5, 1, 10]):
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
svm = SVC(gamma=gamma, kernel='rbf', C=1).fit(X, y)
plot_2d_separator(svm, X, ax=ax, eps=.5)
ax.set_title("gamma = %f" % gamma)
def plot_svm(log_C, log_gamma):
X, y = make_handcrafted_dataset()
C = 10. ** log_C
gamma = 10. ** log_gamma
svm = SVC(kernel='rbf', C=C, gamma=gamma).fit(X, y)
plt.figure()
ax = plt.gca()
plot_2d_separator(svm, X, ax=ax, eps=.5)
# plot data
ax.scatter(X[:, 0], X[:, 1], s=150, c=np.array(['red', 'blue'])[y])
# plot support vectors
sv = svm.support_vectors_
ax.scatter(sv[:, 0], sv[:, 1], s=230, facecolors='none', zorder=10, linewidth=3)
ax.set_title("C = %.4f gamma = %.4f" % (C, gamma))
def plot_svm_interactive():
from ipywidgets import interactive, FloatSlider
C_slider = FloatSlider(min=-3, max=3, step=.1, value=0, readout=False)
gamma_slider = FloatSlider(min=-2, max=2, step=.1, value=0, readout=False)
return interactive(plot_svm, log_C=C_slider, log_gamma=gamma_slider)
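A hedged usage sketch (assumes a Jupyter notebook with ipywidgets enabled and the `figures` package importable):

```python
from figures.plot_rbf_svm_parameters import plot_svm_interactive

# Renders two sliders sweeping log10(C) over [-3, 3] and log10(gamma)
# over [-2, 2], re-fitting the RBF SVM on each change.
plot_svm_interactive()
```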
avg_line_length: 34.913793 | max_line_length: 84 | alphanum_fraction: 0.609877

---
hexsha: 4a12c6ae38003887da6260d273712372532c24b0 | size: 165 | ext: py | lang: Python
max_stars: GRADE 9/Python/BraydenViana-Python-Video14.py | i1470s/School-Work @ e00843f3506b2ad674dce5e47ce3321002cc23e5 | ["MIT"] | count: null | dates: null
max_issues: GRADE 9/Python/BraydenViana-Python-Video14.py | i1470s/School-Work @ e00843f3506b2ad674dce5e47ce3321002cc23e5 | ["MIT"] | count: null | dates: null
max_forks: GRADE 9/Python/BraydenViana-Python-Video14.py | i1470s/School-Work @ e00843f3506b2ad674dce5e47ce3321002cc23e5 | ["MIT"] | count: null | dates: null

content:
def get_gender(sex='unknown'):
if sex == 'm':
sex = 'male'
elif sex == 'f':
sex = 'female'
print(sex)
get_gender('m')
get_gender('f')
get_gender()
avg_line_length: 15 | max_line_length: 31 | alphanum_fraction: 0.563636

---
hexsha: 4a12c73aedc1375af8edfce9e762b7270b2f5ddd | size: 2,346 | ext: py | lang: Python
max_stars: gameplay/connection_handler.py | espirin/svoya @ 35168387c737dad5d2acc87b775d4e0a38923f2f | ["MIT"] | count: null | dates: null
max_issues: gameplay/connection_handler.py | espirin/svoya @ 35168387c737dad5d2acc87b775d4e0a38923f2f | ["MIT"] | count: null | dates: null
max_forks: gameplay/connection_handler.py | espirin/svoya @ 35168387c737dad5d2acc87b775d4e0a38923f2f | ["MIT"] | count: null | dates: null

content:
from flask_login import current_user
from flask_socketio import join_room, leave_room
from app import socketio, db
from auth.auth import authenticated_only
from gameplay.state_handler import update_clients
from model import Game, User
@socketio.on('connect_player', namespace='/player')
@authenticated_only
def connect_player(game_id):
game = Game.query.filter(Game.id == game_id).first()
if game is None:
return "Игра не существует"
# Add player to the players list
game.players.append(current_user)
db.session.commit()
# Create score entry for the player
if current_user.username not in game.scores:
game.scores[current_user.username] = 0
db.session.commit()
# Add player to the current room
join_room(game_id)
update_clients(game_id)
print(f"Player {current_user.username} connected to game {game_id}")
@socketio.on('disconnect', namespace='/player')
@authenticated_only
def disconnect_player():
game = Game.query.filter(Game.players.any(User.username == current_user.username)).first()
if game is not None: # Check if the game was exited from another browser tab
game.players.remove(current_user)
db.session.commit()
leave_room(game.id)
update_clients(game.id)
print(f"Player {current_user.username} disconnected from game {game.id}")
@socketio.on('connect_host', namespace='/host')
@authenticated_only
def connect_host(game_id):
game = Game.query.filter(Game.id == game_id).first()
if game is None:
return "Игра не существует"
if game.host is not None:
return "Место ведущего уже занято"
game.host = current_user
db.session.commit()
join_room(game_id)
update_clients(game_id)
print(f"Host {current_user.username} connected to game {game_id}")
@socketio.on('disconnect', namespace='/host')
@authenticated_only
def disconnect_host():
game = Game.query.filter(Game.host == current_user).first()
if game is not None: # Check if the game was exited from another browser tab
game.host = None
db.session.commit()
leave_room(game.id)
update_clients(game.id)
print(f"Host {current_user.username} disconnected from game {game.id}")
@socketio.on('ping', namespace='/')
@authenticated_only
def ping():
return "pong"
avg_line_length: 29.325 | max_line_length: 94 | alphanum_fraction: 0.706309

---
hexsha: 4a12ca2edf09677273648a4b8bfd3a6b231e9133 | size: 2,917 | ext: py | lang: Python
max_stars: core/main.py | meteosangue/meteosangue @ dc5fb27cfd9060b909907a0c68066dc2f3ca1bd3 | ["MIT"] | count: 6 | 2016-09-23T08:41:36.000Z – 2021-12-13T21:03:43.000Z
max_issues: core/main.py | meteosangue/meteosangue @ dc5fb27cfd9060b909907a0c68066dc2f3ca1bd3 | ["MIT"] | count: 17 | 2016-10-15T18:36:19.000Z – 2022-03-11T23:15:57.000Z
max_forks: core/main.py | meteosangue/meteosangue @ dc5fb27cfd9060b909907a0c68066dc2f3ca1bd3 | ["MIT"] | count: 3 | 2016-09-23T08:41:37.000Z – 2020-02-01T03:52:16.000Z

content:
# -*- coding: utf-8 -*-
import os
import time
import tweepy
from django.conf import settings
from django.core.files import File
from django.db.models import Q
from datetime import datetime
from lxml import html
from tempfile import NamedTemporaryFile
from selenium import webdriver
from .exceptions import MeteoSangueException
from .models import BloodGroup, Log
from .posters import tweet_status, telegram_status, facebook_status
from .posters_register import posters_register
from .utils import crs_to_date
"""
Register posters
"""
posters_register.register_poster(tweet_status, 'twitter_done')
posters_register.register_poster(telegram_status, 'telegram_done')
posters_register.register_poster(facebook_status, 'facebook_done')
"""
Method to format blood groups status
"""
def get_blood_group_list(blood_groups, icon, group_status, group_desc):
blood_groups_for_status = blood_groups.values_list('groupid', flat=True).filter(status=group_status)
if len(blood_groups_for_status):
return '{0} {1}: {2}\n'.format(icon, group_desc, ' , '.join(blood_groups_for_status))
else:
return ''
"""
Method to post blood weather on social
"""
def post_blood_weather(blood_groups, log):
status = ''
status += get_blood_group_list(blood_groups, '🚨', 'Z', 'Emergenza')
status += get_blood_group_list(blood_groups, '🆘', 'U', 'Urgente')
status += get_blood_group_list(blood_groups, '💜', 'F', 'Fragile')
status += get_blood_group_list(blood_groups, '💚', 'S', 'Stabile')
status += get_blood_group_list(blood_groups, '💛', 'E', 'Eccedenza')
posters_register.run(status, log)
log.save()
"""
Method to fetch blood groups
"""
def update_blood_groups():
driver = webdriver.PhantomJS()
driver.implicitly_wait(10)
driver.get("https://web2.e.toscana.it/crs/meteo/")
time.sleep(settings.FETCH_SITE_WAIT)
f = NamedTemporaryFile(delete=False)
driver.set_window_size(450, 650)
time.sleep(settings.FETCH_SITE_WAIT)
driver.save_screenshot(f.name)
tree = html.fromstring(driver.page_source)
driver.quit()
groups = tree.xpath('//input[@type="hidden"]')
update_time = crs_to_date(tree.xpath('//div[@id="aggiornamento"]/text()')[0])
log = None
if os.path.getsize(f.name) > 3000:
log, created = Log.objects.get_or_create(datetime=update_time)
if created:
Log.objects.filter(~Q(datetime=update_time)).delete()
log.image.save(
update_time.strftime("%Y-%m-%d_%H:%M:%S") + '.png',
File(f)
)
os.unlink(f.name)
for group in groups:
group_id = group.name.replace('N', '-').replace('P', '+')
dbgroup, created = BloodGroup.objects.get_or_create(groupid=group_id)
dbgroup.status = group.value
dbgroup.save()
f.close()
return BloodGroup.objects.all(), log
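`posters_register` is imported above but not part of this record. As a hedged sketch (the flag names like `twitter_done` come from the `register_poster` calls above; everything else is an assumption), such a register would fan one status out to each channel exactly once, using the per-channel "done" flags on `Log`:

```python
class PostersRegister:
    """Illustrative stand-in for the real posters_register object."""

    def __init__(self):
        self._posters = []  # (poster callable, name of the Log "done" flag)

    def register_poster(self, poster, done_field):
        self._posters.append((poster, done_field))

    def run(self, status, log):
        for poster, done_field in self._posters:
            if not getattr(log, done_field):  # skip channels already posted
                poster(status)
                setattr(log, done_field, True)
```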
avg_line_length: 31.706522 | max_line_length: 104 | alphanum_fraction: 0.686664

---
hexsha: 4a12ca312a4897f57b2b429725d12b986022e6db | size: 1,044 | ext: py | lang: Python
max_stars: authentik/providers/oauth2/migrations/0005_auto_20200920_1240.py | BeryJu/passbook @ 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | count: 15 | 2020-01-05T09:09:57.000Z – 2020-11-28T05:27:39.000Z
max_issues: authentik/providers/oauth2/migrations/0005_auto_20200920_1240.py | BeryJu/passbook @ 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | count: 302 | 2020-01-21T08:03:59.000Z – 2020-12-04T05:04:57.000Z
max_forks: authentik/providers/oauth2/migrations/0005_auto_20200920_1240.py | BeryJu/passbook @ 350f0d836580f4411524614f361a76c4f27b8a2d | ["MIT"] | count: 3 | 2020-03-04T08:21:59.000Z – 2020-08-01T20:37:18.000Z

content:
# Generated by Django 3.1.1 on 2020-09-20 12:40
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
(
"authentik_providers_oauth2",
"0004_remove_oauth2provider_post_logout_redirect_uris",
),
]
operations = [
migrations.AlterField(
model_name="oauth2provider",
name="response_type",
field=models.TextField(
choices=[
("code", "code (Authorization Code Flow)"),
("id_token", "id_token (Implicit Flow)"),
("id_token token", "id_token token (Implicit Flow)"),
("code token", "code token (Hybrid Flow)"),
("code id_token", "code id_token (Hybrid Flow)"),
("code id_token token", "code id_token token (Hybrid Flow)"),
],
default="code",
help_text="Response Type required by the client.",
),
),
]
avg_line_length: 31.636364 | max_line_length: 81 | alphanum_fraction: 0.518199

---
hexsha: 4a12cbd986762475951d01c43b1db1f34a124497 | size: 3,039 | ext: py | lang: Python
max_stars: backend/monitor/services.py | davidpalves/repository-monitor @ 0be5fbf1d5d404aa9e4952a0f02a44f1662efa91 | ["MIT"] | count: 1 | 2019-11-26T11:36:20.000Z – 2019-11-26T11:36:20.000Z
max_issues: backend/monitor/services.py | davidpalves/repository-monitor @ 0be5fbf1d5d404aa9e4952a0f02a44f1662efa91 | ["MIT"] | count: 11 | 2021-03-10T00:05:55.000Z – 2022-02-26T20:35:53.000Z
max_forks: backend/monitor/services.py | davidpierre21/repository-monitor @ 0be5fbf1d5d404aa9e4952a0f02a44f1662efa91 | ["MIT"] | count: null | dates: null

content:
from datetime import datetime, timedelta
from github import Github, UnknownObjectException, GithubException
from rest_framework.exceptions import ValidationError, NotFound
from django.conf import settings
from .models import Repository, Author, Commit
def create_repository(user, full_repository_name):
github = Github(user.github.access_token)
try:
name = full_repository_name.split('/')[1]
owner = full_repository_name.split('/')[0]
if Repository.objects.filter(name=name,
owner_login=owner,
users__username=user.username):
raise ValidationError(
f'The repository {name} was already added.')
retrieved_repository = github.get_user(owner).get_repo(name)
create_webhook(name=name, user=user)
repository = Repository.objects.create(
full_name=retrieved_repository.full_name,
name=retrieved_repository.name,
description=retrieved_repository.description,
owner_login=retrieved_repository.owner.login,
url=retrieved_repository.html_url
)
last_month = datetime.today() - timedelta(days=30)
commits = retrieved_repository.get_commits(since=last_month)
commits_list = []
for commit in commits:
author, _ = Author.objects.get_or_create(
name=commit.commit.author.name,
email=commit.commit.author.email,
)
commit_to_be_added = Commit(
repository=repository,
author=author,
sha=commit.sha,
message=commit.commit.message,
date=commit.commit.author.date,
                url=commit.html_url
)
commits_list.append(commit_to_be_added)
Commit.objects.bulk_create(commits_list)
repository.users.add(user)
return repository
except IndexError:
raise ValidationError('Repository name not in the correct format.')
except UnknownObjectException:
raise NotFound('Repository not found.')
def create_webhook(user, name):
github = Github(user.github.access_token)
try:
hook_configs = {}
hook_configs['url'] = settings.APP_BASE_URL + '/hooks/'
hook_configs['content_type'] = 'json'
hook_configs['secret'] = settings.GITHUB_WEBHOOK_KEY
retrieved_repository = github.get_user().get_repo(name)
retrieved_repository.create_hook(
name="web",
config=hook_configs,
events=["push"],
active=True
)
except GithubException as ex:
print(ex.data)
if 'errors' in ex.data:
for error in ex.data['errors']:
if error['message'] == 'Hook already exists on this repository':
return
raise NotFound("Could not create webhook.\
Please, check your repository permissions")
avg_line_length: 31.65625 | max_line_length: 80 | alphanum_fraction: 0.615992

---
hexsha: 4a12cdfed63c288397baca124216afbf0492dff5 | size: 23,585 | ext: py | lang: Python
max_stars: indico/core/db/sqlalchemy/protection.py | wasm-network/indico @ 6aeebc18acd642a3304f0afb0493dcfbdd7addbb | ["MIT"] | count: null | dates: null
max_issues: indico/core/db/sqlalchemy/protection.py | wasm-network/indico @ 6aeebc18acd642a3304f0afb0493dcfbdd7addbb | ["MIT"] | count: null | dates: null
max_forks: indico/core/db/sqlalchemy/protection.py | wasm-network/indico @ 6aeebc18acd642a3304f0afb0493dcfbdd7addbb | ["MIT"] | count: null | dates: null

content:
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import has_request_context, session
from sqlalchemy import inspect
from sqlalchemy.event import listens_for
from sqlalchemy.ext.declarative import declared_attr
from sqlalchemy.ext.hybrid import hybrid_property
from sqlalchemy.orm.base import NEVER_SET, NO_VALUE
from indico.core import signals
from indico.core.db import db
from indico.core.db.sqlalchemy import PyIntEnum
from indico.core.db.sqlalchemy.principals import EmailPrincipal, PrincipalType
from indico.core.permissions import get_available_permissions
from indico.util.caching import memoize_request
from indico.util.i18n import _
from indico.util.signals import values_from_signal
from indico.util.struct.enum import RichIntEnum
from indico.util.user import iter_acl
from indico.web.util import jsonify_template
class ProtectionMode(RichIntEnum):
__titles__ = [_('Public'), _('Inheriting'), _('Protected')]
public = 0
inheriting = 1
protected = 2
class ProtectionMixin(object):
#: The protection modes that are not allowed. Can be overridden
#: in the model that is using the mixin. Affects the table
#: structure, so any changes to it should go along with a migration
#: step! By default, the `public` mode is disallowed since it does
#: not make much sense in most cases to make something public even
#: though its parent object is private (or inheriting).
disallowed_protection_modes = frozenset({ProtectionMode.public})
#: The default protection mode a new object has
default_protection_mode = ProtectionMode.inheriting
#: Whether objects with inheriting protection may have their own
#: ACL entries (which will grant access even if the user cannot
#: access the parent object).
inheriting_have_acl = False
#: Whether the object can have an access key that grants read access
allow_access_key = False
#: Whether the object can have contact information shown in case of
#: no access
allow_no_access_contact = False
@classmethod
def register_protection_events(cls):
"""Registers sqlalchemy events needed by this mixin.
Call this method after the definition of a model which uses
this mixin class.
"""
@listens_for(cls.protection_mode, 'set')
def _set_protection_mode(target, value, oldvalue, *unused):
if oldvalue in (NEVER_SET, NO_VALUE):
return
if value != oldvalue:
signals.acl.protection_changed.send(type(target), obj=target, mode=value, old_mode=oldvalue)
@declared_attr
def protection_mode(cls):
return db.Column(
PyIntEnum(ProtectionMode, exclude_values=cls.disallowed_protection_modes),
nullable=False,
default=cls.default_protection_mode
)
@declared_attr
def access_key(cls):
if cls.allow_access_key:
return db.Column(
db.String,
nullable=False,
default=''
)
@declared_attr
def own_no_access_contact(cls):
if cls.allow_no_access_contact:
return db.Column(
'no_access_contact',
db.String,
nullable=False,
default=''
)
@property
def no_access_contact(self):
return (self.own_no_access_contact
if self.own_no_access_contact or not self.protection_parent
else self.protection_parent.no_access_contact)
@hybrid_property
def is_public(self):
return self.protection_mode == ProtectionMode.public
@hybrid_property
def is_inheriting(self):
return self.protection_mode == ProtectionMode.inheriting
@hybrid_property
def is_self_protected(self):
"""Checks whether the object itself is protected.
If you also care about inherited protection from a parent,
use `is_protected` instead.
"""
return self.protection_mode == ProtectionMode.protected
@property
def is_protected(self):
"""
Checks whether ths object is protected, either by itself or
by inheriting from a protected object.
"""
if self.is_inheriting:
return self.protection_parent.is_protected
else:
return self.is_self_protected
@property
def protection_repr(self):
protection_mode = self.protection_mode.name if self.protection_mode is not None else None
return 'protection_mode={}'.format(protection_mode)
@property
def protection_parent(self):
"""The parent object to consult for ProtectionMode.inheriting"""
raise NotImplementedError
def _check_can_access_override(self, user, allow_admin, authorized=None):
# Trigger signals for protection overrides
rv = values_from_signal(signals.acl.can_access.send(type(self), obj=self, user=user, allow_admin=allow_admin,
authorized=authorized),
single_value=True)
# in case of contradictory results (shouldn't happen at all)
# we stay on the safe side and deny access
return all(rv) if rv else None
@staticmethod
def is_user_admin(user):
return user.is_admin
@memoize_request
def can_access(self, user, allow_admin=True):
"""Checks if the user can access the object.
:param user: The :class:`.User` to check. May be None if the
user is not logged in.
:param allow_admin: If admin users should always have access
"""
override = self._check_can_access_override(user, allow_admin=allow_admin)
if override is not None:
return override
# Usually admins can access everything, so no need for checks
if allow_admin and user and type(self).is_user_admin(user):
rv = True
# If there's a valid access key we can skip all other ACL checks
elif self.allow_access_key and self.check_access_key():
rv = True
elif self.protection_mode == ProtectionMode.public:
# if it's public we completely ignore the parent protection
# this is quite ugly which is why it should only be allowed
# in rare cases (e.g. events which might be in a protected
# category but should be public nonetheless)
rv = True
elif self.protection_mode == ProtectionMode.protected:
# if it's protected, we also ignore the parent protection
# and only check our own ACL
if any(user in entry.principal for entry in iter_acl(self.acl_entries)):
rv = True
elif isinstance(self, ProtectionManagersMixin):
rv = self.can_manage(user, allow_admin=allow_admin)
else:
rv = False
elif self.protection_mode == ProtectionMode.inheriting:
# if it's inheriting, we only check the parent protection
# unless `inheriting_have_acl` is set, in which case we
# might not need to check the parents at all
if self.inheriting_have_acl and any(user in entry.principal for entry in iter_acl(self.acl_entries)):
rv = True
else:
# the parent can be either an object inheriting from this
# mixin or a legacy object with an AccessController
parent = self.protection_parent
if parent is None:
# This should be the case for the top-level object,
# i.e. the root category, which shouldn't allow
# ProtectionMode.inheriting as it makes no sense.
raise TypeError('protection_parent of {} is None'.format(self))
elif hasattr(parent, 'can_access'):
rv = parent.can_access(user, allow_admin=allow_admin)
else:
raise TypeError('protection_parent of {} is of invalid type {} ({})'.format(self, type(parent),
parent))
else:
# should never happen, but since this is a sensitive area
# we better fail loudly if we have garbage
raise ValueError('Invalid protection mode: {}'.format(self.protection_mode))
override = self._check_can_access_override(user, allow_admin=allow_admin, authorized=rv)
return override if override is not None else rv
def check_access_key(self, access_key=None):
"""Check whether an access key is valid for the object.
:param access_key: Use the given access key instead of taking
it from the session.
"""
assert self.allow_access_key
if not self.access_key:
return False
if access_key is None:
if not has_request_context():
return False
access_key = session.get('access_keys', {}).get(self._access_key_session_key)
if not access_key:
return False
return self.access_key == access_key
def set_session_access_key(self, access_key):
"""Store an access key for the object in the session.
:param access_key: The access key to store. It is not checked
for validity.
"""
assert self.allow_access_key
session.setdefault('access_keys', {})[self._access_key_session_key] = access_key
session.modified = True
@property
def _access_key_session_key(self):
cls, pks = inspect(self).identity_key[:2]
return '{}-{}'.format(cls.__name__, '-'.join(map(unicode, pks)))
def update_principal(self, principal, read_access=None, quiet=False):
"""Updates access privileges for the given principal.
:param principal: A `User`, `GroupProxy` or `EmailPrincipal` instance.
:param read_access: If the principal should have explicit read
access to the object.
:param quiet: Whether the ACL change should happen silently.
This indicates to acl change signal handlers
that the change should not be logged, trigger
emails or result in similar notifications.
:return: The ACL entry for the given principal or ``None`` if
he was removed (or not added).
"""
principal = _resolve_principal(principal)
principal_class, entry = _get_acl_data(self, principal)
if entry is None and read_access:
entry = principal_class(principal=principal)
self.acl_entries.add(entry)
signals.acl.entry_changed.send(type(self), obj=self, principal=principal, entry=entry, is_new=True,
old_data=None, quiet=quiet)
return entry
elif entry is not None and not read_access:
self.acl_entries.remove(entry)
# Flush in case the same principal is added back afterwards.
# Not flushing in other cases (adding/modifying) is intentional
# as this might happen on a newly created object which is not yet
# flushable due to missing data
db.session.flush()
signals.acl.entry_changed.send(type(self), obj=self, principal=principal, entry=None, is_new=False,
old_data=None, quiet=quiet)
return None
return entry
def remove_principal(self, principal, quiet=False):
"""Revokes all access privileges for the given principal.
This method doesn't do anything if the user is not in the
object's ACL.
:param principal: A `User`, `GroupProxy` or `EmailPrincipal` instance.
:param quiet: Whether the ACL change should happen silently.
This indicates to acl change signal handlers
that the change should not be logged, trigger
emails or result in similar notifications.
"""
principal = _resolve_principal(principal)
entry = _get_acl_data(self, principal)[1]
if entry is not None:
signals.acl.entry_changed.send(type(self), obj=self, principal=principal, entry=None, is_new=False,
old_data=entry.current_data, quiet=quiet)
self.acl_entries.remove(entry)
def get_inherited_acl(self):
own_acl = {entry.principal for entry in self.acl_entries}
parent_acl = self.protection_parent.get_access_list(skip_managers=True)
return [x for x in parent_acl if x not in own_acl]
class ProtectionManagersMixin(ProtectionMixin):
@property
def all_manager_emails(self):
"""Return the emails of all managers"""
# We ignore email principals here. They never signed up in indico anyway...
return {p.principal.email
for p in self.acl_entries
if p.type == PrincipalType.user and p.has_management_permission()}
@memoize_request
def can_manage(self, user, permission=None, allow_admin=True, check_parent=True, explicit_permission=False):
"""Checks if the user can manage the object.
:param user: The :class:`.User` to check. May be None if the
user is not logged in.
:param: permission: The management permission that is needed for
the check to succeed. If not specified, full
management privs are required. May be set to
the string ``'ANY'`` to check if the user has
any management privileges. If the user has
`full_access` privileges, he's assumed to have
all possible permissions.
:param allow_admin: If admin users should always have access
:param check_parent: If the parent object should be checked.
In this case the permission is ignored; only
full management access is inherited to
children.
:param explicit_permission: If the specified permission should be checked
explicitly instead of short-circuiting
the check for Indico admins or managers.
When this option is set to ``True``, the
values of `allow_admin` and `check_parent`
are ignored. This also applies if `permission`
is None in which case this argument being
set to ``True`` is equivalent to
`allow_admin` and `check_parent` being set
to ``False``.
"""
if permission is not None and permission != 'ANY' and permission not in get_available_permissions(type(self)):
raise ValueError("permission '{}' is not valid for '{}' objects".format(permission, type(self).__name__))
if user is None:
# An unauthorized user is never allowed to perform management operations.
# Not even signals may override this since management code generally
# expects session.user to be not None.
return False
# Trigger signals for protection overrides
rv = values_from_signal(signals.acl.can_manage.send(type(self), obj=self, user=user, permission=permission,
allow_admin=allow_admin, check_parent=check_parent,
explicit_permission=explicit_permission),
single_value=True)
if rv:
# in case of contradictory results (shouldn't happen at all)
# we stay on the safe side and deny access
return all(rv)
# Usually admins can access everything, so no need for checks
if not explicit_permission and allow_admin and type(self).is_user_admin(user):
return True
if any(user in entry.principal
for entry in iter_acl(self.acl_entries)
if entry.has_management_permission(permission,
explicit=(explicit_permission and permission is not None))):
return True
if not check_parent or explicit_permission:
return False
# the parent can be either an object inheriting from this
# mixin or a legacy object with an AccessController
parent = self.protection_parent
if parent is None:
# This should be the case for the top-level object,
# i.e. the root category
return False
elif hasattr(parent, 'can_manage'):
return parent.can_manage(user, allow_admin=allow_admin)
else:
raise TypeError('protection_parent of {} is of invalid type {} ({})'.format(self, type(parent), parent))
def update_principal(self, principal, read_access=None, full_access=None, permissions=None, add_permissions=None,
del_permissions=None, quiet=False):
"""Updates access privileges for the given principal.
If the principal is not in the ACL, it will be added if
necessary. If the changes remove all its privileges, it
will be removed from the ACL.
:param principal: A `User`, `GroupProxy` or `EmailPrincipal` instance.
:param read_access: If the principal should have explicit read
access to the object. This does not grant
any management permissions - it simply
grants access to an otherwise protected
object.
:param full_access: If the principal should have full management
access.
:param permissions: set -- The management permissions to grant.
Any existing permissions will be replaced.
:param add_permissions: set -- Management permissions to add.
:param del_permissions: set -- Management permissions to remove.
:param quiet: Whether the ACL change should happen silently.
This indicates to acl change signal handlers
that the change should not be logged, trigger
emails or result in similar notifications.
:return: The ACL entry for the given principal or ``None`` if
he was removed (or not added).
"""
if permissions is not None and (add_permissions or del_permissions):
raise ValueError('add_permissions/del_permissions and permissions are mutually exclusive')
principal = _resolve_principal(principal)
principal_class, entry = _get_acl_data(self, principal)
new_entry = False
if entry is None:
if not permissions and not add_permissions and not full_access and not read_access:
# not in ACL and no permissions to add
return None
entry = principal_class(principal=principal, read_access=False, full_access=False, permissions=[])
self.acl_entries.add(entry)
new_entry = True
old_data = entry.current_data
# update permissions
new_permissions = set(entry.permissions)
if permissions is not None:
new_permissions = permissions
else:
if add_permissions:
new_permissions |= add_permissions
if del_permissions:
new_permissions -= del_permissions
invalid_permissions = new_permissions - get_available_permissions(type(self)).viewkeys()
if invalid_permissions:
raise ValueError('Invalid permissions: {}'.format(', '.join(invalid_permissions)))
entry.permissions = sorted(new_permissions)
# update read privs
if read_access is not None:
entry.read_access = read_access
# update full management privs
if full_access is not None:
entry.full_access = full_access
# remove entry from acl if no privileges
if not entry.read_access and not entry.full_access and not entry.permissions:
self.acl_entries.remove(entry)
# Flush in case the same principal is added back afterwards.
# Not flushing in other cases (adding/modifying) is intentional
# as this might happen on a newly created object which is not yet
# flushable due to missing data
db.session.flush()
signals.acl.entry_changed.send(type(self), obj=self, principal=principal, entry=None, is_new=False,
old_data=old_data, quiet=quiet)
return None
signals.acl.entry_changed.send(type(self), obj=self, principal=principal, entry=entry, is_new=new_entry,
old_data=old_data, quiet=quiet)
return entry
def get_manager_list(self, recursive=False):
managers = {x.principal for x in self.acl_entries if x.has_management_permission()}
if recursive and self.protection_parent:
managers.update(self.protection_parent.get_manager_list(recursive=True))
return managers
def get_access_list(self, skip_managers=False, skip_self_acl=False):
read_access_list = {x.principal for x in self.acl_entries if x.read_access} if not skip_self_acl else set()
if self.is_self_protected:
return (read_access_list | self.get_manager_list(recursive=True)) if not skip_managers else read_access_list
elif self.is_inheriting and self.is_protected:
access_list = (read_access_list | self.get_manager_list()) if not skip_managers else read_access_list
if self.protection_parent:
access_list.update(self.protection_parent.get_access_list(skip_managers=skip_managers))
return access_list
else:
return set()
def _get_acl_data(obj, principal):
"""Helper function to get the necessary data for ACL modifications
:param obj: A `ProtectionMixin` instance
:param principal: A User or GroupProxy uinstance
:return: A tuple containing the principal class and the existing
ACL entry for the given principal if it exists.
"""
principal_class = type(obj).acl_entries.prop.mapper.class_
entry = next((x for x in obj.acl_entries if x.principal == principal), None)
return principal_class, entry
def _resolve_principal(principal):
"""Helper function to convert an email principal to a user if possible
:param principal: A `User`, `GroupProxy` or `EmailPrincipal` instance.
"""
if isinstance(principal, EmailPrincipal):
return principal.user or principal
return principal
def render_acl(obj):
return jsonify_template('_access_list.html', acl=obj.get_inherited_acl())
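A hedged sketch (model name and table invented) of how a model adopts the mixin: supply a `protection_parent`, and call `register_protection_events()` after the class definition, as the classmethod's docstring instructs:

```python
class ExampleNote(ProtectionMixin, db.Model):
    __tablename__ = 'example_notes'
    id = db.Column(db.Integer, primary_key=True)

    @property
    def protection_parent(self):
        # Top-level objects have no parent; such a class should also keep
        # ProtectionMode.inheriting out of its allowed modes (see
        # disallowed_protection_modes above).
        return None

ExampleNote.register_protection_events()
```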
avg_line_length: 46.518738 | max_line_length: 120 | alphanum_fraction: 0.628662

---
hexsha: 4a12ce7c1317cf3c99e50aa1a07f486f7bf85006 | size: 251 | ext: py | lang: Python
max_stars: CursoemVideo2/Desafio 7.py | JefAlmeida1/Python @ 2a615cf17491739f6a988abff9f4fe7be0ef7b9a | ["MIT"] | count: null | dates: null
max_issues: CursoemVideo2/Desafio 7.py | JefAlmeida1/Python @ 2a615cf17491739f6a988abff9f4fe7be0ef7b9a | ["MIT"] | count: null | dates: null
max_forks: CursoemVideo2/Desafio 7.py | JefAlmeida1/Python @ 2a615cf17491739f6a988abff9f4fe7be0ef7b9a | ["MIT"] | count: null | dates: null

content:
# Develop a program that reads a student's two grades, then calculates and shows their average.
n1 = float(input('Primeira nota: '))
n2 = float(input('Segunda nota: '))
m = (n1 + n2) / 2
print('O calculo entre as notas {} e {}, a média é {}'.format(n1,n2,m))
avg_line_length: 50.2 | max_line_length: 88 | alphanum_fraction: 0.661355

---
hexsha: 4a12cf1bb52dc4179bc69238aed9731515097a4d | size: 13,004 | ext: py | lang: Python
max_stars: pysnmp/HH3C-QINQ-MIB.py | agustinhenze/mibs.snmplabs.com @ 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 11 | 2021-02-02T16:27:16.000Z – 2021-08-31T06:22:49.000Z
max_issues: pysnmp/HH3C-QINQ-MIB.py | agustinhenze/mibs.snmplabs.com @ 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 75 | 2021-02-24T17:30:31.000Z – 2021-12-08T00:01:18.000Z
max_forks: pysnmp/HH3C-QINQ-MIB.py | agustinhenze/mibs.snmplabs.com @ 1fc5c07860542b89212f4c8ab807057d9a9206c7 | ["Apache-2.0"] | count: 10 | 2019-04-30T05:51:36.000Z – 2022-02-16T03:33:41.000Z

content:
#
# PySNMP MIB module HH3C-QINQ-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/HH3C-QINQ-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 19:16:24 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, OctetString, Integer = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "OctetString", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsUnion, ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsUnion", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint")
hh3cCommon, = mibBuilder.importSymbols("HH3C-OID-MIB", "hh3cCommon")
ifIndex, = mibBuilder.importSymbols("IF-MIB", "ifIndex")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
MibIdentifier, Integer32, Gauge32, Counter64, TimeTicks, NotificationType, MibScalar, MibTable, MibTableRow, MibTableColumn, iso, Bits, Unsigned32, ModuleIdentity, Counter32, IpAddress, ObjectIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "MibIdentifier", "Integer32", "Gauge32", "Counter64", "TimeTicks", "NotificationType", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "iso", "Bits", "Unsigned32", "ModuleIdentity", "Counter32", "IpAddress", "ObjectIdentity")
TextualConvention, DisplayString, RowStatus = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString", "RowStatus")
hh3cQINQ = ModuleIdentity((1, 3, 6, 1, 4, 1, 25506, 2, 69))
hh3cQINQ.setRevisions(('2006-03-10 00:00',))
if mibBuilder.loadTexts: hh3cQINQ.setLastUpdated('200603100000Z')
if mibBuilder.loadTexts: hh3cQINQ.setOrganization('Hangzhou H3C Tech. Co., Ltd.')
class Hh3cQinQSwitchState(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2))
namedValues = NamedValues(("enabled", 1), ("disabled", 2))
hh3cQinQMibObject = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1))
hh3cQinQGlobalConfigGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 1))
hh3cQinQBpduTunnelSwitch = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 1, 1), Hh3cQinQSwitchState().clone('enabled')).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cQinQBpduTunnelSwitch.setStatus('current')
hh3cQinQEthernetTypeValue = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(33024)).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cQinQEthernetTypeValue.setStatus('current')
hh3cQinQServiceTPIDValue = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 1, 3), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cQinQServiceTPIDValue.setStatus('current')
hh3cQinQCustomerTPIDValue = MibScalar((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readwrite")
if mibBuilder.loadTexts: hh3cQinQCustomerTPIDValue.setStatus('current')
hh3cQinQBpduTunnelTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 2), )
if mibBuilder.loadTexts: hh3cQinQBpduTunnelTable.setStatus('current')
hh3cQinQBpduTunnelEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 2, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-QINQ-MIB", "hh3cQinQProtocolIndex"))
if mibBuilder.loadTexts: hh3cQinQBpduTunnelEntry.setStatus('current')
hh3cQinQProtocolIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 2, 1, 1), Integer32().subtype(subtypeSpec=ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))).clone(namedValues=NamedValues(("bpdu", 1), ("stp", 2), ("gmosaic", 3), ("igmp", 4))))
if mibBuilder.loadTexts: hh3cQinQProtocolIndex.setStatus('current')
hh3cQinQBpduRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 2, 1, 2), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQBpduRowStatus.setStatus('current')
hh3cQinQPriorityRemarkTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 3), )
if mibBuilder.loadTexts: hh3cQinQPriorityRemarkTable.setStatus('current')
hh3cQinQPriorityRemarkEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 3, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-QINQ-MIB", "hh3cQinQPriorityValue"))
if mibBuilder.loadTexts: hh3cQinQPriorityRemarkEntry.setStatus('current')
hh3cQinQPriorityValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 3, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hh3cQinQPriorityValue.setStatus('current')
hh3cQinQPriorityRemarkValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 3, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQPriorityRemarkValue.setStatus('current')
hh3cQinQPriorityRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 3, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQPriorityRowStatus.setStatus('current')
hh3cQinQVidTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4), )
if mibBuilder.loadTexts: hh3cQinQVidTable.setStatus('current')
hh3cQinQVidEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-QINQ-MIB", "hh3cQinQVlanID"))
if mibBuilder.loadTexts: hh3cQinQVidEntry.setStatus('current')
hh3cQinQVlanID = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hh3cQinQVlanID.setStatus('current')
hh3cQinQInboundVidListLow = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1, 2), OctetString().subtype(subtypeSpec=ValueSizeConstraint(256, 256)).setFixedLength(256)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQInboundVidListLow.setStatus('current')
hh3cQinQInboundVidListHigh = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1, 3), OctetString().subtype(subtypeSpec=ValueSizeConstraint(256, 256)).setFixedLength(256)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQInboundVidListHigh.setStatus('current')
hh3cQinQVidEthernetType = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(33024)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQVidEthernetType.setStatus('current')
hh3cQinQVidRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 4, 1, 5), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQVidRowStatus.setStatus('current')
hh3cQinQVidSwapTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 5), )
if mibBuilder.loadTexts: hh3cQinQVidSwapTable.setStatus('current')
hh3cQinQVidSwapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 5, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-QINQ-MIB", "hh3cQinQVlanID"), (0, "HH3C-QINQ-MIB", "hh3cQinQVidSwapOld"))
if mibBuilder.loadTexts: hh3cQinQVidSwapEntry.setStatus('current')
hh3cQinQVidSwapOld = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 5, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094)))
if mibBuilder.loadTexts: hh3cQinQVidSwapOld.setStatus('current')
hh3cQinQVidSwapNew = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 5, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 4094))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQVidSwapNew.setStatus('current')
hh3cQinQVidSwapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 5, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQVidSwapRowStatus.setStatus('current')
hh3cQinQPrioritySwapTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 6), )
if mibBuilder.loadTexts: hh3cQinQPrioritySwapTable.setStatus('current')
hh3cQinQPrioritySwapEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 6, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"), (0, "HH3C-QINQ-MIB", "hh3cQinQVlanID"), (0, "HH3C-QINQ-MIB", "hh3cQinQPrioritySwapOld"))
if mibBuilder.loadTexts: hh3cQinQPrioritySwapEntry.setStatus('current')
hh3cQinQPrioritySwapOld = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 6, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 8)))
if mibBuilder.loadTexts: hh3cQinQPrioritySwapOld.setStatus('current')
hh3cQinQPrioritySwapNew = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 6, 1, 2), Integer32().subtype(subtypeSpec=ValueRangeConstraint(0, 7))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQPrioritySwapNew.setStatus('current')
hh3cQinQPrioritySwapRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 6, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQPrioritySwapRowStatus.setStatus('current')
hh3cQinQIfConfigTable = MibTable((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7), )
if mibBuilder.loadTexts: hh3cQinQIfConfigTable.setStatus('current')
hh3cQinQIfConfigEntry = MibTableRow((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1), ).setIndexNames((0, "IF-MIB", "ifIndex"))
if mibBuilder.loadTexts: hh3cQinQIfConfigEntry.setStatus('current')
hh3cQinQIfEthernetType = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 1), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535)).clone(33024)).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfEthernetType.setStatus('current')
hh3cQinQIfSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 2), Hh3cQinQSwitchState().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfSwitch.setStatus('current')
hh3cQinQIfRowStatus = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 3), RowStatus()).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfRowStatus.setStatus('current')
hh3cQinQIfServiceTPIDValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 4), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfServiceTPIDValue.setStatus('current')
hh3cQinQIfCustomerTPIDValue = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 5), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfCustomerTPIDValue.setStatus('current')
hh3cQinQIfUplinkSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 6), Hh3cQinQSwitchState().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfUplinkSwitch.setStatus('current')
hh3cQinQIfDownlinkSwitch = MibTableColumn((1, 3, 6, 1, 4, 1, 25506, 2, 69, 1, 7, 1, 7), Hh3cQinQSwitchState().clone('disabled')).setMaxAccess("readcreate")
if mibBuilder.loadTexts: hh3cQinQIfDownlinkSwitch.setStatus('current')
mibBuilder.exportSymbols("HH3C-QINQ-MIB", hh3cQinQMibObject=hh3cQinQMibObject, hh3cQinQVidSwapTable=hh3cQinQVidSwapTable, hh3cQinQIfConfigEntry=hh3cQinQIfConfigEntry, hh3cQinQIfServiceTPIDValue=hh3cQinQIfServiceTPIDValue, hh3cQinQPrioritySwapRowStatus=hh3cQinQPrioritySwapRowStatus, hh3cQinQIfConfigTable=hh3cQinQIfConfigTable, hh3cQinQBpduTunnelSwitch=hh3cQinQBpduTunnelSwitch, hh3cQinQVidSwapNew=hh3cQinQVidSwapNew, hh3cQinQPriorityValue=hh3cQinQPriorityValue, hh3cQinQPrioritySwapOld=hh3cQinQPrioritySwapOld, hh3cQinQBpduRowStatus=hh3cQinQBpduRowStatus, hh3cQinQVidRowStatus=hh3cQinQVidRowStatus, hh3cQinQVidEntry=hh3cQinQVidEntry, hh3cQinQIfEthernetType=hh3cQinQIfEthernetType, hh3cQinQIfCustomerTPIDValue=hh3cQinQIfCustomerTPIDValue, hh3cQinQCustomerTPIDValue=hh3cQinQCustomerTPIDValue, hh3cQinQVidTable=hh3cQinQVidTable, hh3cQinQProtocolIndex=hh3cQinQProtocolIndex, hh3cQinQIfRowStatus=hh3cQinQIfRowStatus, hh3cQinQPrioritySwapTable=hh3cQinQPrioritySwapTable, hh3cQinQVidEthernetType=hh3cQinQVidEthernetType, hh3cQinQIfUplinkSwitch=hh3cQinQIfUplinkSwitch, hh3cQinQPriorityRemarkEntry=hh3cQinQPriorityRemarkEntry, hh3cQinQPriorityRowStatus=hh3cQinQPriorityRowStatus, hh3cQinQBpduTunnelTable=hh3cQinQBpduTunnelTable, hh3cQinQVlanID=hh3cQinQVlanID, hh3cQinQIfDownlinkSwitch=hh3cQinQIfDownlinkSwitch, hh3cQinQPrioritySwapEntry=hh3cQinQPrioritySwapEntry, hh3cQinQBpduTunnelEntry=hh3cQinQBpduTunnelEntry, PYSNMP_MODULE_ID=hh3cQINQ, hh3cQinQIfSwitch=hh3cQinQIfSwitch, hh3cQinQInboundVidListLow=hh3cQinQInboundVidListLow, hh3cQinQInboundVidListHigh=hh3cQinQInboundVidListHigh, hh3cQINQ=hh3cQINQ, hh3cQinQPriorityRemarkValue=hh3cQinQPriorityRemarkValue, Hh3cQinQSwitchState=Hh3cQinQSwitchState, hh3cQinQVidSwapRowStatus=hh3cQinQVidSwapRowStatus, hh3cQinQGlobalConfigGroup=hh3cQinQGlobalConfigGroup, hh3cQinQPriorityRemarkTable=hh3cQinQPriorityRemarkTable, hh3cQinQVidSwapEntry=hh3cQinQVidSwapEntry, hh3cQinQEthernetTypeValue=hh3cQinQEthernetTypeValue, hh3cQinQServiceTPIDValue=hh3cQinQServiceTPIDValue, hh3cQinQVidSwapOld=hh3cQinQVidSwapOld, hh3cQinQPrioritySwapNew=hh3cQinQPrioritySwapNew)
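# A hedged sketch, not part of the generated module: walking one column registered
# above with pysnmp's high-level API. The agent address and community string are
# illustrative placeholders, not values from the source.
from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, nextCmd)
snmp_walk = nextCmd(SnmpEngine(),
                    CommunityData('public'),                 # placeholder community
                    UdpTransportTarget(('192.0.2.1', 161)),  # placeholder agent
                    ContextData(),
                    ObjectType(ObjectIdentity('HH3C-QINQ-MIB', 'hh3cQinQIfSwitch')),
                    lexicographicMode=False)
for errorIndication, errorStatus, errorIndex, varBinds in snmp_walk:
    if errorIndication or errorStatus:
        break
    for varBind in varBinds:
        print(varBind)  # e.g. HH3C-QINQ-MIB::hh3cQinQIfSwitch.<ifIndex> = disabled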
| 122.679245
| 2,087
| 0.77653
|
4a12cfa7ae06760d478e02f11bb7a309df904a8f
| 763
|
py
|
Python
|
taskmanager/src/modules/tasks/application/retrieve/get_task_error_handle.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 1
|
2020-04-14T18:12:11.000Z
|
2020-04-14T18:12:11.000Z
|
taskmanager/src/modules/tasks/application/retrieve/get_task_error_handle.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 3
|
2020-04-20T10:35:26.000Z
|
2020-06-15T07:45:59.000Z
|
taskmanager/src/modules/tasks/application/retrieve/get_task_error_handle.py
|
alice-biometrics/petisco-task-manager
|
2bad52013ab122f8c3e5dce740dcd154883e6940
|
[
"MIT"
] | 1
|
2021-03-12T13:48:01.000Z
|
2021-03-12T13:48:01.000Z
|
from meiga import Result
from petisco.controller.errors.http_error import HttpError
from petisco.domain.value_objects.uuid import InvalidUuidError
from taskmanager.src.modules.tasks.domain.errors import TaskNotFoundError
def get_task_error_handler(result: Result) -> HttpError:
domain_error = result.value
http_error = HttpError()
if isinstance(domain_error, TaskNotFoundError):
http_error.message = "Task not found"
http_error.code = 404
http_error.type_error = "TaskNotFoundError"
elif isinstance(domain_error, InvalidUuidError):
http_error.message = "Invalid TaskId. TaskId must be a valid 36-char UUID"
http_error.code = 400
http_error.type_error = "InvalidTaskIdError"
return http_error
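# A hedged usage sketch, not part of the original module; it reuses the imports above
# and assumes meiga's Result(failure=...) constructor, which may vary across versions.
failed = Result(failure=TaskNotFoundError())
http_error = get_task_error_handler(failed)
assert http_error.code == 404                        # mapped to "Task not found"
assert http_error.type_error == "TaskNotFoundError"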
| 38.15
| 82
| 0.756225
|
4a12d0d2777d650394da3bdbd61d7c7f3b18a59f
| 8,739
|
py
|
Python
|
BloodBank.py
|
imdewan/Blood-Bank-Management-System
|
15f0343022bd068baa1135926480b5888b182ccc
|
[
"MIT"
] | null | null | null |
BloodBank.py
|
imdewan/Blood-Bank-Management-System
|
15f0343022bd068baa1135926480b5888b182ccc
|
[
"MIT"
] | null | null | null |
BloodBank.py
|
imdewan/Blood-Bank-Management-System
|
15f0343022bd068baa1135926480b5888b182ccc
|
[
"MIT"
] | null | null | null |
import mysql.connector as mysql
from tkinter import *
from tkinter import messagebox
import json
import os
if not os.path.exists("config_db.json"):
    # create an empty config file and close the handle instead of leaking it
    open("config_db.json", "w").close()
file1 = open("config_db.json","r")
db_conf=file1.read()
file1.close()
dbdt=['','','']
if db_conf=="":
print("Let's configure your database first:")
dbdt[0]=input("Enter host/server ip: ")
dbdt[1]=input("Database username: ")
dbdt[2]=input("Database password: ")
saveit=json.dumps(dbdt)
db= mysql.connect(
host=dbdt[0],
user=dbdt[1],
password=dbdt[2],
)
cursor=db.cursor()
qdb='''CREATE DATABASE IF NOT EXISTS `blood_donation_db`;
'''
cursor.execute(qdb)
db= mysql.connect(
host=dbdt[0],
user=dbdt[1],
password=dbdt[2],
database="blood_donation_db"
)
cursor=db.cursor()
qdb='''CREATE TABLE IF NOT EXISTS `blood` (
`bloodgroup` varchar(50) DEFAULT NULL,
`platelet` varchar(50) DEFAULT NULL,
`rbc` varchar(50) DEFAULT NULL,
`date` datetime DEFAULT NULL,
`id` int(10) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
'''
cursor.execute(qdb)
qdb='''CREATE TABLE IF NOT EXISTS `donors` (
`name` varchar(50) DEFAULT NULL,
`age` varchar(50) DEFAULT NULL,
`gender` varchar(50) DEFAULT NULL,
`address` varchar(50) DEFAULT NULL,
`contactno` varchar(50) DEFAULT NULL,
`id` int(10) NOT NULL AUTO_INCREMENT,
PRIMARY KEY (`id`)
) ENGINE=MyISAM DEFAULT CHARSET=latin1;
'''
cursor.execute(qdb)
file1 = open("config_db.json","w+")
file1.write(saveit)
file1.close()
else:
dbdt=json.loads(db_conf)
db= mysql.connect(
host=dbdt[0],
user=dbdt[1],
password=dbdt[2],
database="blood_donation_db"
)
cursor=db.cursor()
def mainfn():
root = Tk()
root.title("BLOOD BANK")
root.geometry("1920x1080")
root.configure(background='white')
l3=Label(root,text="BLOOD BANK SYSTEM",bg='white',font = "Helvetica 15 bold").place(x=450,y=40,w=300,h=40)
l1=Label(root,text="Click to enter the details of the donor",bg='white',font="Helvetica 12").place(x=80,y=100,w=300,h=40)
b1=Button(root,text="Donor Details",command=lambda : donordetails()).place(x=80,y=150)
l2=Label(root,text="Click to enter the details of the blood",bg='white',font="Helvetica 12").place(x=80,y=200,w=300,h=40)
b2=Button(root,text="Blood Details",command=lambda : blooddetails()).place(x=80,y=250)
l3=Label(root,text="Click to make a request for blood",bg='white',font="Helvetica 12").place(x=80,y=300,w=300,h=40)
b3=Button(root,text="Blood Request",command=lambda : requestblood()).place(x=80,y=350)
b2=Button(root,text="Exit",command=lambda : stop(root)).place(x=80,y=400)
v = None  # defer: StringVar() cannot be created before a Tk root window exists
def insertDonor(name,age,gender,address,contactno):
insert = "INSERT INTO donors(name,age,gender,address,contactno) VALUES('"+name+"','"+age+"','"+gender+"','"+address+"',"+"'"+contactno+"')"
try:
cursor.execute(insert)
db.commit()
except:
db.rollback()
blooddetails()
def insertBlood(bloodgroup,platelet,rbc):
insert= "INSERT INTO blood(bloodgroup,platelet,rbc,date) VALUES('"+bloodgroup+"',"+"'"+platelet+"',"+"'"+rbc+"',"+"CURDATE())"
try:
cursor.execute(insert)
db.commit()
except:
db.rollback()
def retrieve(bg):
request="select * from donors inner join blood using(id) where bloodgroup='"+bg+"'"
try:
cursor.execute(request)
rows=cursor.fetchall()
db.commit()
print (len(rows))
return rows
except:
db.rollback()
def donordetails():
global v
v=""
root=Tk()
root.title("BLOOD BANK")
root.geometry("1024x768")
root.configure(background ='#FF8F8F')
l1=Label(root,text="Name:",bg='white',font="Helvetica 12").place(x=40,y=40)
l2=Label(root,text="Age:",bg='white',font="Helvetica 12").place(x=40,y=80)
l3=Label(root,text="Gender:",bg='white',font="Helvetica 12").place(x=40,y=120)
l4=Label(root,text="Address:",bg='white',font="Helvetica 12").place(x=40,y=160)
l5=Label(root,text="Contact:",bg='white',font="Helvetica 12").place(x=40,y=200)
e1=Entry(root)
e1.place(x=120,y=40)
e2=Entry(root)
e2.place(x=120,y=80)
e3=Entry(root)
e3.place(x=120,y=120)
e4=Entry(root)
e4.place(x=120,y=160)
e5=Entry(root)
e5.place(x=120,y=200)
b2=Button(root,text="Back",command=lambda : stop(root)).place(x=120,y=300)
b1=Button(root,text="Submit",command=lambda : insertDonor(e1.get(),e2.get(),e3.get(),e4.get(),e5.get())).place(x=40,y=300)
root.mainloop()
def newbloodin(a,b,c):
insertBlood(a,b,c)
messagebox.showinfo("Info", "Added Successfully")
def blooddetails():
root=Tk()
root.title("BLOOD BANK")
root.geometry("1024x768")
root.configure(background ='#FF8F8F')
l1=Label(root,text="Blood Group:",font="Helvetica 12").place(x=40,y=40,w=250,h=20)
l2=Label(root,text="PLatetelet count (in 100 thousands):",font="Helvetica 12").place(x=40,y=80,w=250,h=20)
l3=Label(root,text="RBC count (in millions):",font="Helvetica 12").place(x=40,y=120,w=250,h=20)
#l4=Label(root,text="Date Of Entry count:").place(x=40,y=160)
e1=Entry(root)
e1.place(x=350,y=40)
e2=Entry(root)
e2.place(x=350,y=80)
e3=Entry(root)
e3.place(x=350,y=120)
b1=Button(root,text="Submit",command=lambda : newbloodin(e1.get(),e2.get(),e3.get())).place(x=40,y=160)
root.mainloop()
def grid1(bg):
root=Tk()
root.title("LIST OF MATCHING DONORS")
root.geometry("750x500")
root.configure(background='#0C43F0')
    rows = retrieve(bg) or []  # retrieve() returns None when the query fails
x=0
for row in rows:
l1=Label(root,text=row[0],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=0,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l2=Label(root,text=row[1],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=1,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l3=Label(root,text=row[2],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=2,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l4=Label(root,text=row[3],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=3,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l5=Label(root,text=row[4],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=4,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l6=Label(root,text=row[5],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=5,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l7=Label(root,text=row[6],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=6,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l8=Label(root,text=row[7],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=7,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l9=Label(root,text=row[8],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=8,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
l10=Label(root,text=row[9],bg="#1EDEF2",font = "Verdana 15 bold").grid(row=x,column=9,sticky='E',padx=5,pady=5,ipadx=5,ipady=5)
x=x+1
root.mainloop()
def requestblood():
root=Tk()
root.title("BLOOD BANK")
root.geometry("1024x720")
root.configure(background='#FF8F8F')
l=Label(root,text="Enter the blood group").place(x=50,y=50,w=400,h=40)
e=Entry(root)
e.place(x=500,y=50)
b2=Button(root,text="Back",command=lambda : stop(root)).place(x=600,y=100)
b=Button(root,text="ENTER",command=lambda : grid1(e.get())).place(x=500,y=100)
root.mainloop()
def stop(root):
root.destroy()
def login():
#getting form data
uname=username.get()
pwd=password.get()
#applying empty validation
if uname=='' or pwd=='':
message.set("fill the empty field!!!")
else:
if uname=="admin" and pwd=="abc123":
loggedin="1"
message.set("Login success")
mainfn()
login_screen.destroy()
else:
message.set("Wrong username or password!!!")
#defining loginform function
def Loginform():
global login_screen
login_screen = Tk()
#Setting title of screen
login_screen.title("Login Form")
#setting height and width of screen
login_screen.geometry("300x250")
#declaring variable
global loggedin
global message
global username
global password
username = StringVar()
password = StringVar()
message=StringVar()
#Creating layout of login form
Label(login_screen,width="300", text="Please enter details below", bg="orange",fg="white").pack()
#Username Label
Label(login_screen, text="Username * ").place(x=20,y=40)
#Username textbox
Entry(login_screen, textvariable=username).place(x=90,y=42)
#Password Label
Label(login_screen, text="Password * ").place(x=20,y=80)
#Password textbox
Entry(login_screen, textvariable=password ,show="*").place(x=90,y=82)
#Label for displaying login status[success/failed]
Label(login_screen, text="",textvariable=message).place(x=95,y=100)
#Login button
Button(login_screen, text="Login", width=10, height=1, bg="orange",command=login).place(x=105,y=130)
login_screen.mainloop()
#calling function Loginform
Loginform()
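# Hedged run notes, not part of the original script:
# - requires a reachable MySQL server; on first run the script prompts on stdin for
#   host/user/password and caches them in config_db.json next to the script;
# - the Tk login form accepts the username/password pair hardcoded in login() above;
# - launch with:  python BloodBank.py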
| 34.003891
| 140
| 0.682687
|
4a12d12a16c95cdee1492b39401ba9f7400fab2f
| 30,734
|
py
|
Python
|
examples/pytorch/question-answering/run_seq2seq_qa.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/question-answering/run_seq2seq_qa.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
examples/pytorch/question-answering/run_seq2seq_qa.py
|
techthiyanes/adapter-transformers
|
04aeaf63c4c54856d416925258393d9e06866b46
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
# Copyright 2021 The HuggingFace Team All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Fine-tuning the library's seq2seq models for question answering using the 🤗 Seq2SeqTrainer.
"""
# You can also adapt this script on your own question answering task. Pointers for this are left as comments.
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import List, Optional, Tuple
import datasets
from datasets import load_dataset, load_metric
import transformers
from trainer_seq2seq_qa import QuestionAnsweringSeq2SeqTrainer
from transformers import (
AutoConfig,
AutoModelForSeq2SeqLM,
AutoTokenizer,
DataCollatorForSeq2Seq,
HfArgumentParser,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers.trainer_utils import EvalLoopOutput, EvalPrediction, get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.19.0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/question-answering/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
"""
Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
"""
model_name_or_path: str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
)
config_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
)
tokenizer_name: Optional[str] = field(
default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
)
cache_dir: Optional[str] = field(
default=None,
metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
)
use_fast_tokenizer: bool = field(
default=True,
metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
)
model_revision: str = field(
default="main",
metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
)
use_auth_token: bool = field(
default=False,
metadata={
"help": "Will use the token generated when running `transformers-cli login` (necessary to use this script "
"with private models)."
},
)
@dataclass
class DataTrainingArguments:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
dataset_name: Optional[str] = field(
default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
)
dataset_config_name: Optional[str] = field(
default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
)
context_column: Optional[str] = field(
default="context",
metadata={"help": "The name of the column in the datasets containing the contexts (for question answering)."},
)
question_column: Optional[str] = field(
default="question",
metadata={"help": "The name of the column in the datasets containing the questions (for question answering)."},
)
answer_column: Optional[str] = field(
default="answers",
metadata={"help": "The name of the column in the datasets containing the answers (for question answering)."},
)
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
test_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input test data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: int = field(
default=384,
metadata={
"help": "The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
},
)
max_answer_length: int = field(
default=30,
metadata={
"help": "The maximum length of an answer that can be generated. This is needed because the start "
"and end predictions are not conditioned on one another."
},
)
val_max_answer_length: Optional[int] = field(
default=None,
metadata={
"help": "The maximum total sequence length for validation target text after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded. Will default to `max_answer_length`."
"This argument is also used to override the ``max_length`` param of ``model.generate``, which is used "
"during ``evaluate`` and ``predict``."
},
)
pad_to_max_length: bool = field(
default=True,
metadata={
"help": "Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch (which can "
"be faster on GPU but will be slower on TPU)."
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
},
)
max_predict_samples: Optional[int] = field(
default=None,
metadata={
"help": "For debugging purposes or quicker training, truncate the number of prediction examples to this "
"value if set."
},
)
version_2_with_negative: bool = field(
default=False, metadata={"help": "If true, some of the examples do not have an answer."}
)
null_score_diff_threshold: float = field(
default=0.0,
metadata={
"help": "The threshold used to select the null answer: if the best answer has a score that is less than "
"the score of the null answer minus this threshold, the null answer is selected for this example. "
"Only useful when `version_2_with_negative=True`."
},
)
doc_stride: int = field(
default=128,
metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
)
n_best_size: int = field(
default=20,
metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
)
num_beams: Optional[int] = field(
default=None,
metadata={
"help": "Number of beams to use for evaluation. This argument will be passed to ``model.generate``, "
"which is used during ``evaluate`` and ``predict``."
},
)
ignore_pad_token_for_loss: bool = field(
default=True,
metadata={
"help": "Whether to ignore the tokens corresponding to padded labels in the loss computation or not."
},
)
def __post_init__(self):
if (
self.dataset_name is None
and self.train_file is None
and self.validation_file is None
and self.test_file is None
):
raise ValueError("Need either a dataset name or a training/validation file/test_file.")
else:
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
if self.test_file is not None:
extension = self.test_file.split(".")[-1]
assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
if self.val_max_answer_length is None:
self.val_max_answer_length = self.max_answer_length
question_answering_column_name_mapping = {
"squad_v2": ("question", "context", "answer"),
}
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, Seq2SeqTrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+ f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Detecting last checkpoint.
last_checkpoint = None
if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. "
"Use --overwrite_output_dir to overcome."
)
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
)
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
raw_datasets = load_dataset(
data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
)
else:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
if data_args.test_file is not None:
data_files["test"] = data_args.test_file
extension = data_args.test_file.split(".")[-1]
raw_datasets = load_dataset(extension, data_files=data_files, field="data", cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=True,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model = AutoModelForSeq2SeqLM.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
use_auth_token=True if model_args.use_auth_token else None,
)
model.resize_token_embeddings(len(tokenizer))
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")
# Preprocessing the datasets.
# We need to generate and tokenize inputs and targets.
if training_args.do_train:
column_names = raw_datasets["train"].column_names
elif training_args.do_eval:
column_names = raw_datasets["validation"].column_names
elif training_args.do_predict:
column_names = raw_datasets["test"].column_names
else:
logger.info("There is nothing to do. Please pass `do_train`, `do_eval` and/or `do_predict`.")
return
# Get the column names for input/target.
dataset_columns = question_answering_column_name_mapping.get(data_args.dataset_name, None)
if data_args.question_column is None:
question_column = dataset_columns[0] if dataset_columns is not None else column_names[0]
else:
question_column = data_args.question_column
if question_column not in column_names:
raise ValueError(
f"--question_column' value '{data_args.question_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.context_column is None:
context_column = dataset_columns[1] if dataset_columns is not None else column_names[1]
else:
context_column = data_args.context_column
if context_column not in column_names:
raise ValueError(
f"--context_column' value '{data_args.context_column}' needs to be one of: {', '.join(column_names)}"
)
if data_args.answer_column is None:
answer_column = dataset_columns[2] if dataset_columns is not None else column_names[2]
else:
answer_column = data_args.answer_column
if answer_column not in column_names:
raise ValueError(
f"--answer_column' value '{data_args.answer_column}' needs to be one of: {', '.join(column_names)}"
)
# Temporarily set max_answer_length for training.
max_answer_length = data_args.max_answer_length
padding = "max_length" if data_args.pad_to_max_length else False
if training_args.label_smoothing_factor > 0 and not hasattr(model, "prepare_decoder_input_ids_from_labels"):
logger.warning(
"label_smoothing is enabled but the `prepare_decoder_input_ids_from_labels` method is not defined for"
f"`{model.__class__.__name__}`. This will lead to loss being calculated twice and will take up more memory"
)
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_squad_batch(
examples,
question_column: str,
context_column: str,
answer_column: str,
) -> Tuple[List[str], List[str]]:
questions = examples[question_column]
contexts = examples[context_column]
answers = examples[answer_column]
def generate_input(_question, _context):
return " ".join(["question:", _question.lstrip(), "context:", _context.lstrip()])
inputs = [generate_input(question, context) for question, context in zip(questions, contexts)]
targets = [answer["text"][0] if len(answer["text"]) > 0 else "" for answer in answers]
return inputs, targets
def preprocess_function(examples):
inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(inputs, max_length=max_seq_length, padding=padding, truncation=True)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
# Validation preprocessing
def preprocess_validation_function(examples):
inputs, targets = preprocess_squad_batch(examples, question_column, context_column, answer_column)
model_inputs = tokenizer(
inputs,
max_length=max_seq_length,
padding=padding,
truncation=True,
return_overflowing_tokens=True,
return_offsets_mapping=True,
)
# Setup the tokenizer for targets
with tokenizer.as_target_tokenizer():
labels = tokenizer(targets, max_length=max_answer_length, padding=padding, truncation=True)
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
sample_mapping = model_inputs.pop("overflow_to_sample_mapping")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
model_inputs["example_id"] = []
for i in range(len(model_inputs["input_ids"])):
# One example can give several spans, this is the index of the example containing this span of text.
sample_index = sample_mapping[i]
model_inputs["example_id"].append(examples["id"][sample_index])
# If we are padding here, replace all tokenizer.pad_token_id in the labels by -100 when we want to ignore
# padding in the loss.
if padding == "max_length" and data_args.ignore_pad_token_for_loss:
labels["input_ids"] = [
[(l if l != tokenizer.pad_token_id else -100) for l in label] for label in labels["input_ids"]
]
model_inputs["labels"] = labels["input_ids"]
return model_inputs
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
            # We will select samples from the whole data if the argument is specified
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
# Create train feature from dataset
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on train dataset",
)
if data_args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select only the specified max samples
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_examples = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
            # We will select samples from the whole data
max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
eval_examples = eval_examples.select(range(max_eval_samples))
# Validation Feature Creation
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_examples.map(
preprocess_validation_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on validation dataset",
)
if data_args.max_eval_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
if training_args.do_predict:
if "test" not in raw_datasets:
raise ValueError("--do_predict requires a test dataset")
predict_examples = raw_datasets["test"]
if data_args.max_predict_samples is not None:
            # We will select samples from the whole data
predict_examples = predict_examples.select(range(data_args.max_predict_samples))
# Predict Feature Creation
with training_args.main_process_first(desc="prediction dataset map pre-processing"):
predict_dataset = predict_examples.map(
preprocess_validation_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not data_args.overwrite_cache,
desc="Running tokenizer on prediction dataset",
)
if data_args.max_predict_samples is not None:
            # During feature creation the number of samples might increase, so we select the required samples again
max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
predict_dataset = predict_dataset.select(range(max_predict_samples))
# Data collator
label_pad_token_id = -100 if data_args.ignore_pad_token_for_loss else tokenizer.pad_token_id
data_collator = DataCollatorForSeq2Seq(
tokenizer,
model=model,
label_pad_token_id=label_pad_token_id,
pad_to_multiple_of=8 if training_args.fp16 else None,
)
metric = load_metric("squad_v2" if data_args.version_2_with_negative else "squad")
def compute_metrics(p: EvalPrediction):
return metric.compute(predictions=p.predictions, references=p.label_ids)
# Post-processing:
def post_processing_function(
examples: datasets.Dataset, features: datasets.Dataset, outputs: EvalLoopOutput, stage="eval"
):
# Decode the predicted tokens.
preds = outputs.predictions
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
# Build a map example to its corresponding features.
example_id_to_index = {k: i for i, k in enumerate(examples["id"])}
feature_per_example = {example_id_to_index[feature["example_id"]]: i for i, feature in enumerate(features)}
predictions = {}
# Let's loop over all the examples!
for example_index, example in enumerate(examples):
# This is the index of the feature associated to the current example.
feature_index = feature_per_example[example_index]
predictions[example["id"]] = decoded_preds[feature_index]
# Format the result to the format the metric expects.
if data_args.version_2_with_negative:
formatted_predictions = [
{"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
]
else:
formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
references = [{"id": ex["id"], "answers": ex[answer_column]} for ex in examples]
return EvalPrediction(predictions=formatted_predictions, label_ids=references)
# Initialize our Trainer
trainer = QuestionAnsweringSeq2SeqTrainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
eval_examples=eval_examples if training_args.do_eval else None,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
post_process_function=post_processing_function,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
checkpoint = last_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
results = {}
max_length = (
training_args.generation_max_length
if training_args.generation_max_length is not None
else data_args.val_max_answer_length
)
num_beams = data_args.num_beams if data_args.num_beams is not None else training_args.generation_num_beams
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate(max_length=max_length, num_beams=num_beams, metric_key_prefix="eval")
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
# Prediction
if training_args.do_predict:
logger.info("*** Predict ***")
results = trainer.predict(predict_dataset, predict_examples)
metrics = results.metrics
max_predict_samples = (
data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
)
metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
trainer.log_metrics("predict", metrics)
trainer.save_metrics("predict", metrics)
if training_args.push_to_hub:
kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
if data_args.dataset_name is not None:
kwargs["dataset_tags"] = data_args.dataset_name
if data_args.dataset_config_name is not None:
kwargs["dataset_args"] = data_args.dataset_config_name
kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
else:
kwargs["dataset"] = data_args.dataset_name
trainer.push_to_hub(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
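# A hedged invocation sketch; the model and dataset names are illustrative, but every
# flag below maps to a field declared in the dataclasses above.
#   python run_seq2seq_qa.py \
#     --model_name_or_path t5-small \
#     --dataset_name squad_v2 \
#     --version_2_with_negative \
#     --do_train --do_eval \
#     --per_device_train_batch_size 8 \
#     --output_dir /tmp/seq2seq_squad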
| 44.932749
| 120
| 0.676157
|
4a12d152b952cca441858cf920ad0cddd658aee5
| 75
|
py
|
Python
|
ultimatetictactoe/game/__init__.py
|
stoimenoff/ultimate-tic-tac-tie
|
1d6980dcf51c020edef5edd2a1772cd96c003fd0
|
[
"MIT"
] | 4
|
2016-11-02T19:00:29.000Z
|
2019-12-05T10:38:20.000Z
|
ultimatetictactoe/game/__init__.py
|
stoimenoff/ultimate-tic-tac-tie
|
1d6980dcf51c020edef5edd2a1772cd96c003fd0
|
[
"MIT"
] | null | null | null |
ultimatetictactoe/game/__init__.py
|
stoimenoff/ultimate-tic-tac-tie
|
1d6980dcf51c020edef5edd2a1772cd96c003fd0
|
[
"MIT"
] | 3
|
2016-05-26T17:38:57.000Z
|
2019-05-05T17:29:29.000Z
|
from . import boards
from . import players
__all__ = ['boards', 'players']
| 18.75
| 31
| 0.706667
|
4a12d168427d129a7c7d392b60372872cd0d490d
| 1,476
|
py
|
Python
|
bin/convert_c_spinner.py
|
onslaught-demogroup/ons_paddo_music_disk
|
6a945f918fd1220b325385d14327b5e1ee86295d
|
[
"MIT"
] | null | null | null |
bin/convert_c_spinner.py
|
onslaught-demogroup/ons_paddo_music_disk
|
6a945f918fd1220b325385d14327b5e1ee86295d
|
[
"MIT"
] | null | null | null |
bin/convert_c_spinner.py
|
onslaught-demogroup/ons_paddo_music_disk
|
6a945f918fd1220b325385d14327b5e1ee86295d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os
from pathlib import Path
import glob
current: Path = Path(os.path.dirname(os.path.realpath(__file__)))
root: Path = current.parent.absolute()
graphics_source = root.joinpath("assets/spinner")
graphics_target = root.joinpath("src")
graphics_target.mkdir(parents=True, exist_ok=True)
print(f"Rendering PETSCII conversion template... {graphics_source}")
output: Path = graphics_target.joinpath("spinner_include.asm")
filenames = sorted(filter(os.path.isfile, glob.glob(str(graphics_source.joinpath("*.c")))))
with open(output, mode="w") as o:
o.write("""
/*
This file is autogenerated by bin/convert_c_spinner.py
*/
""")
for filename in filenames:
name: str = os.path.basename(filename).strip(".c")
working: Path = graphics_source.joinpath(filename)
with open(working, mode="r") as f:
lines: list = f.readlines()
count: int = 0
for line in lines:
if line[0].isnumeric():
data: str = line.strip(",\n")
if count == 8:
o.write(f"\n\n.align $100\n{name}_txt:\n")
if count >= 8 and count < 14:
o.write(f".byte {data}\n")
if count == 33:
o.write(f"\n\n.align $100\n{name}_col:\n")
if count >= 33 and count < 39:
o.write(f".byte {data}\n")
count += 1
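# A hedged sketch of the input this converter expects; the frame name and bytes are
# illustrative, only the positions of the numeric lines matter to the counting loop above:
#   assets/spinner/frame_a.c
#     unsigned char frame_a[] = {
#         ...                 <- numeric lines 8..13 are emitted as frame_a_txt
#         1,2,3,4,5,6,7,8,
#         ...                 <- numeric lines 33..38 are emitted as frame_a_col
#     };
# Re-running bin/convert_c_spinner.py regenerates src/spinner_include.asm.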
| 36
| 93
| 0.568428
|
4a12d16bbfb2cf61ce84b3f1f47a1f50d7c6d406
| 963
|
py
|
Python
|
tests/executors/encoders/audio/test_spectral.py
|
xubiuit/jina
|
4ab91693c2d51a35eca3cf6c187034e0568b0ac9
|
[
"Apache-2.0"
] | null | null | null |
tests/executors/encoders/audio/test_spectral.py
|
xubiuit/jina
|
4ab91693c2d51a35eca3cf6c187034e0568b0ac9
|
[
"Apache-2.0"
] | null | null | null |
tests/executors/encoders/audio/test_spectral.py
|
xubiuit/jina
|
4ab91693c2d51a35eca3cf6c187034e0568b0ac9
|
[
"Apache-2.0"
] | null | null | null |
import unittest
import numpy as np
from jina.executors.encoders.audio.spectral import MFCCTimbreEncoder, ChromaPitchEncoder
from tests import JinaTestCase
class MyTestCase(JinaTestCase):
def test_mfcc_encoder(self):
batch_size = 10
n_frames = 5
signal_length = 500 * n_frames
test_data = np.random.randn(batch_size, signal_length)
n_mfcc = 12
encoder = MFCCTimbreEncoder(n_mfcc=n_mfcc)
encoded_data = encoder.encode(test_data)
self.assertEqual(encoded_data.shape, (batch_size, n_mfcc * n_frames))
def test_chroma_encoder(self):
batch_size = 10
n_frames = 5
signal_length = 500 * n_frames
test_data = np.random.randn(batch_size, signal_length)
encoder = ChromaPitchEncoder()
encoded_data = encoder.encode(test_data)
self.assertEqual(encoded_data.shape, (batch_size, 12 * n_frames))
if __name__ == '__main__':
unittest.main()
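# A hedged note, not in the original file: run standalone from the repository root
# (assumes the jina test dependencies are installed):
#   python -m unittest tests.executors.encoders.audio.test_spectral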
| 30.09375
| 88
| 0.692627
|
4a12d1c093fefdb845df081345388d08defed857
| 195
|
py
|
Python
|
yayinevi/deleteBook.py
|
emremrt98/Python-Projeleri
|
01fb22a9bd1f4cfd6884f2e278e20e04d33ea619
|
[
"MIT"
] | null | null | null |
yayinevi/deleteBook.py
|
emremrt98/Python-Projeleri
|
01fb22a9bd1f4cfd6884f2e278e20e04d33ea619
|
[
"MIT"
] | null | null | null |
yayinevi/deleteBook.py
|
emremrt98/Python-Projeleri
|
01fb22a9bd1f4cfd6884f2e278e20e04d33ea619
|
[
"MIT"
] | null | null | null |
import sqlite3
con = sqlite3.connect("yayinevi.db")
cursor = con.cursor()
def kitapSilme(kitapAdi):
cursor.execute("DELETE FROM kitaplik WHERE Adı = (?)", kitapAdi)
con.commit()
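# A hedged usage sketch, not in the original file; the title is illustrative.
kitapSilme("Kürk Mantolu Madonna")  # delete the row whose Adı (title) matches
con.close()                         # release the SQLite connection when done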
| 17.727273
| 69
| 0.676923
|
4a12d1c919e74db22482c9aed96ed464e163ae90
| 1,432
|
py
|
Python
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
balos1/PelePhysics
|
d01190cc7b0eaad4ec96fac573034ccb485f0e9f
|
[
"BSD-3-Clause-LBNL"
] | 31
|
2018-11-21T01:49:06.000Z
|
2022-03-30T03:41:43.000Z
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
balos1/PelePhysics
|
d01190cc7b0eaad4ec96fac573034ccb485f0e9f
|
[
"BSD-3-Clause-LBNL"
] | 123
|
2019-03-12T22:27:29.000Z
|
2022-03-29T17:00:04.000Z
|
Support/Fuego/Pythia/pythia-0.5/packages/fuego/fuego/serialization/mechanisms/Entity.py
|
sundials-codes/PelePhysics
|
5624f83a04f43aa95288be9d8a7bb372a4adefe6
|
[
"BSD-3-Clause-LBNL"
] | 32
|
2018-11-05T11:51:59.000Z
|
2022-03-29T13:09:32.000Z
|
#!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Michael A.G. Aivazis
# California Institute of Technology
# (C) 1998-2003 All Rights Reserved
#
# <LicenseText>
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
from __future__ import division
from builtins import object
class Entity(object):
_FILENAME_LIMIT = 40
def locator(self, locator):
self.filename = locator.filename
self.line = locator.line
self.column = locator.column
return
def __init__(self, id, locator):
self.id = id
if locator:
self.filename = locator.filename
self.line = locator.line
self.column = locator.column
else:
self.filename = ""
self.line = 0
self.column = 0
return
def __str__(self):
filename = self.filename
if not filename:
return "<unknown>"
if len(filename) > self._FILENAME_LIMIT:
filename = (
filename[: self._FILENAME_LIMIT // 2 - 3]
+ "..."
+ filename[-self._FILENAME_LIMIT // 2 + 3 :]
)
return "'%s':(%d, %d)" % (filename, self.line, self.column)
# version
__id__ = "$Id$"
# End of file
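# A minimal hedged sketch of the location formatting above, not in the original file:
#   e = Entity("thermo", None)   # no locator supplied
#   str(e)                       # -> "<unknown>"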
| 23.47541
| 82
| 0.453911
|
4a12d3631b51816ba4dea5dafc791f42c3a51a66
| 3,790
|
py
|
Python
|
crawler/hunan_zhuzhou_whg.py
|
zixinzeng-jennifer/public-culture-activity
|
abe908cb10407e7e4753ef541476e4267d24a1b8
|
[
"CC0-1.0"
] | null | null | null |
crawler/hunan_zhuzhou_whg.py
|
zixinzeng-jennifer/public-culture-activity
|
abe908cb10407e7e4753ef541476e4267d24a1b8
|
[
"CC0-1.0"
] | null | null | null |
crawler/hunan_zhuzhou_whg.py
|
zixinzeng-jennifer/public-culture-activity
|
abe908cb10407e7e4753ef541476e4267d24a1b8
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from bs4 import BeautifulSoup
from cultureBigdata.items import CultureNewsItem, CultureBasicItem, CultureEventItem
import re
import time
from urllib.parse import unquote
import os
import codecs
import json
import sys
class ShanxiwhySpider(scrapy.Spider):
name = 'hunan_zhuzhou'
#allowed_domains = ['jlsdmu.com/']
def start_requests(self):
for i in range(1,5):
headers = {'Host':'www.hnzzwhg.com',
'Referer': 'http://www.hnzzwhg.com/zzs/user_activityMore/',
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36',}
url = 'http://www.hnzzwhg.com/activity/getFActivitySearch.show?activityFlag=0&activityTime=0&pageNum='+str(i)+'&pageSize=10&sort=0&title='
print("-----------第"+str(i)+"页-----------")
yield scrapy.FormRequest(url,
headers = headers,
method = 'GET', # GET or POST
callback = self.event_parse,
dont_filter = True,)
def event_parse(self, response):
item = CultureEventItem()
        result = json.loads(response.body)  # json.loads accepts bytes; the decode/encode round-trip was a no-op
records = result['data']['list']
for record in records:
detail_id = record['contentId']
item['pav_name'] = '湖南株洲文化馆'
url = 'http://www.hnzzwhg.com/activity/getFActivityDetailForJoin.show?id='+str(detail_id)
item['activity_name'] = record['title']
item['place'] = record['address']
item['activity_type'] = record['dlabelList']
item['activity_time'] = record['createTime']
item['organizer'] = record['pname']
item['url'] = record['linkAddress']
            headers1 = {'Host': 'www.hnzzwhg.com',  # drop the ':' prefix copied from an HTTP/2 pseudo-header
                        'Referer': 'http://www.hnzzwhg.com/zzs/user_activityDetail/'+str(detail_id),}
yield scrapy.FormRequest(url,
headers = headers1,
method = 'GET', # GET or POST
callback = self.event_text_parse,
meta={'item': item},
dont_filter = True)
def event_text_parse(self, response):
        result = json.loads(response.body)  # same simplification as in event_parse
item = response.meta['item']
contents = result['data']
item['activity_time'] = contents['activityStartTime']
item['remark'] = contents['brief']
return item
'''
def intro_parse(self, response):
item = CultureBasicItem()
data = response.body
item['pav_name'] = '广东省文化馆'
item['pav_introduction'] = "广东省文化馆(广东省非物质文化遗产保护中心)成立于1956年,是广东省人民政府设立的专门从事群众文化工作和非物质文化传承保护工作的正处级公益一类事业单位。其主要职责任务有:组织开展具有导向性、示范性的群众文化艺术活动;辅导农村、社区、企业等开展群众文化活动,辅导、培训辖区内文化馆、站业余干部及文艺活动业务骨干,组织、指导、研究群众性文艺创作活动;组织开展群众文艺理论研究,搜集、整理、保护民族民间文化艺术遗产;负责广东省文化志愿者总队及全省文化志愿者队伍建设、管理、培训工作和文化志愿服务开展;执行全省非物质文化遗产保护的规划、计划和工作规范,组织实施全省非物质文化遗产的普查、认定、申报、保护和交流传播工作。馆内设有办公室、活动部、培训部、创作部、信息部、团队部、拓展部、省非物质文化遗产保护中心办公室共八个部室。 作为我省现代公共文化服务体系建设和公共文化服务的重要参与者、提供者,广东省文化馆始终坚持在省委、省政府和省文化厅的领导下,围绕党和政府的中心工作,以满足群众文化需求为立足点,以改善群众文化生活为目标,充分发挥省馆龙头示范作用,不断完善和创新现代公共文化服务,努力实现好、维护好、保障好广大人民群众的基本公共文化权益;以高度的历史责任感和使命感,着力推进现代公共文化服务体系建设,为我省建设文化强省和幸福广东,实现“三个定位、两个率先”的总目标做出应有的贡献。"
item['region'] = '广东'
item['area_number'] = ''
item['collection_number'] = ''
item['branch_number'] = ''
item['librarian_number'] = ''
item['client_number'] = ''
item['activity_number'] = ''
yield item
'''
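# A hedged run sketch, not in the source; from the cultureBigdata project root:
#   scrapy crawl hunan_zhuzhou -o zhuzhou_events.json
# emits one CultureEventItem per activity record across the four pages requested above.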
| 45.119048
| 633
| 0.595778
|
4a12d4cdd27637cfbb6793d8633f93883a87c202
| 33
|
py
|
Python
|
ch05construction/species.py
|
Subhanshuupadhyay/rsd-engineeringcourse
|
3e8a440f3d70ece500ef25677c6966b54282a0a7
|
[
"CC-BY-3.0"
] | 86
|
2015-02-18T13:44:29.000Z
|
2022-03-15T06:43:22.000Z
|
ch05construction/species.py
|
Subhanshuupadhyay/rsd-engineeringcourse
|
3e8a440f3d70ece500ef25677c6966b54282a0a7
|
[
"CC-BY-3.0"
] | 132
|
2015-02-25T13:04:52.000Z
|
2022-02-16T10:50:15.000Z
|
ch05construction/species.py
|
Subhanshuupadhyay/rsd-engineeringcourse
|
3e8a440f3d70ece500ef25677c6966b54282a0a7
|
[
"CC-BY-3.0"
] | 84
|
2015-02-18T13:44:32.000Z
|
2022-02-20T07:59:29.000Z
|
class Species(object):
pass
| 11
| 22
| 0.666667
|
4a12d5503e3d8a9013f101590c4281bf9a953076
| 381
|
py
|
Python
|
kivy_helpers.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
kivy_helpers.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
kivy_helpers.py
|
YotamAlon/AudioChef
|
d06796b789eda945b018e5685e8440a232ceadd3
|
[
"MIT"
] | null | null | null |
def toggle_widget(wid, hide):
if hasattr(wid, 'saved_attrs'):
if not hide:
wid.height, wid.size_hint_y, wid.opacity, wid.disabled = wid.saved_attrs
del wid.saved_attrs
elif hide:
wid.saved_attrs = wid.height, wid.size_hint_y, wid.opacity, wid.disabled
wid.height, wid.size_hint_y, wid.opacity, wid.disabled = 0, None, 0, True
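# A hedged usage sketch assuming a widget already placed in a Kivy layout; not part of
# the original module:
#   toggle_widget(label, hide=True)   # collapse: layout attrs saved, then zeroed
#   toggle_widget(label, hide=False)  # restore the saved attributes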
| 47.625
| 84
| 0.656168
|
4a12d61c672a69c3735a493ef394901afafcfac6
| 1,765
|
py
|
Python
|
aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeTempInstanceRequest.py
|
DataDog/aliyun-openapi-python-sdk
|
5cbee29bce6416dd62f61f0c3786b1af6ea0d84f
|
[
"Apache-2.0"
] | 1
|
2019-12-23T12:36:43.000Z
|
2019-12-23T12:36:43.000Z
|
aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeTempInstanceRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | null | null | null |
aliyun-python-sdk-r-kvstore/aliyunsdkr_kvstore/request/v20150101/DescribeTempInstanceRequest.py
|
liusc27/aliyun-openapi-python-sdk
|
5e3db3535dd21de987dc5981e71151327d5a884f
|
[
"Apache-2.0"
] | 1
|
2021-02-23T11:27:54.000Z
|
2021-02-23T11:27:54.000Z
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class DescribeTempInstanceRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'R-kvstore', '2015-01-01', 'DescribeTempInstance','redisa')
def get_ResourceOwnerId(self):
return self.get_query_params().get('ResourceOwnerId')
def set_ResourceOwnerId(self,ResourceOwnerId):
self.add_query_param('ResourceOwnerId',ResourceOwnerId)
def get_ResourceOwnerAccount(self):
return self.get_query_params().get('ResourceOwnerAccount')
def set_ResourceOwnerAccount(self,ResourceOwnerAccount):
self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount)
def get_OwnerAccount(self):
return self.get_query_params().get('OwnerAccount')
def set_OwnerAccount(self,OwnerAccount):
self.add_query_param('OwnerAccount',OwnerAccount)
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
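# A hedged sketch of issuing the request through the core SDK client; the keys and
# region below are placeholders, not values from the source.
from aliyunsdkcore.client import AcsClient
client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')
request = DescribeTempInstanceRequest()
response = client.do_action_with_exception(request)  # raw JSON bytes from the API
print(response)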
| 36.770833
| 88
| 0.777337
|
4a12d645dead4d6f6442685fb632be2ed73d92be
| 326
|
py
|
Python
|
rewards/admin.py
|
zizoubrown/Awards
|
53af5e9706bdc18a949d49653a8abbf93fb4122d
|
[
"Unlicense"
] | null | null | null |
rewards/admin.py
|
zizoubrown/Awards
|
53af5e9706bdc18a949d49653a8abbf93fb4122d
|
[
"Unlicense"
] | null | null | null |
rewards/admin.py
|
zizoubrown/Awards
|
53af5e9706bdc18a949d49653a8abbf93fb4122d
|
[
"Unlicense"
] | null | null | null |
from django.contrib import admin
from .models import Profile, Image, Rating
admin.site.site_header = "Awards Admin"
admin.site.site_title = "Awards Admin Area"
admin.site.index_title = "Welcome to Awards admin"
# Register your models here.
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Rating)
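
# A sketch of how one of the plain registrations above could be swapped for a
# customised ModelAdmin (the field names are hypothetical, so this stays
# commented out):
#
#   @admin.register(Image)
#   class ImageAdmin(admin.ModelAdmin):
#       list_display = ("name", "posted")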
| 27.166667
| 50
| 0.791411
|
4a12d7b8cf82720905df6659c2d2c7d085815b91
| 78
|
py
|
Python
|
info/settings/__init__.py
|
rkorkosz/rinfo
|
5e9f8f75e359550eedd23948e29f915df36367df
|
[
"MIT"
] | null | null | null |
info/settings/__init__.py
|
rkorkosz/rinfo
|
5e9f8f75e359550eedd23948e29f915df36367df
|
[
"MIT"
] | 2
|
2015-09-29T21:46:19.000Z
|
2015-09-29T21:46:35.000Z
|
info/settings/__init__.py
|
Korkki/rinfo
|
5e9f8f75e359550eedd23948e29f915df36367df
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from __future__ import absolute_import
| 39
| 39
| 0.910256
|
4a12d86407c384d5d143878c5826615b5f234186
| 4,616
|
py
|
Python
|
file_repository_sdk/model/resource_manage/filter_strategy_instance_data_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5
|
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
file_repository_sdk/model/resource_manage/filter_strategy_instance_data_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
file_repository_sdk/model/resource_manage/filter_strategy_instance_data_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: filter_strategy_instance_data.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from file_repository_sdk.model.resource_manage import filter_condition_group_pb2 as file__repository__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2
from google.protobuf import struct_pb2 as google_dot_protobuf_dot_struct__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='filter_strategy_instance_data.proto',
package='resource_manage',
syntax='proto3',
serialized_options=_b('ZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manage'),
serialized_pb=_b('\n#filter_strategy_instance_data.proto\x12\x0fresource_manage\x1a\x46\x66ile_repository_sdk/model/resource_manage/filter_condition_group.proto\x1a\x1cgoogle/protobuf/struct.proto\"\xa2\x01\n\x1a\x46ilterStrategyInstanceData\x12\n\n\x02id\x18\x01 \x01(\t\x12\x1a\n\x12strategyInstanceId\x18\x02 \x01(\t\x12%\n\x04\x64\x61ta\x18\x03 \x01(\x0b\x32\x17.google.protobuf.Struct\x12\x35\n\x06\x66ilter\x18\x04 \x03(\x0b\x32%.resource_manage.FilterConditionGroupBKZIgo.easyops.local/contracts/protorepo-models/easyops/model/resource_manageb\x06proto3')
,
dependencies=[file__repository__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2.DESCRIPTOR,google_dot_protobuf_dot_struct__pb2.DESCRIPTOR,])
_FILTERSTRATEGYINSTANCEDATA = _descriptor.Descriptor(
name='FilterStrategyInstanceData',
full_name='resource_manage.FilterStrategyInstanceData',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='resource_manage.FilterStrategyInstanceData.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='strategyInstanceId', full_name='resource_manage.FilterStrategyInstanceData.strategyInstanceId', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='data', full_name='resource_manage.FilterStrategyInstanceData.data', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='filter', full_name='resource_manage.FilterStrategyInstanceData.filter', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=159,
serialized_end=321,
)
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['data'].message_type = google_dot_protobuf_dot_struct__pb2._STRUCT
_FILTERSTRATEGYINSTANCEDATA.fields_by_name['filter'].message_type = file__repository__sdk_dot_model_dot_resource__manage_dot_filter__condition__group__pb2._FILTERCONDITIONGROUP
DESCRIPTOR.message_types_by_name['FilterStrategyInstanceData'] = _FILTERSTRATEGYINSTANCEDATA
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
FilterStrategyInstanceData = _reflection.GeneratedProtocolMessageType('FilterStrategyInstanceData', (_message.Message,), {
'DESCRIPTOR' : _FILTERSTRATEGYINSTANCEDATA,
'__module__' : 'filter_strategy_instance_data_pb2'
# @@protoc_insertion_point(class_scope:resource_manage.FilterStrategyInstanceData)
})
_sym_db.RegisterMessage(FilterStrategyInstanceData)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
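# A usage sketch (illustrative only, appended after the generated output; the
# field names come from the descriptor above):
if __name__ == "__main__":
    msg = FilterStrategyInstanceData(id="i-1", strategyInstanceId="s-1")
    wire = msg.SerializeToString()      # proto3 wire-format bytes
    clone = FilterStrategyInstanceData()
    clone.ParseFromString(wire)         # round-trips both string fields
    assert clone.id == "i-1" and clone.strategyInstanceId == "s-1"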
| 47.102041
| 564
| 0.80091
|
4a12d885ebc5dd3fd2445c0116b3d1aeccb325f8
| 8,509
|
py
|
Python
|
main.py
|
thejerrytan/CarND-Semantic-Segmentation
|
5f8e5b1d1278833f0f46bac686a3a19b37f997ba
|
[
"MIT"
] | null | null | null |
main.py
|
thejerrytan/CarND-Semantic-Segmentation
|
5f8e5b1d1278833f0f46bac686a3a19b37f997ba
|
[
"MIT"
] | null | null | null |
main.py
|
thejerrytan/CarND-Semantic-Segmentation
|
5f8e5b1d1278833f0f46bac686a3a19b37f997ba
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os.path
import tensorflow as tf
import helper
import warnings
from distutils.version import LooseVersion
import project_tests as tests
# Check TensorFlow Version
assert LooseVersion(tf.__version__) >= LooseVersion('1.0'), 'Please use TensorFlow version 1.0 or newer. You are using {}'.format(tf.__version__)
print('TensorFlow Version: {}'.format(tf.__version__))
# Check for a GPU
if not tf.test.gpu_device_name():
warnings.warn('No GPU found. Please use a GPU to train your neural network.')
else:
print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
def load_vgg(sess, vgg_path):
"""
Load Pretrained VGG Model into TensorFlow.
:param sess: TensorFlow Session
:param vgg_path: Path to vgg folder, containing "variables/" and "saved_model.pb"
:return: Tuple of Tensors from VGG model (image_input, keep_prob, layer3_out, layer4_out, layer7_out)
"""
# TODO: Implement function
# Use tf.saved_model.loader.load to load the model and weights
vgg_tag = 'vgg16'
vgg_input_tensor_name = 'image_input:0'
vgg_keep_prob_tensor_name = 'keep_prob:0'
vgg_layer3_out_tensor_name = 'layer3_out:0'
vgg_layer4_out_tensor_name = 'layer4_out:0'
vgg_layer7_out_tensor_name = 'layer7_out:0'
tf.saved_model.loader.load(sess, [vgg_tag], vgg_path)
graph = tf.get_default_graph()
input_layer = graph.get_tensor_by_name(vgg_input_tensor_name)
keep_prob = graph.get_tensor_by_name(vgg_keep_prob_tensor_name)
layer3 = graph.get_tensor_by_name(vgg_layer3_out_tensor_name)
layer4 = graph.get_tensor_by_name(vgg_layer4_out_tensor_name)
layer7 = graph.get_tensor_by_name(vgg_layer7_out_tensor_name)
return input_layer, keep_prob, layer3, layer4, layer7
tests.test_load_vgg(load_vgg, tf)
def layers(vgg_layer3_out, vgg_layer4_out, vgg_layer7_out, num_classes):
"""
Create the layers for a fully convolutional network. Build skip-layers using the vgg layers.
:param vgg_layer3_out: TF Tensor for VGG Layer 3 output
:param vgg_layer4_out: TF Tensor for VGG Layer 4 output
:param vgg_layer7_out: TF Tensor for VGG Layer 7 output
:param num_classes: Number of classes to classify
:return: The Tensor for the last layer of output
"""
# TODO: Implement function
weights_stddev = 0.01
weights_l2 = 1e-3
conv_1x1_7 = tf.layers.conv2d(vgg_layer7_out, num_classes, 1, padding='same', kernel_initializer = tf.random_normal_initializer(stddev=weights_stddev) , kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='conv_1x1_of_7')
first_upsamplex2 = tf.layers.conv2d_transpose(conv_1x1_7, num_classes, 4, strides=(2,2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=weights_stddev), kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='first_upsamplex2')
conv_1x1_4 = tf.layers.conv2d(vgg_layer4_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=weights_stddev), kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='conv_1x1_of_4')
first_skip = tf.add(first_upsamplex2, conv_1x1_4, name='first_skip')
second_upsamplex2 = tf.layers.conv2d_transpose(first_skip, num_classes, 4, strides=(2,2), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=weights_stddev), kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='second_upsamplex2')
conv_1x1_3 = tf.layers.conv2d(vgg_layer3_out, num_classes, 1, padding='same', kernel_initializer=tf.random_normal_initializer(stddev=weights_stddev), kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='conv_1x1_of_3')
second_skip = tf.add(second_upsamplex2, conv_1x1_3, name='second_skip')
third_upsamplex8 = tf.layers.conv2d_transpose(second_skip, num_classes, 16, strides=(8,8), padding='same', kernel_initializer=tf.random_normal_initializer(stddev=weights_stddev), kernel_regularizer=tf.contrib.layers.l2_regularizer(weights_l2), name='third_upsamplex8')
return third_upsamplex8
tests.test_layers(layers)
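# Quick sanity check on the strides above (a worked example, not project code):
# the decoder upsamples by 2 * 2 * 8 = 32x in total, which exactly undoes the
# /32 downsampling of VGG16's layer7 and yields per-pixel logits at the input
# resolution -- the classic FCN-8s layout with skips from pool4 and pool3.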
def optimize(nn_last_layer, correct_label, learning_rate, num_classes):
"""
    Build the TensorFlow loss and optimizer operations.
:param nn_last_layer: TF Tensor of the last layer in the neural network
:param correct_label: TF Placeholder for the correct label image
:param learning_rate: TF Placeholder for the learning rate
:param num_classes: Number of classes to classify
:return: Tuple of (logits, train_op, cross_entropy_loss)
"""
# TODO: Implement function
logits = tf.reshape(nn_last_layer, (-1, num_classes))
correct_label = tf.reshape(correct_label, (-1, num_classes))
cross_entropy_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=correct_label))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
train_op = optimizer.minimize(cross_entropy_loss)
return logits, train_op, cross_entropy_loss
tests.test_optimize(optimize)
def train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image,
correct_label, keep_prob, learning_rate):
"""
Train neural network and print out the loss during training.
:param sess: TF Session
:param epochs: Number of epochs
:param batch_size: Batch size
:param get_batches_fn: Function to get batches of training data. Call using get_batches_fn(batch_size)
:param train_op: TF Operation to train the neural network
:param cross_entropy_loss: TF Tensor for the amount of loss
:param input_image: TF Placeholder for input images
:param correct_label: TF Placeholder for label images
:param keep_prob: TF Placeholder for dropout keep probability
:param learning_rate: TF Placeholder for learning rate
"""
# TODO: Implement function
sess.run(tf.global_variables_initializer())
print("Starting training for {} epochs".format(epochs))
print()
for epoch in range(epochs):
print('Epoch: {}'.format(epoch+1))
loss_log = []
for image, label in get_batches_fn(batch_size):
_, loss = sess.run([train_op, cross_entropy_loss], feed_dict={
input_image: image,
correct_label: label,
keep_prob: 0.5,
learning_rate: 0.00001
})
            loss_log.append('{:.3f}'.format(loss))
print(loss_log)
print("Training finished")
tests.test_train_nn(train_nn)
def run():
num_classes = 2
image_shape = (160, 576)
data_dir = './data'
runs_dir = './runs'
tests.test_for_kitti_dataset(data_dir)
# Download pretrained vgg model
helper.maybe_download_pretrained_vgg(data_dir)
# OPTIONAL: Train and Inference on the cityscapes dataset instead of the Kitti dataset.
# You'll need a GPU with at least 10 teraFLOPS to train on.
# https://www.cityscapes-dataset.com/
with tf.Session() as sess:
# Path to vgg model
vgg_path = os.path.join(data_dir, 'vgg')
# Create function to get batches
get_batches_fn = helper.gen_batch_function(os.path.join(data_dir, 'data_road/training'), image_shape)
# OPTIONAL: Augment Images for better results
# https://datascience.stackexchange.com/questions/5224/how-to-prepare-augment-images-for-neural-network
# Placeholders
correct_label = tf.placeholder(tf.int32, [None, None, None, num_classes], name='correct_label')
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
# TODO: Build NN using load_vgg, layers, and optimize function
input_image, keep_prob, layer3, layer4, layer7 = load_vgg(sess, vgg_path)
layer_output = layers(layer3, layer4, layer7, num_classes)
logits, train_op, cross_entropy_loss = optimize(layer_output, correct_label, learning_rate, num_classes)
# TODO: Train NN using the train_nn function
epochs = 48
batch_size = 5
train_nn(sess, epochs, batch_size, get_batches_fn, train_op, cross_entropy_loss, input_image, correct_label, keep_prob, learning_rate)
# TODO: Save inference data using helper.save_inference_samples
helper.save_inference_samples(runs_dir, data_dir, sess, image_shape, logits, keep_prob, input_image)
# OPTIONAL: Apply the trained model to a video
if __name__ == '__main__':
run()
| 47.272222
| 272
| 0.736044
|
4a12d902ebd96ef754ff7bf60994e736f1bb8602
| 2,014
|
py
|
Python
|
main.py
|
ToxicDeeModder1/Heroku-app.json
|
9843b1419bad1f4713dc9680f8a767ddb42bb074
|
[
"MIT"
] | null | null | null |
main.py
|
ToxicDeeModder1/Heroku-app.json
|
9843b1419bad1f4713dc9680f8a767ddb42bb074
|
[
"MIT"
] | null | null | null |
main.py
|
ToxicDeeModder1/Heroku-app.json
|
9843b1419bad1f4713dc9680f8a767ddb42bb074
|
[
"MIT"
] | null | null | null |
# (c) @AbirHasan2005
import os
from configs import Configs
from pyromod import listen  # noqa: F401 - side-effect import: pyromod patches pyrogram's Client with .listen()
from asyncio import TimeoutError
from core.steps import StartSteps
from pyrogram import Client, filters, idle
from pyrogram.types import InlineKeyboardMarkup, InlineKeyboardButton, Message, CallbackQuery
app = Client(
session_name=Configs.SESSION_NAME,
api_id=Configs.API_ID,
api_hash=Configs.API_HASH,
bot_token=Configs.BOT_TOKEN
)
@app.on_message(filters.command("start") & filters.private & ~filters.edited)
async def start_command(_, m: Message):
    await m.reply_text("*✋Hi, I am the Heroku app.json maker bot.\n\n"
                       "To start making an app.json for your Heroku app, send /Joined.\n"
                       "Please join the channel below to start using me.\n\n🔵Developed by @ToxicDeeModderr*", quote=True, disable_web_page_preview=True)
@app.on_message(filters.command("/Joined") & ~filters.edited & filters.private)
async def f_command(bot: Client, m: Message):
editable = await m.reply_text("*Checking...*",
reply_markup=InlineKeyboardMarkup([
[InlineKeyboardButton("Cancel Process", callback_data="cancelProcess")]
]))
try:
app_json = await StartSteps(bot, editable)
if os.path.exists(app_json):
await bot.send_document(
chat_id=m.chat.id,
document=app_json,
caption="**Made by @TheBotsWorldChannel**"
)
await editable.edit("Sent `app.json` !!")
os.remove(app_json)
else:
await editable.edit("Failed to Make `app.json` !!\n\n"
"Try again.")
except TimeoutError:
pass
@app.on_callback_query()
async def cb_handler(_, cb: CallbackQuery):
if "cancelProcess" in cb.data:
await cb.message.edit("Process Cancelled!")
app.start()
print("Bot Started!")
idle()
app.stop()
print("Bot Stopped!")
| 33.566667
| 147
| 0.627607
|
4a12d92141bdca053c5d564f90f4af47e9a22d7b
| 15,585
|
py
|
Python
|
test/onnx/test_onnx_opset.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 7
|
2021-05-29T16:31:51.000Z
|
2022-02-21T18:52:25.000Z
|
test/onnx/test_onnx_opset.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-06-25T22:00:31.000Z
|
2021-06-25T22:00:31.000Z
|
test/onnx/test_onnx_opset.py
|
Hacky-DH/pytorch
|
80dc4be615854570aa39a7e36495897d8a040ecc
|
[
"Intel"
] | 1
|
2021-10-05T07:05:26.000Z
|
2021-10-05T07:05:26.000Z
|
from test_pytorch_common import TestCase, run_tests
import torch
import torch.onnx
from torch.nn import Module
import onnx
import io
from torch.onnx.symbolic_helper import _export_onnx_opset_version
from torch.onnx import ir_version, producer_name, producer_version
def check_onnx_opset_operator(model, ops, opset_version=_export_onnx_opset_version):
# check_onnx_components
assert model.ir_version == ir_version and \
model.producer_name == producer_name and \
model.producer_version == producer_version and \
model.opset_import[0].version == opset_version
# check the schema with the onnx checker
onnx.checker.check_model(model)
# check target type and attributes
graph = model.graph
# ops should contain an object for each node
# in graph.node, in the right order.
# At least the op_name should be specified,
# but the op's attributes can optionally be
# specified as well
assert len(ops) == len(graph.node)
for i in range(0, len(ops)):
assert graph.node[i].op_type == ops[i]["op_name"]
if "attributes" in ops[i] :
attributes = ops[i]["attributes"]
assert len(attributes) == len(graph.node[i].attribute)
for j in range(0, len(attributes)):
for attribute_field in attributes[j].keys():
assert attributes[j][attribute_field] == getattr(graph.node[i].attribute[j], attribute_field)
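# For example, a spec entry such as
#   {"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]}
# asserts both the node's op type and a single INT attribute (type 2 is
# AttributeProto.INT); the tests below build their expectations this way.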
def check_onnx_opsets_operator(module, x, ops, opset_versions, training=torch.onnx.TrainingMode.EVAL, example_outputs=None,
input_names=None, dynamic_axes=None):
for opset_version in opset_versions:
f = io.BytesIO()
torch.onnx.export(module, x, f,
opset_version=opset_version,
training=training,
example_outputs=example_outputs,
input_names=input_names,
dynamic_axes=dynamic_axes)
model = onnx.load(io.BytesIO(f.getvalue()))
check_onnx_opset_operator(model, ops[opset_version], opset_version)
class TestONNXOpset(TestCase):
def test_opset_fallback(self):
class MyModule(Module):
def forward(self, x):
return torch.isnan(x)
ops = [{"op_name" : "IsNaN"}]
ops = {9 : ops, 10 : ops}
x = torch.tensor([1.0, float("nan"), 2.0])
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_topk(self):
class MyModule(Module):
def forward(self, x):
return torch.topk(x, 3)
ops_9 = [{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2},
{"name": "k", "i": 3, "type": 2}]}]
ops_10 = [{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]}]
ops = {9: ops_9, 10: ops_10}
x = torch.arange(1., 6., requires_grad=True)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
# test with dynamic k
class MyModuleDynamic(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, input, k):
return torch.topk(input, k)
ops_10 = [{"op_name": "Constant", "attributes": [{"name": "value", "type": 4}]},
{"op_name": "Reshape"},
{"op_name": "TopK", "attributes": [{"name": "axis", "i": -1, "type": 2}]}]
ops = {10: ops_10}
x = torch.arange(1., 6., requires_grad=True)
k = torch.tensor(3)
module = MyModuleDynamic()
example_output = module(x, k)
check_onnx_opsets_operator(module, [x, k], ops,
opset_versions=[10],
example_outputs=example_output)
def test_maxpool(self):
module = torch.nn.MaxPool1d(2, stride=1)
ops_9 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops_10 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[9, 10])
# add test with dilations
module = torch.nn.MaxPool1d(2, stride=1, dilation=2)
ops_10 = [{"op_name" : "MaxPool",
"attributes" :
[{"name": "ceil_mode", "i": 0, "type": 2},
{"name": "dilations", "ints": [2], "type": 7},
{"name": "kernel_shape", "ints": [2], "type": 7},
{"name": "pads", "ints": [0, 0], "type": 7},
{"name": "strides", "ints": [1], "type": 7}]}]
ops = {10 : ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(module, x, ops, opset_versions=[10])
def test_upsample(self):
class MyModule(Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x, size=size, mode="nearest")
module = MyModule()
ops8 = [{"op_name" : "Upsample", "attributes" : [{"name": "mode", "s": ("nearest").encode(), "type": 3},
{"name": "scales", "floats": [1.0, 1.0, 2.0, 2.0], "type": 6}]}]
ops9 = [{"op_name" : "Constant"},
{"op_name" : "Upsample", "attributes" : [{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops = {8 : ops8, 9 : ops9}
x = torch.randn(2, 2, 2, 2)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_cast_constant(self):
class MyModule(Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, x):
return x - 1
module = MyModule()
ops_8 = [{"op_name" : "Constant"},
{"op_name" : "Cast", "attributes": [{"name": "to", "i": 7, "type": 2}]},
{"op_name" : "Sub"}]
ops_9 = [{"op_name" : "Constant"}, {"op_name" : "Sub"}]
ops = {8 : ops_8, 9 : ops_9}
x = torch.ones(5, 6, dtype=torch.long)
check_onnx_opsets_operator(module, x, ops, opset_versions=[8, 9])
def test_slice(self):
class MyModule(Module):
def forward(self, x):
return x[0:1]
ops_9 = [{"op_name" : "Slice",
"attributes" :
[{"name": "axes", "ints": [0], "type": 7},
{"name": "ends", "ints": [1], "type": 7},
{"name": "starts", "ints": [0], "type": 7}]}]
ops_10 = [{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Slice",
"attributes" : []}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(3)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
class DynamicSliceModel(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
return x[1:x.size(0)]
module = DynamicSliceModel()
x = torch.rand(1, 2)
example_output = module(x)
ops_10 = [{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Gather",
"attributes" : [{"name" : "axis", "i" : 0, "type" : 2}]},
{"op_name" : "Unsqueeze",
"attributes" : [{"name" : "axes", "i" : 0, "type" : 7}]},
{"op_name": "Constant"},
{"op_name" : "Slice",
"attributes" : []}]
ops = {10 : ops_10}
check_onnx_opsets_operator(module, x, ops, opset_versions=[10], example_outputs=example_output,
input_names=['x'], dynamic_axes={"x": [0, 1]})
ops_10 = [{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Slice",
"attributes" : []}]
ops = {10 : ops_10}
check_onnx_opsets_operator(module, x, ops, opset_versions=[10], example_outputs=example_output)
def test_flip(self):
class MyModule(Module):
def forward(self, x):
return torch.flip(x, dims=[0])
ops_10 = [{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Slice",
"attributes" : []}]
ops = {10 : ops_10}
import numpy
x = torch.tensor(numpy.arange(6.0).reshape(2, 3))
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[10])
def test_dropout(self):
class MyModule(Module):
def __init__(self):
super(MyModule, self).__init__()
self.dropout = torch.nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
x = torch.randn(1, 2, 3)
# we should only export the onnx Dropout op in training mode; test both modes
# test training mode
ops = [{"op_name" : "Dropout", "attributes" : [{"name" : "ratio", "f" : 0.5, "type" : 1}]}]
ops = {9 : ops, 10 : ops}
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=torch.onnx.TrainingMode.TRAINING)
# test eval mode
ops = [{"op_name" : "Identity"}]
ops = {9 : ops, 10 : ops}
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10], training=torch.onnx.TrainingMode.EVAL)
def test_full(self):
class MyModule(Module):
def forward(self, x):
return torch.full((3, 4), x)
ops = [{"op_name" : "Constant"},
{"op_name" : "ConstantOfShape"},
{"op_name" : "Add"}]
ops = {9 : ops, 10 : ops}
x = torch.tensor(12.)
check_onnx_opsets_operator(MyModule(), x, ops, opset_versions=[9, 10])
def test_interpolate(self):
class MyModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
return torch.nn.functional.interpolate(x,
size=size,
mode="nearest")
ops_9 = [{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Gather"},
{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Gather"},
{"op_name" : "Constant"},
{"op_name" : "Mul"},
{"op_name" : "Constant"},
{"op_name" : "Mul"},
{"op_name" : "Unsqueeze"},
{"op_name" : "Unsqueeze"},
{"op_name" : "Concat"},
{"op_name" : "Constant"},
{"op_name" : "Cast"},
{"op_name" : "Shape"},
{"op_name" : "Slice"},
{"op_name" : "Cast"},
{"op_name" : "Div"},
{"op_name" : "Concat"},
{"op_name" : "Upsample",
"attributes" :
[{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops_10 = [{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Gather"},
{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Gather"},
{"op_name" : "Constant"},
{"op_name" : "Mul"},
{"op_name" : "Constant"},
{"op_name" : "Mul"},
{"op_name" : "Unsqueeze"},
{"op_name" : "Unsqueeze"},
{"op_name" : "Concat"},
{"op_name" : "Constant"},
{"op_name" : "Cast"},
{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Slice"},
{"op_name" : "Cast"},
{"op_name" : "Div"},
{"op_name" : "Concat"},
{"op_name" : "Resize",
"attributes" :
[{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(MyModel(), x, ops, opset_versions=[9, 10],
input_names=["x"], dynamic_axes={"x": [0, 1, 2, 3]})
ops_9 = [{"op_name" : "Constant"},
{"op_name" : "Shape"},
{"op_name" : "Slice"},
{"op_name" : "Cast"},
{"op_name" : "Div"},
{"op_name" : "Concat"},
{"op_name" : "Upsample",
"attributes" :
[{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops_10 = [{"op_name" : "Constant"},
{"op_name" : "Shape"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Constant"},
{"op_name" : "Slice"},
{"op_name" : "Cast"},
{"op_name" : "Div"},
{"op_name" : "Concat"},
{"op_name" : "Resize"}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(1, 2, 3, 4, requires_grad=True)
check_onnx_opsets_operator(MyModel(), x, ops, opset_versions=[9, 10])
class MyDynamicModel(torch.nn.Module):
def forward(self, x):
size = [v * 2 for v in x.size()[2:]]
# work around for now: turn the dynamic sizes into constant
size = [int(i) for i in size]
return torch.nn.functional.interpolate(x,
size=size,
mode="nearest")
ops_9 = [{"op_name" : "Constant"},
{"op_name" : "Upsample",
"attributes" :
[{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops_10 = [{"op_name" : "Constant"},
{"op_name" : "Resize",
"attributes" :
[{"name": "mode", "s": ("nearest").encode(), "type": 3}]}]
ops = {9 : ops_9, 10 : ops_10}
x = torch.randn(20, 16, 50)
check_onnx_opsets_operator(MyDynamicModel(), x, ops, opset_versions=[9, 10])
if __name__ == "__main__":
run_tests()
| 41.339523
| 123
| 0.468592
|
4a12d973a2344f639da6c45ae67ad5d167ad1fe0
| 46,939
|
py
|
Python
|
web3/contract.py
|
egalano/web3.py
|
9e63d1885ad2f56cf2c434f7bb016b5e07a638e0
|
[
"MIT"
] | null | null | null |
web3/contract.py
|
egalano/web3.py
|
9e63d1885ad2f56cf2c434f7bb016b5e07a638e0
|
[
"MIT"
] | null | null | null |
web3/contract.py
|
egalano/web3.py
|
9e63d1885ad2f56cf2c434f7bb016b5e07a638e0
|
[
"MIT"
] | null | null | null |
"""Interaction with smart contracts over Web3 connector.
"""
import copy
import itertools
from eth_abi import (
decode_abi,
)
from eth_abi.exceptions import (
DecodingError,
)
from eth_utils import (
add_0x_prefix,
encode_hex,
function_abi_to_4byte_selector,
is_list_like,
is_text,
to_tuple,
)
from hexbytes import (
HexBytes,
)
from web3._utils.abi import (
abi_to_signature,
check_if_arguments_can_be_encoded,
fallback_func_abi_exists,
filter_by_type,
get_abi_output_types,
get_constructor_abi,
is_array_type,
map_abi_data,
merge_args_and_kwargs,
)
from web3._utils.blocks import (
is_hex_encoded_block_hash,
)
from web3._utils.contracts import (
encode_abi,
find_matching_event_abi,
find_matching_fn_abi,
get_function_info,
prepare_transaction,
)
from web3._utils.datatypes import (
PropertyCheckingFactory,
)
from web3._utils.decorators import (
combomethod,
deprecated_for,
)
from web3._utils.empty import (
empty,
)
from web3._utils.encoding import (
to_4byte_hex,
to_hex,
)
from web3._utils.events import (
EventFilterBuilder,
get_event_data,
is_dynamic_sized_type,
)
from web3._utils.filters import (
construct_event_filter_params,
)
from web3._utils.function_identifiers import (
FallbackFn,
)
from web3._utils.normalizers import (
BASE_RETURN_NORMALIZERS,
normalize_abi,
normalize_address,
normalize_bytecode,
)
from web3._utils.toolz import (
compose,
partial,
)
from web3._utils.transactions import (
fill_transaction_defaults,
)
from web3.exceptions import (
BadFunctionCallOutput,
BlockNumberOutofRange,
FallbackNotFound,
MismatchedABI,
NoABIEventsFound,
NoABIFound,
NoABIFunctionsFound,
)
ACCEPTABLE_EMPTY_STRINGS = ["0x", b"0x", "", b""]
class ContractFunctions:
"""Class containing contract function objects
"""
def __init__(self, abi, web3, address=None):
self.abi = abi
self.web3 = web3
self.address = address
if self.abi:
self._functions = filter_by_type('function', self.abi)
for func in self._functions:
setattr(
self,
func['name'],
ContractFunction.factory(
func['name'],
web3=self.web3,
contract_abi=self.abi,
address=self.address,
function_identifier=func['name']))
def __iter__(self):
if not hasattr(self, '_functions') or not self._functions:
return
for func in self._functions:
yield func['name']
def __getattr__(self, function_name):
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
if '_functions' not in self.__dict__:
raise NoABIFunctionsFound(
"The abi for this contract contains no function definitions. ",
"Are you sure you provided the correct contract abi?"
)
elif function_name not in self.__dict__['_functions']:
raise MismatchedABI(
"The function '{}' was not found in this contract's abi. ".format(function_name),
"Are you sure you provided the correct contract abi?"
)
else:
return super().__getattribute__(function_name)
def __getitem__(self, function_name):
return getattr(self, function_name)
class ContractEvents:
"""Class containing contract event objects
This is available via:
.. code-block:: python
>>> mycontract.events
<web3.contract.ContractEvents object at 0x108afde10>
    Iterating over this object yields a :class:`ContractEvent` proxy class
    for each event defined in the contract ABI:
.. code-block:: python
>>> for e in mycontract.events: print(e)
<class 'web3._utils.datatypes.LogAnonymous'>
...
"""
def __init__(self, abi, web3, address=None):
if abi:
self.abi = abi
self._events = filter_by_type('event', self.abi)
for event in self._events:
setattr(
self,
event['name'],
ContractEvent.factory(
event['name'],
web3=web3,
contract_abi=self.abi,
address=address,
event_name=event['name']))
def __getattr__(self, event_name):
if '_events' not in self.__dict__:
raise NoABIEventsFound(
"The abi for this contract contains no event definitions. ",
"Are you sure you provided the correct contract abi?"
)
elif event_name not in self.__dict__['_events']:
raise MismatchedABI(
"The event '{}' was not found in this contract's abi. ".format(event_name),
"Are you sure you provided the correct contract abi?"
)
else:
return super().__getattribute__(event_name)
def __getitem__(self, event_name):
return getattr(self, event_name)
def __iter__(self):
"""Iterate over supported
:return: Iterable of :class:`ContractEvent`
"""
for event in self._events:
yield self[event['name']]
class Contract:
"""Base class for Contract proxy classes.
First you need to create your Contract classes using
:meth:`web3.eth.Eth.contract` that takes compiled Solidity contract
ABI definitions as input. The created class object will be a subclass of
this base class.
After you have your Contract proxy class created you can interact with
smart contracts
* Create a Contract proxy object for an existing deployed smart contract by
its address using :meth:`__init__`
* Deploy a new smart contract using :py:meth:`Contract.deploy`
"""
# set during class construction
web3 = None
# instance level properties
address = None
# class properties (overridable at instance level)
abi = None
asm = None
ast = None
bytecode = None
bytecode_runtime = None
clone_bin = None
functions = None
caller = None
#: Instance of :class:`ContractEvents` presenting available Event ABIs
events = None
dev_doc = None
interface = None
metadata = None
opcodes = None
src_map = None
src_map_runtime = None
user_doc = None
def __init__(self, address=None):
"""Create a new smart contract proxy object.
:param address: Contract address as 0x hex string
"""
if self.web3 is None:
raise AttributeError(
'The `Contract` class has not been initialized. Please use the '
'`web3.contract` interface to create your contract class.'
)
if address:
self.address = normalize_address(self.web3.ens, address)
if not self.address:
raise TypeError("The address argument is required to instantiate a contract.")
self.functions = ContractFunctions(self.abi, self.web3, self.address)
self.caller = ContractCaller(self.abi, self.web3, self.address)
self.events = ContractEvents(self.abi, self.web3, self.address)
self.fallback = Contract.get_fallback_function(self.abi, self.web3, self.address)
@classmethod
def factory(cls, web3, class_name=None, **kwargs):
kwargs['web3'] = web3
normalizers = {
'abi': normalize_abi,
'address': partial(normalize_address, kwargs['web3'].ens),
'bytecode': normalize_bytecode,
'bytecode_runtime': normalize_bytecode,
}
contract = PropertyCheckingFactory(
class_name or cls.__name__,
(cls,),
kwargs,
normalizers=normalizers,
)
contract.functions = ContractFunctions(contract.abi, contract.web3)
contract.caller = ContractCaller(contract.abi, contract.web3, contract.address)
contract.events = ContractEvents(contract.abi, contract.web3)
contract.fallback = Contract.get_fallback_function(contract.abi, contract.web3)
return contract
#
# Contract Methods
#
@classmethod
def constructor(cls, *args, **kwargs):
"""
:param args: The contract constructor arguments as positional arguments
:param kwargs: The contract constructor arguments as keyword arguments
:return: a contract constructor object
"""
if cls.bytecode is None:
raise ValueError(
"Cannot call constructor on a contract that does not have 'bytecode' associated "
"with it"
)
return ContractConstructor(cls.web3,
cls.abi,
cls.bytecode,
*args,
**kwargs)
# Public API
#
@combomethod
def encodeABI(cls, fn_name, args=None, kwargs=None, data=None):
"""
Encodes the arguments using the Ethereum ABI for the contract function
        that matches the given name and arguments.
:param data: defaults to function selector
"""
fn_abi, fn_selector, fn_arguments = get_function_info(
fn_name, contract_abi=cls.abi, args=args, kwargs=kwargs,
)
if data is None:
data = fn_selector
return encode_abi(cls.web3, fn_abi, fn_arguments, data)
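    # A usage sketch (function name, arguments, and the contract instance are
    # illustrative; 0xa9059cbb is the 4-byte selector of transfer(address,uint256)):
    #
    #   data = contract.encodeABI(fn_name="transfer", args=[to_addr, 10**18])
    #   # -> "0xa9059cbb..." (selector followed by the ABI-encoded arguments)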
@combomethod
def all_functions(self):
return find_functions_by_identifier(
self.abi, self.web3, self.address, lambda _: True
)
@combomethod
def get_function_by_signature(self, signature):
if ' ' in signature:
raise ValueError(
'Function signature should not contain any spaces. '
'Found spaces in input: %s' % signature
)
def callable_check(fn_abi):
return abi_to_signature(fn_abi) == signature
fns = find_functions_by_identifier(self.abi, self.web3, self.address, callable_check)
return get_function_by_identifier(fns, 'signature')
@combomethod
def find_functions_by_name(self, fn_name):
def callable_check(fn_abi):
return fn_abi['name'] == fn_name
return find_functions_by_identifier(
self.abi, self.web3, self.address, callable_check
)
@combomethod
def get_function_by_name(self, fn_name):
fns = self.find_functions_by_name(fn_name)
return get_function_by_identifier(fns, 'name')
@combomethod
def get_function_by_selector(self, selector):
def callable_check(fn_abi):
return encode_hex(function_abi_to_4byte_selector(fn_abi)) == to_4byte_hex(selector)
fns = find_functions_by_identifier(self.abi, self.web3, self.address, callable_check)
return get_function_by_identifier(fns, 'selector')
@combomethod
def decode_function_input(self, data):
data = HexBytes(data)
selector, params = data[:4], data[4:]
func = self.get_function_by_selector(selector)
names = [x['name'] for x in func.abi['inputs']]
types = [x['type'] for x in func.abi['inputs']]
decoded = decode_abi(types, params)
normalized = map_abi_data(BASE_RETURN_NORMALIZERS, types, decoded)
return func, dict(zip(names, normalized))
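    # A usage sketch (hypothetical ERC-20 transfer; the payload and names are
    # illustrative, not from this module):
    #
    #   fn, args = token.decode_function_input(tx["input"])
    #   # fn   -> <Function transfer(address,uint256)>
    #   # args -> {"_to": "0x...", "_value": 10**18}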
@combomethod
def find_functions_by_args(self, *args):
def callable_check(fn_abi):
return check_if_arguments_can_be_encoded(fn_abi, args=args, kwargs={})
return find_functions_by_identifier(
self.abi, self.web3, self.address, callable_check
)
@combomethod
def get_function_by_args(self, *args):
fns = self.find_functions_by_args(*args)
return get_function_by_identifier(fns, 'args')
#
# Private Helpers
#
_return_data_normalizers = tuple()
@classmethod
def _prepare_transaction(cls,
fn_name,
fn_args=None,
fn_kwargs=None,
transaction=None):
return prepare_transaction(
cls.address,
cls.web3,
fn_identifier=fn_name,
contract_abi=cls.abi,
transaction=transaction,
fn_args=fn_args,
fn_kwargs=fn_kwargs,
)
@classmethod
def _find_matching_fn_abi(cls, fn_identifier=None, args=None, kwargs=None):
return find_matching_fn_abi(cls.abi,
fn_identifier=fn_identifier,
args=args,
kwargs=kwargs)
@classmethod
def _find_matching_event_abi(cls, event_name=None, argument_names=None):
return find_matching_event_abi(
abi=cls.abi,
event_name=event_name,
argument_names=argument_names)
@staticmethod
def get_fallback_function(abi, web3, address=None):
if abi and fallback_func_abi_exists(abi):
return ContractFunction.factory(
'fallback',
web3=web3,
contract_abi=abi,
address=address,
function_identifier=FallbackFn)()
return NonExistentFallbackFunction()
@combomethod
def _encode_constructor_data(cls, args=None, kwargs=None):
constructor_abi = get_constructor_abi(cls.abi)
if constructor_abi:
if args is None:
args = tuple()
if kwargs is None:
kwargs = {}
arguments = merge_args_and_kwargs(constructor_abi, args, kwargs)
deploy_data = add_0x_prefix(
encode_abi(cls.web3, constructor_abi, arguments, data=cls.bytecode)
)
else:
deploy_data = to_hex(cls.bytecode)
return deploy_data
def mk_collision_prop(fn_name):
def collision_fn():
msg = "Namespace collision for function name {0} with ConciseContract API.".format(fn_name)
raise AttributeError(msg)
collision_fn.__name__ = fn_name
return collision_fn
class ContractConstructor:
"""
Class for contract constructor API.
"""
def __init__(self, web3, abi, bytecode, *args, **kwargs):
self.web3 = web3
self.abi = abi
self.bytecode = bytecode
self.data_in_transaction = self._encode_data_in_transaction(*args, **kwargs)
@combomethod
def _encode_data_in_transaction(self, *args, **kwargs):
constructor_abi = get_constructor_abi(self.abi)
if constructor_abi:
if not args:
args = tuple()
if not kwargs:
kwargs = {}
arguments = merge_args_and_kwargs(constructor_abi, args, kwargs)
data = add_0x_prefix(
encode_abi(self.web3, constructor_abi, arguments, data=self.bytecode)
)
else:
data = to_hex(self.bytecode)
return data
@combomethod
def estimateGas(self, transaction=None):
if transaction is None:
estimate_gas_transaction = {}
else:
estimate_gas_transaction = dict(**transaction)
self.check_forbidden_keys_in_transaction(estimate_gas_transaction,
["data", "to"])
if self.web3.eth.defaultAccount is not empty:
estimate_gas_transaction.setdefault('from', self.web3.eth.defaultAccount)
estimate_gas_transaction['data'] = self.data_in_transaction
return self.web3.eth.estimateGas(estimate_gas_transaction)
@combomethod
def transact(self, transaction=None):
if transaction is None:
transact_transaction = {}
else:
transact_transaction = dict(**transaction)
self.check_forbidden_keys_in_transaction(transact_transaction,
["data", "to"])
if self.web3.eth.defaultAccount is not empty:
transact_transaction.setdefault('from', self.web3.eth.defaultAccount)
transact_transaction['data'] = self.data_in_transaction
# TODO: handle asynchronous contract creation
return self.web3.eth.sendTransaction(transact_transaction)
@combomethod
def buildTransaction(self, transaction=None):
"""
Build the transaction dictionary without sending
"""
if transaction is None:
built_transaction = {}
else:
built_transaction = dict(**transaction)
self.check_forbidden_keys_in_transaction(built_transaction,
["data", "to"])
if self.web3.eth.defaultAccount is not empty:
built_transaction.setdefault('from', self.web3.eth.defaultAccount)
built_transaction['data'] = self.data_in_transaction
built_transaction['to'] = b''
return fill_transaction_defaults(self.web3, built_transaction)
@staticmethod
def check_forbidden_keys_in_transaction(transaction, forbidden_keys=None):
keys_found = set(transaction.keys()) & set(forbidden_keys)
if keys_found:
raise ValueError("Cannot set {} in transaction".format(', '.join(keys_found)))
class ConciseMethod:
ALLOWED_MODIFIERS = {'call', 'estimateGas', 'transact', 'buildTransaction'}
def __init__(self, function, normalizers=None):
self._function = function
self._function._return_data_normalizers = normalizers
def __call__(self, *args, **kwargs):
return self.__prepared_function(*args, **kwargs)
def __prepared_function(self, *args, **kwargs):
if not kwargs:
modifier, modifier_dict = 'call', {}
elif len(kwargs) == 1:
modifier, modifier_dict = kwargs.popitem()
if modifier not in self.ALLOWED_MODIFIERS:
raise TypeError(
"The only allowed keyword arguments are: %s" % self.ALLOWED_MODIFIERS)
else:
raise TypeError("Use up to one keyword argument, one of: %s" % self.ALLOWED_MODIFIERS)
return getattr(self._function(*args), modifier)(modifier_dict)
class ConciseContract:
"""
An alternative Contract Factory which invokes all methods as `call()`,
unless you add a keyword argument. The keyword argument assigns the prep method.
This call
> contract.withdraw(amount, transact={'from': eth.accounts[1], 'gas': 100000, ...})
is equivalent to this call in the classic contract:
> contract.functions.withdraw(amount).transact({'from': eth.accounts[1], 'gas': 100000, ...})
"""
@deprecated_for(
"contract.caller.<method name> or contract.caller({transaction_dict}).<method name>"
)
def __init__(self, classic_contract, method_class=ConciseMethod):
classic_contract._return_data_normalizers += CONCISE_NORMALIZERS
self._classic_contract = classic_contract
self.address = self._classic_contract.address
protected_fn_names = [fn for fn in dir(self) if not fn.endswith('__')]
for fn_name in self._classic_contract.functions:
# Override namespace collisions
if fn_name in protected_fn_names:
_concise_method = mk_collision_prop(fn_name)
else:
_classic_method = getattr(
self._classic_contract.functions,
fn_name)
_concise_method = method_class(
_classic_method,
self._classic_contract._return_data_normalizers
)
setattr(self, fn_name, _concise_method)
@classmethod
def factory(cls, *args, **kwargs):
return compose(cls, Contract.factory(*args, **kwargs))
def _none_addr(datatype, data):
if datatype == 'address' and int(data, base=16) == 0:
return (datatype, None)
else:
return (datatype, data)
CONCISE_NORMALIZERS = (
_none_addr,
)
class ImplicitMethod(ConciseMethod):
def __call_by_default(self, args):
function_abi = find_matching_fn_abi(self._function.contract_abi,
fn_identifier=self._function.function_identifier,
args=args)
return function_abi['constant'] if 'constant' in function_abi.keys() else False
@deprecated_for("classic contract syntax. Ex: contract.functions.withdraw(amount).transact({})")
def __call__(self, *args, **kwargs):
# Modifier is not provided and method is not constant/pure do a transaction instead
if not kwargs and not self.__call_by_default(args):
return super().__call__(*args, transact={})
else:
return super().__call__(*args, **kwargs)
class ImplicitContract(ConciseContract):
"""
ImplicitContract class is similar to the ConciseContract class
however it performs a transaction instead of a call if no modifier
is given and the method is not marked 'constant' in the ABI.
The transaction will use the default account to send the transaction.
This call
> contract.withdraw(amount)
is equivalent to this call in the classic contract:
> contract.functions.withdraw(amount).transact({})
"""
def __init__(self, classic_contract, method_class=ImplicitMethod):
super().__init__(classic_contract, method_class=method_class)
class NonExistentFallbackFunction:
@staticmethod
def _raise_exception():
raise FallbackNotFound("No fallback function was found in the contract ABI.")
def __getattr__(self, attr):
return NonExistentFallbackFunction._raise_exception
class ContractFunction:
"""Base class for contract functions
    A function accessed via the API contract.functions.myMethod(*args, **kwargs)
is a subclass of this class.
"""
address = None
function_identifier = None
web3 = None
contract_abi = None
abi = None
transaction = None
arguments = None
def __init__(self, abi=None):
self.abi = abi
self.fn_name = type(self).__name__
def __call__(self, *args, **kwargs):
clone = copy.copy(self)
if args is None:
clone.args = tuple()
else:
clone.args = args
if kwargs is None:
clone.kwargs = {}
else:
clone.kwargs = kwargs
clone._set_function_info()
return clone
def _set_function_info(self):
if not self.abi:
self.abi = find_matching_fn_abi(
self.contract_abi,
self.function_identifier,
self.args,
self.kwargs
)
if self.function_identifier is FallbackFn:
self.selector = encode_hex(b'')
elif is_text(self.function_identifier):
self.selector = encode_hex(function_abi_to_4byte_selector(self.abi))
else:
raise TypeError("Unsupported function identifier")
self.arguments = merge_args_and_kwargs(self.abi, self.args, self.kwargs)
def call(self, transaction=None, block_identifier='latest'):
"""
Execute a contract function call using the `eth_call` interface.
        This executes the function locally via ``eth_call`` (no on-chain
        transaction is created) and returns the decoded result.
Reading a public ``owner`` address variable example:
.. code-block:: python
ContractFactory = w3.eth.contract(
abi=wallet_contract_definition["abi"]
)
# Not a real contract address
contract = ContractFactory("0x2f70d3d26829e412A602E83FE8EeBF80255AEeA5")
# Read "owner" public variable
addr = contract.functions.owner().call()
:param transaction: Dictionary of transaction info for web3 interface
        :return: the decoded return value(s) of the contract function
"""
if transaction is None:
call_transaction = {}
else:
call_transaction = dict(**transaction)
if 'data' in call_transaction:
raise ValueError("Cannot set data in call transaction")
if self.address:
call_transaction.setdefault('to', self.address)
if self.web3.eth.defaultAccount is not empty:
call_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in call_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.[methodtype].[method].call()` from"
" a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
block_id = parse_block_identifier(self.web3, block_identifier)
return call_contract_function(
self.web3,
self.address,
self._return_data_normalizers,
self.function_identifier,
call_transaction,
block_id,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
def transact(self, transaction=None):
if transaction is None:
transact_transaction = {}
else:
transact_transaction = dict(**transaction)
if 'data' in transact_transaction:
raise ValueError("Cannot set data in transact transaction")
if self.address is not None:
transact_transaction.setdefault('to', self.address)
if self.web3.eth.defaultAccount is not empty:
transact_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in transact_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.transact` from a contract factory you "
"must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return transact_with_contract_function(
self.address,
self.web3,
self.function_identifier,
transact_transaction,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
def estimateGas(self, transaction=None):
if transaction is None:
estimate_gas_transaction = {}
else:
estimate_gas_transaction = dict(**transaction)
if 'data' in estimate_gas_transaction:
raise ValueError("Cannot set data in estimateGas transaction")
if 'to' in estimate_gas_transaction:
raise ValueError("Cannot set to in estimateGas transaction")
if self.address:
estimate_gas_transaction.setdefault('to', self.address)
if self.web3.eth.defaultAccount is not empty:
estimate_gas_transaction.setdefault('from', self.web3.eth.defaultAccount)
if 'to' not in estimate_gas_transaction:
if isinstance(self, type):
raise ValueError(
"When using `Contract.estimateGas` from a contract factory "
"you must provide a `to` address with the transaction"
)
else:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return estimate_gas_for_function(
self.address,
self.web3,
self.function_identifier,
estimate_gas_transaction,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
def buildTransaction(self, transaction=None):
"""
Build the transaction dictionary without sending
"""
if transaction is None:
built_transaction = {}
else:
built_transaction = dict(**transaction)
if 'data' in built_transaction:
raise ValueError("Cannot set data in build transaction")
if not self.address and 'to' not in built_transaction:
raise ValueError(
"When using `ContractFunction.buildTransaction` from a contract factory "
"you must provide a `to` address with the transaction"
)
if self.address and 'to' in built_transaction:
raise ValueError("Cannot set to in contract call build transaction")
if self.address:
built_transaction.setdefault('to', self.address)
if 'to' not in built_transaction:
raise ValueError(
"Please ensure that this contract instance has an address."
)
return build_transaction_for_function(
self.address,
self.web3,
self.function_identifier,
built_transaction,
self.contract_abi,
self.abi,
*self.args,
**self.kwargs
)
@combomethod
def _encode_transaction_data(cls):
return add_0x_prefix(encode_abi(cls.web3, cls.abi, cls.arguments, cls.selector))
_return_data_normalizers = tuple()
@classmethod
def factory(cls, class_name, **kwargs):
return PropertyCheckingFactory(class_name, (cls,), kwargs)(kwargs.get('abi'))
def __repr__(self):
if self.abi:
_repr = '<Function %s' % abi_to_signature(self.abi)
if self.arguments is not None:
_repr += ' bound to %r' % (self.arguments,)
return _repr + '>'
return '<Function %s>' % self.fn_name
class ContractEvent:
"""Base class for contract events
    An event accessed via the API contract.events.myEvents(*args, **kwargs)
is a subclass of this class.
"""
address = None
event_name = None
web3 = None
contract_abi = None
abi = None
def __init__(self, *argument_names):
if argument_names is None:
self.argument_names = tuple()
else:
self.argument_names = argument_names
self.abi = self._get_event_abi()
@classmethod
def _get_event_abi(cls):
return find_matching_event_abi(
cls.contract_abi,
event_name=cls.event_name)
@combomethod
def processReceipt(self, txn_receipt):
return self._parse_logs(txn_receipt)
@to_tuple
def _parse_logs(self, txn_receipt):
for log in txn_receipt['logs']:
try:
decoded_log = get_event_data(self.abi, log)
except MismatchedABI:
continue
yield decoded_log
@combomethod
def createFilter(
self, *, # PEP 3102
argument_filters=None,
fromBlock=None,
toBlock="latest",
address=None,
topics=None):
"""
Create filter object that tracks logs emitted by this contract event.
:param filter_params: other parameters to limit the events
"""
if fromBlock is None:
raise TypeError("Missing mandatory keyword argument to createFilter: fromBlock")
if argument_filters is None:
argument_filters = dict()
_filters = dict(**argument_filters)
event_abi = self._get_event_abi()
check_for_forbidden_api_filter_arguments(event_abi, _filters)
_, event_filter_params = construct_event_filter_params(
self._get_event_abi(),
contract_address=self.address,
argument_filters=_filters,
fromBlock=fromBlock,
toBlock=toBlock,
address=address,
topics=topics,
)
filter_builder = EventFilterBuilder(event_abi)
filter_builder.address = event_filter_params.get('address')
filter_builder.fromBlock = event_filter_params.get('fromBlock')
filter_builder.toBlock = event_filter_params.get('toBlock')
match_any_vals = {
arg: value for arg, value in _filters.items()
if not is_array_type(filter_builder.args[arg].arg_type) and is_list_like(value)
}
for arg, value in match_any_vals.items():
filter_builder.args[arg].match_any(*value)
match_single_vals = {
arg: value for arg, value in _filters.items()
if not is_array_type(filter_builder.args[arg].arg_type) and not is_list_like(value)
}
for arg, value in match_single_vals.items():
filter_builder.args[arg].match_single(value)
log_filter = filter_builder.deploy(self.web3)
log_filter.log_entry_formatter = get_event_data(self._get_event_abi())
log_filter.builder = filter_builder
return log_filter
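    # A usage sketch (hypothetical 'Transfer' event with an indexed 'to' input):
    #
    #   flt = contract.events.Transfer.createFilter(
    #       fromBlock=0, argument_filters={"to": some_address})
    #   entries = flt.get_all_entries()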
@combomethod
def build_filter(self):
builder = EventFilterBuilder(
self._get_event_abi(),
formatter=get_event_data(self._get_event_abi()))
builder.address = self.address
return builder
@combomethod
def getLogs(self,
argument_filters=None,
fromBlock=1,
toBlock="latest"):
"""Get events for this contract instance using eth_getLogs API.
This is a stateless method, as opposed to createFilter.
It can be safely called against nodes which do not provide
eth_newFilter API, like Infura nodes.
If no block range is provided and there are many events,
like ``Transfer`` events for a popular token,
the Ethereum node might be overloaded and timeout
on the underlying JSON-RPC call.
Example - how to get all ERC-20 token transactions
for the latest 10 blocks:
.. code-block:: python
            from_block = max(mycontract.web3.eth.blockNumber - 10, 1)
            to_block = mycontract.web3.eth.blockNumber
            events = mycontract.events.Transfer.getLogs(fromBlock=from_block, toBlock=to_block)
for e in events:
print(e["args"]["from"],
e["args"]["to"],
e["args"]["value"])
The returned processed log values will look like:
.. code-block:: python
(
AttributeDict({
'args': AttributeDict({}),
'event': 'LogNoArguments',
'logIndex': 0,
'transactionIndex': 0,
'transactionHash': HexBytes('...'),
'address': '0xF2E246BB76DF876Cef8b38ae84130F4F55De395b',
'blockHash': HexBytes('...'),
'blockNumber': 3
}),
AttributeDict(...),
...
)
See also: :func:`web3.middleware.filter.local_filter_middleware`.
:param argument_filters:
:param fromBlock: block number, defaults to 1
:param toBlock: block number or "latest". Defaults to "latest"
        :return: Tuple of :class:`AttributeDict` instances
"""
if not self.address:
raise TypeError("This method can be only called on "
"an instated contract with an address")
abi = self._get_event_abi()
if argument_filters is None:
argument_filters = dict()
_filters = dict(**argument_filters)
# Construct JSON-RPC raw filter presentation based on human readable Python descriptions
# Namely, convert event names to their keccak signatures
data_filter_set, event_filter_params = construct_event_filter_params(
abi,
contract_address=self.address,
argument_filters=_filters,
fromBlock=fromBlock,
toBlock=toBlock,
address=self.address,
)
# Call JSON-RPC API
logs = self.web3.eth.getLogs(event_filter_params)
# Convert raw binary data to Python proxy objects as described by ABI
return tuple(get_event_data(abi, entry) for entry in logs)
@classmethod
def factory(cls, class_name, **kwargs):
return PropertyCheckingFactory(class_name, (cls,), kwargs)
class ContractCaller:
"""
An alternative Contract API.
This call:
> contract.caller({'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3)
is equivalent to this call in the classic contract:
> contract.functions.add(2, 3).call({'from': eth.accounts[1], 'gas': 100000, ...})
Other options for invoking this class include:
> contract.caller.add(2, 3)
or
> contract.caller().add(2, 3)
or
> contract.caller(transaction={'from': eth.accounts[1], 'gas': 100000, ...}).add(2, 3)
"""
def __init__(self,
abi,
web3,
address,
transaction=None,
block_identifier='latest'):
self.web3 = web3
self.address = address
self.abi = abi
self._functions = None
if self.abi:
if transaction is None:
transaction = {}
self._functions = filter_by_type('function', self.abi)
for func in self._functions:
fn = ContractFunction.factory(
func['name'],
web3=self.web3,
contract_abi=self.abi,
address=self.address,
function_identifier=func['name'])
block_id = parse_block_identifier(self.web3, block_identifier)
caller_method = partial(self.call_function,
fn,
transaction=transaction,
block_identifier=block_id)
setattr(self, func['name'], caller_method)
def __getattr__(self, function_name):
if self.abi is None:
raise NoABIFound(
"There is no ABI found for this contract.",
)
elif not self._functions or len(self._functions) == 0:
raise NoABIFunctionsFound(
"The ABI for this contract contains no function definitions. ",
"Are you sure you provided the correct contract ABI?"
)
elif function_name not in self._functions:
functions_available = ', '.join([fn['name'] for fn in self._functions])
raise MismatchedABI(
"The function '{}' was not found in this contract's ABI. ".format(function_name),
"Here is a list of all of the function names found: ",
"{}. ".format(functions_available),
"Did you mean to call one of those functions?"
)
else:
return super().__getattribute__(function_name)
def __call__(self, transaction=None, block_identifier='latest'):
if transaction is None:
transaction = {}
return type(self)(self.abi,
self.web3,
self.address,
transaction=transaction,
block_identifier=block_identifier)
@staticmethod
def call_function(fn, *args, transaction=None, block_identifier='latest', **kwargs):
if transaction is None:
transaction = {}
return fn(*args, **kwargs).call(transaction, block_identifier)
def check_for_forbidden_api_filter_arguments(event_abi, _filters):
name_indexed_inputs = {_input['name']: _input for _input in event_abi['inputs']}
for filter_name, filter_value in _filters.items():
_input = name_indexed_inputs[filter_name]
if is_array_type(_input['type']):
raise TypeError(
"createFilter no longer supports array type filter arguments. "
"see the build_filter method for filtering array type filters.")
if is_list_like(filter_value) and is_dynamic_sized_type(_input['type']):
raise TypeError(
"createFilter no longer supports setting filter argument options for dynamic sized "
"types. See the build_filter method for setting filters with the match_any "
"method.")
def call_contract_function(
web3,
address,
normalizers,
function_identifier,
transaction,
block_id=None,
contract_abi=None,
fn_abi=None,
*args,
**kwargs):
"""
Helper function for interacting with a contract function using the
`eth_call` API.
"""
call_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_identifier,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
if block_id is None:
return_data = web3.eth.call(call_transaction)
else:
return_data = web3.eth.call(call_transaction, block_identifier=block_id)
if fn_abi is None:
fn_abi = find_matching_fn_abi(contract_abi, function_identifier, args, kwargs)
output_types = get_abi_output_types(fn_abi)
try:
output_data = decode_abi(output_types, return_data)
except DecodingError as e:
# Provide a more helpful error message than the one provided by
# eth-abi-utils
is_missing_code_error = (
return_data in ACCEPTABLE_EMPTY_STRINGS and
web3.eth.getCode(address) in ACCEPTABLE_EMPTY_STRINGS
)
if is_missing_code_error:
msg = (
"Could not transact with/call contract function, is contract "
"deployed correctly and chain synced?"
)
else:
msg = (
"Could not decode contract function call {} return data {} for "
"output_types {}".format(
function_identifier,
return_data,
output_types
)
)
raise BadFunctionCallOutput(msg) from e
_normalizers = itertools.chain(
BASE_RETURN_NORMALIZERS,
normalizers,
)
normalized_data = map_abi_data(_normalizers, output_types, output_data)
if len(normalized_data) == 1:
return normalized_data[0]
else:
return normalized_data
def parse_block_identifier(web3, block_identifier):
if isinstance(block_identifier, int):
return parse_block_identifier_int(web3, block_identifier)
elif block_identifier in ['latest', 'earliest', 'pending']:
return block_identifier
elif isinstance(block_identifier, bytes) or is_hex_encoded_block_hash(block_identifier):
return web3.eth.getBlock(block_identifier)['number']
else:
raise BlockNumberOutofRange
def parse_block_identifier_int(web3, block_identifier_int):
if block_identifier_int >= 0:
block_num = block_identifier_int
else:
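        # a negative identifier counts back from the chain head, e.g. -1 -> the latest block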
last_block = web3.eth.getBlock('latest')['number']
block_num = last_block + block_identifier_int + 1
if block_num < 0:
raise BlockNumberOutofRange
return block_num
def transact_with_contract_function(
address,
web3,
function_name=None,
transaction=None,
contract_abi=None,
fn_abi=None,
*args,
**kwargs):
"""
Helper function for interacting with a contract function by sending a
transaction.
"""
transact_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_name,
contract_abi=contract_abi,
transaction=transaction,
fn_abi=fn_abi,
fn_args=args,
fn_kwargs=kwargs,
)
txn_hash = web3.eth.sendTransaction(transact_transaction)
return txn_hash
def estimate_gas_for_function(
address,
web3,
fn_identifier=None,
transaction=None,
contract_abi=None,
fn_abi=None,
*args,
**kwargs):
"""Estimates gas cost a function call would take.
Don't call this directly, instead use :meth:`Contract.estimateGas`
on your contract instance.
"""
estimate_transaction = prepare_transaction(
address,
web3,
fn_identifier=fn_identifier,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
gas_estimate = web3.eth.estimateGas(estimate_transaction)
return gas_estimate
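# Illustrative high-level call that routes through the helper above
# (hypothetical contract instance and account address):
#   gas = mycontract.functions.add(2, 3).estimateGas({'from': account})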
def build_transaction_for_function(
address,
web3,
function_name=None,
transaction=None,
contract_abi=None,
fn_abi=None,
*args,
**kwargs):
"""Builds a dictionary with the fields required to make the given transaction
Don't call this directly, instead use :meth:`Contract.buildTransaction`
on your contract instance.
"""
prepared_transaction = prepare_transaction(
address,
web3,
fn_identifier=function_name,
contract_abi=contract_abi,
fn_abi=fn_abi,
transaction=transaction,
fn_args=args,
fn_kwargs=kwargs,
)
prepared_transaction = fill_transaction_defaults(web3, prepared_transaction)
return prepared_transaction
def find_functions_by_identifier(contract_abi, web3, address, callable_check):
fns_abi = filter_by_type('function', contract_abi)
return [
ContractFunction.factory(
fn_abi['name'],
web3=web3,
contract_abi=contract_abi,
address=address,
function_identifier=fn_abi['name'],
abi=fn_abi
)
for fn_abi in fns_abi
if callable_check(fn_abi)
]
def get_function_by_identifier(fns, identifier):
if len(fns) > 1:
raise ValueError(
'Found multiple functions with matching {0}. '
'Found: {1!r}'.format(identifier, fns)
)
elif len(fns) == 0:
raise ValueError(
'Could not find any function with matching {0}'.format(identifier)
)
return fns[0]
| 32.018417
| 100
| 0.607235
|
4a12d986ad14c30e4447d5f6269fb447e142ac1c
| 2,300
|
py
|
Python
|
convert.py
|
ChristopherRider/cryptography
|
0afd1cfe7944fa212683e8d6d80985088e4a71ee
|
[
"MIT"
] | null | null | null |
convert.py
|
ChristopherRider/cryptography
|
0afd1cfe7944fa212683e8d6d80985088e4a71ee
|
[
"MIT"
] | null | null | null |
convert.py
|
ChristopherRider/cryptography
|
0afd1cfe7944fa212683e8d6d80985088e4a71ee
|
[
"MIT"
] | null | null | null |
"""This python3 script will recognize hex, binary, and base64 strings and convert them to ascii."""
#Written by Christopher Rider
#Note: base64 strings without padding ('=') that contain only [A-Fa-f0-9] will be recognized as hex.
import base64
import re
# Get the input
import sys
input = sys.argv[1]
#Remove any whitespace
input = input.replace(' ','')
#FUNCTIONS
#CHECK & CONVERT: binary
def check_binary(binary):
"""This function checks if it's binary."""
#Cleaning input up
if binary[:2] == '0b':
binary = binary[2:]
x = len(binary)
#Checking if binary
if bool(re.search('[0-1]{' + str(x) + '}', binary)):
return True
def convert_binary(binary):
"""This function converts a binary string to ascii."""
if binary[:2] != '0b':
binary = '0b'+ binary
binary = int(binary, 2)
output = binary.to_bytes((binary.bit_length() + 7) // 8, 'big').decode()
return output
#CHECK & CONVERT: hex
def check_hex(hex):
"""This function checks if it is hex."""
if hex[:2] == '0x':
hex = hex[2:]
x = len(hex)
if bool(re.search('[a-fA-F0-9]{' + str(x) + '}', hex)):
return True
def convert_hex(hex):
"""This function converts hex to ascii"""
if hex[:2] == '0x':
hex = hex[2:]
output = bytes.fromhex(hex).decode('utf-8')
return output
#CHECK & CONVERT: base64
def check_base64(b64):
"""This function checks if it's base64."""
#Removes the trailing = or ==
    b64 = re.sub("={0,2}$", '', b64)
    #Checks if it's base64 (the standard alphabet also allows '+' and '/')
    if bool(re.search('[a-zA-Z0-9+/]{' + str(len(b64)) + '}', b64)):
return True
def convert_base64(b64):
"""This function converts base64 to ascii"""
#Doing the conversion
output = base64.b64decode(b64).decode('utf-8')
#Returning the output, converting to remove the b' prefix
return output
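# Illustrative invocations (assuming this script is saved as convert.py):
#   $ python3 convert.py 0100100001101001   # -> Binary to ascii: Hi
#   $ python3 convert.py 48656c6c6f         # -> Hex to ascii: Hello
#   $ python3 convert.py SGVsbG8=           # -> Base64 to ascii: Hello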
# CHECKING THE ENCODING
if check_binary(input):
output_binary = convert_binary(input)
print("Binary to ascii:\n" + output_binary)
elif check_hex(input):
output_hex = convert_hex(input)
print("Hex to ascii:\n" + output_hex)
elif check_base64(input):
output_b64 = convert_base64(input)
print("Base64 to ascii:\n" + output_b64)
else:
print("I'm not sure what encoding this is.")
| 27.710843
| 101
| 0.633913
|
4a12d98c8d22f53e72256daacb8e5ad5efd4657e
| 1,176
|
py
|
Python
|
visualizer.py
|
pulkitmehta/bikeIntel
|
eb286c62addc2c917af2c3e04af3704dc00c6b1a
|
[
"MIT"
] | null | null | null |
visualizer.py
|
pulkitmehta/bikeIntel
|
eb286c62addc2c917af2c3e04af3704dc00c6b1a
|
[
"MIT"
] | null | null | null |
visualizer.py
|
pulkitmehta/bikeIntel
|
eb286c62addc2c917af2c3e04af3704dc00c6b1a
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt, matplotlib.animation as animation
fig = plt.figure(figsize=(20,3))
plt.title("Rental forecasting | Memory offset = 7 D | Dynamic Adaptability | 2019-01-01 to 2020-05-31")
entries = 240
ax = fig.add_subplot(1,1,1)
f = open('./output/timeline_forecasts.csv','r').read()
t = open('./output/timeline_true_vals.csv','r').read()
linesf = f.split('\n')
linest = t.split('\n')
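# Each CSV is expected to hold one "x,y" pair per line (hypothetical sample):
#   0,412.0
#   1,398.5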
def animate(i):
#lines = graph_data.split('\n')
xf = []
yf = []
xt = []
yt = []
for linef, linet in zip(linesf[i+20:i+entries], linest[i:i+entries]):
if len(linef) > 1:
x, y = linef.split(',')
xf.append(float(x))
yf.append(float(y))
if len(linet) > 1:
x, y = linet.split(',')
xt.append(float(x))
yt.append(float(y))
ax.clear()
ax.plot(xt, yt, label = 'True Rentals')
ax.plot(xf, yf, '--', label = "Forecast")
ax.legend()
plt.ylabel("No. of Rentals")
plt.xlabel("Timeline-->")
ani = animation.FuncAnimation(fig, animate, interval=10)
plt.show()
| 28
| 103
| 0.561224
|
4a12d9ae1271d4454b1cd3d86d5f8b6f674b0e2e
| 18,075
|
py
|
Python
|
ops/base_module.py
|
ICME2022/R-S-R
|
0e938082a0ea381cff7f0348deb65826848c43e4
|
[
"Apache-2.0"
] | null | null | null |
ops/base_module.py
|
ICME2022/R-S-R
|
0e938082a0ea381cff7f0348deb65826848c43e4
|
[
"Apache-2.0"
] | null | null | null |
ops/base_module.py
|
ICME2022/R-S-R
|
0e938082a0ea381cff7f0348deb65826848c43e4
|
[
"Apache-2.0"
] | null | null | null |
# Code for STTNet
from __future__ import print_function, division, absolute_import
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['FBResNet', 'fbresnet50', 'fbresnet101']
model_urls = {
'fbresnet50': 'http://data.lip6.fr/cadene/pretrainedmodels/resnet50-19c8e357.pth',
'fbresnet101': 'http://data.lip6.fr/cadene/pretrainedmodels/resnet101-5d3b4d8f.pth'
}
'''
def _dimension(x):
_Upsample = torch.nn.Upsample(scale_factor = 6.8,mode = 'nearest') #[64,128,56,56]
x = _Upsample(x) #[64,128,380,380]
_ZeroPad2d = torch.nn.ZeroPad2d(2)
x = _ZeroPad2d(x) #[64,128,384,384]
return x
'''
class mSEModule(nn.Module):
def __init__(self, channel, n_segment=8,index=1):
super(mSEModule, self).__init__()
self.channel = channel
self.reduction = 16
self.n_segment = n_segment
self.stride = 2**(index-1)
self.conv1 = nn.Conv2d(in_channels=self.channel,
out_channels=self.channel//self.reduction,
kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,
kernel_size=3, padding=1, groups=self.channel//self.reduction, bias=False)
self.avg_pool_forward2 = nn.AvgPool2d(kernel_size=2, stride=2)
self.avg_pool_forward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_forward = nn.Sigmoid()
self.avg_pool_backward2 = nn.AvgPool2d(kernel_size=2, stride=2)#nn.AdaptiveMaxPool2d(1)
self.avg_pool_backward4 = nn.AvgPool2d(kernel_size=4, stride=4)
self.sigmoid_backward = nn.Sigmoid()
self.pad1_forward = (0, 0, 0, 0, 0, 0, 0, 1)
self.pad1_backward = (0, 0, 0, 0, 0, 0, 1, 0)
self.conv3 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(num_features=self.channel)
self.conv3_smallscale2 = nn.Conv2d(in_channels=self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale2 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
self.conv3_smallscale4 = nn.Conv2d(in_channels = self.channel//self.reduction,
out_channels=self.channel//self.reduction,padding=1, kernel_size=3, bias=False)
self.bn3_smallscale4 = nn.BatchNorm2d(num_features=self.channel//self.reduction)
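        # spatial_pool below references self.conv_mask and self.softmax, which
        # were missing from the original __init__; the definitions here are an
        # assumption (GCNet-style 1x1 context mask over the spatial positions)
        self.conv_mask = nn.Conv2d(in_channels=self.channel, out_channels=1, kernel_size=1)
        self.softmax = nn.Softmax(dim=2)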
def spatial_pool(self, x):
nt, channel, height, width = x.size()
input_x = x
# [N, C, H * W]
input_x = input_x.view(nt, channel, height * width)
# [N, 1, C, H * W]
input_x = input_x.unsqueeze(1)
# [N, 1, H, W]
context_mask = self.conv_mask(x)
# [N, 1, H * W]
context_mask = context_mask.view(nt, 1, height * width)
# [N, 1, H * W]
context_mask = self.softmax(context_mask)
context_mask = context_mask.view(nt,1,height,width)
return context_mask
def forward(self, x):
bottleneck = self.conv1(x) # nt, c//r, h, w
bottleneck = self.bn1(bottleneck) # nt, c//r, h, w
reshape_bottleneck = bottleneck.view((-1, self.n_segment) + bottleneck.size()[1:]) # n, t, c//r, h, w
t_fea_forward, _ = reshape_bottleneck.split([self.n_segment -1, 1], dim=1) # n, t-1, c//r, h, w
_, t_fea_backward = reshape_bottleneck.split([1, self.n_segment -1], dim=1) # n, t-1, c//r, h, w
conv_bottleneck = self.conv2(bottleneck) # nt, c//r, h, w
reshape_conv_bottleneck = conv_bottleneck.view((-1, self.n_segment) + conv_bottleneck.size()[1:]) # n, t, c//r, h, w
_, tPlusone_fea_forward = reshape_conv_bottleneck.split([1, self.n_segment-1], dim=1) # n, t-1, c//r, h, w
tPlusone_fea_backward ,_ = reshape_conv_bottleneck.split([self.n_segment-1, 1], dim=1) # n, t-1, c//r, h, w
diff_fea_forward = tPlusone_fea_forward - t_fea_forward # n, t-1, c//r, h, w
diff_fea_backward = tPlusone_fea_backward - t_fea_backward# n, t-1, c//r, h, w
diff_fea_pluszero_forward = F.pad(diff_fea_forward, self.pad1_forward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_forward = diff_fea_pluszero_forward.view((-1,) + diff_fea_pluszero_forward.size()[2:]) #nt, c//r, h, w
diff_fea_pluszero_backward = F.pad(diff_fea_backward, self.pad1_backward, mode="constant", value=0) # n, t, c//r, h, w
diff_fea_pluszero_backward = diff_fea_pluszero_backward.view((-1,) + diff_fea_pluszero_backward.size()[2:]) #nt, c//r, h, w
y_forward_smallscale2 = self.avg_pool_forward2(diff_fea_pluszero_forward) # nt, c//r, 1, 1
y_backward_smallscale2 = self.avg_pool_backward2(diff_fea_pluszero_backward) # nt, c//r, 1, 1
y_forward_smallscale4 = diff_fea_pluszero_forward
y_backward_smallscale4 = diff_fea_pluszero_backward
y_forward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_forward_smallscale2))
y_backward_smallscale2 = self.bn3_smallscale2(self.conv3_smallscale2(y_backward_smallscale2))
y_forward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_forward_smallscale4))
y_backward_smallscale4 = self.bn3_smallscale4(self.conv3_smallscale4(y_backward_smallscale4))
y_forward_smallscale2 = F.interpolate(y_forward_smallscale2, diff_fea_pluszero_forward.size()[2:])
y_backward_smallscale2 = F.interpolate(y_backward_smallscale2, diff_fea_pluszero_backward.size()[2:])
y_forward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_forward + 1.0/3.0*y_forward_smallscale2 + 1.0/3.0*y_forward_smallscale4))# nt, c, 1, 1
y_backward = self.bn3(self.conv3(1.0/3.0*diff_fea_pluszero_backward + 1.0/3.0*y_backward_smallscale2 + 1.0/3.0*y_backward_smallscale4)) # nt, c, 1, 1
y_forward = self.sigmoid_forward(y_forward) - 0.5
y_backward = self.sigmoid_backward(y_backward) - 0.5
y = 0.5*y_forward + 0.5*y_backward
output = x + x*y
return output
class ShiftModule(nn.Module):
def __init__(self, input_channels, n_segment=8,n_div=8, mode='shift'):
super(ShiftModule, self).__init__()
self.input_channels = input_channels
self.n_segment = n_segment
self.fold_div = n_div
self.fold = self.input_channels // self.fold_div
self.conv = nn.Conv1d(self.fold_div*self.fold, self.fold_div*self.fold,
kernel_size=3, padding=1, groups=self.fold_div*self.fold,
bias=False)
if mode == 'shift':
self.conv.weight.requires_grad = True
self.conv.weight.data.zero_()
self.conv.weight.data[:self.fold, 0, 2] = 1 # shift left
self.conv.weight.data[self.fold: 2 * self.fold, 0, 0] = 1 # shift right
if 2*self.fold < self.input_channels:
self.conv.weight.data[2 * self.fold:, 0, 1] = 1 # fixed
elif mode == 'fixed':
self.conv.weight.requires_grad = True
self.conv.weight.data.zero_()
self.conv.weight.data[:, 0, 1] = 1 # fixed
elif mode == 'norm':
self.conv.weight.requires_grad = True
def forward(self, x):
nt, c, h, w = x.size()
n_batch = nt // self.n_segment
x = x.view(n_batch, self.n_segment, c, h, w)
x = x.permute(0, 3, 4, 2, 1) # (n_batch, h, w, c, n_segment)
x = x.contiguous().view(n_batch*h*w, c, self.n_segment)
x = self.conv(x) # (n_batch*h*w, c, n_segment)
x = x.view(n_batch, h, w, c, self.n_segment)
x = x.permute(0, 4, 3, 1, 2) # (n_batch, n_segment, c, h, w)
x = x.contiguous().view(nt, c, h, w)
return x
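# Illustrative effect of the 'shift' initialisation above: with temporal kernel
# [0, 0, 1] a channel reads its value from frame t+1 (shift left), with
# [1, 0, 0] it reads from frame t-1 (shift right), and [0, 1, 0] is identity.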
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=True)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, num_segments, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=False)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, num_segments, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=False)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class BottleneckShift(nn.Module):
expansion = 4
def __init__(self, num_segments, inplanes, planes, stride=1, downsample=None):
super(BottleneckShift, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=True)
self.bn1 = nn.BatchNorm2d(planes)
self.num_segments = num_segments
self.mse = mSEModule(planes, n_segment=self.num_segments,index=1)
self.shift = ShiftModule(planes, n_segment=self.num_segments, n_div=8, mode='shift')
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=True)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=True)
self.bn3 = nn.BatchNorm2d(planes * 4)
self.relu = nn.ReLU(inplace=False)
self.downsample = downsample
self.stride = stride
self.inplanes = inplanes
#self.transformer = VisionTransformer()
'''
self.dropout = 0.1
self.PositionalEncoding = PositionalEncoding(d_model = self.inplanes, dropout = self.dropout)
self.EncoderLayer = EncoderLayer(self.inplanes,
MultiHeadedAttention(8, self.inplanes),
PositionwiseFeedForward(self.inplanes, 2048, self.dropout),
self.dropout,
)
#self.proj = torch.nn.Conv2d(self.inplanes,256,56,56)
'''
#self.Attention = Attention(dim = 782)
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.mse(out)
out = self.shift(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class FBResNet(nn.Module):
def __init__(self, num_segments, block, layers, num_classes=1000):
self.inplanes = 64
self.input_space = None
self.input_size = (224, 224, 3)
self.mean = None
self.std = None
self.num_segments = num_segments
super(FBResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=True)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=False)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(self.num_segments,Bottleneck, 64, layers[0])
self.layer2 = self._make_layer(self.num_segments,block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(self.num_segments,block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(self.num_segments,block, 512, layers[3], stride=2)
self.last_linear = nn.Linear(512 * block.expansion, num_classes)
#self.head = nn.Linear(768, 1024)
        self.Atten = Self_Attn(512 * block.expansion)  # channel count must match layer4 output
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, num_segments ,block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=True),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(num_segments, self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(num_segments, self.inplanes, planes))
#forward()
return nn.Sequential(*layers)
def forward(self,x):
x = self.conv1(x)
self.conv1_input = x.clone()
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.Atten(x)
adaptiveAvgPoolWidth = x.shape[2]
        x = F.avg_pool2d(x, kernel_size=adaptiveAvgPoolWidth)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
'''
def forward(self, input):
x = self.features(input)
print('input = ',x.shape)
x = self.logits(x)
return x
def features(self, input):
x = self.conv1(input)
self.conv1_input = x.clone()
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
attention_map = self.Atten(x)
x = attention_map
return x
def logits(self, features):
adaptiveAvgPoolWidth = features.shape[2]
x = F.avg_pool2d(features, kernel_size=adaptiveAvgPoolWidth)
x = x.view(x.size(0), -1)
x = self.last_linear(x)
return x
'''
def fbresnet50(num_segments=8,pretrained=False,num_classes=1000):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = FBResNet(num_segments,BottleneckShift, [3, 4, 6, 3], num_classes=num_classes)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['fbresnet50']),strict=False)
return model
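# Minimal usage sketch (illustrative; the network consumes batch*num_segments
# frames stacked along the first dimension):
#   model = fbresnet50(num_segments=8, pretrained=False, num_classes=174)
#   x = torch.randn(8, 3, 224, 224)   # one clip of 8 segments
#   logits = model(x)                 # -> shape [8, 174], one row per frame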
def fbresnet101(num_segments,pretrained=False,num_classes=1000):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = FBResNet(num_segments,BottleneckShift, [3, 4, 23, 3], num_classes=num_classes)
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['fbresnet101']),strict=False)
return model
class Self_Attn(nn.Module):
""" Self attention Layer"""
def __init__(self,in_dim):
super(Self_Attn,self).__init__()
self.chanel_in = in_dim
# self.activation = activation
self.query_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.key_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim//8 , kernel_size= 1)
self.value_conv = nn.Conv2d(in_channels = in_dim , out_channels = in_dim , kernel_size= 1)
self.gamma = nn.Parameter(torch.zeros(1))
self.softmax = nn.Softmax(dim=-1)
def forward(self,x):
"""
inputs :
x : input feature maps( B X C X W X H)
returns :
out : self attention value + input feature
attention: B X N X N (N is Width*Height)
"""
m_batchsize,C,width ,height = x.size()
        proj_query = self.query_conv(x).view(m_batchsize,-1,width*height).permute(0,2,1) # B x N x C'
        proj_key = self.key_conv(x).view(m_batchsize,-1,width*height) # B x C' x N
        energy = torch.bmm(proj_query,proj_key) # B x N x N
        attention = self.softmax(energy) # B x N x N
        proj_value = self.value_conv(x).view(m_batchsize,-1,width*height) # B x C x N
out = torch.bmm(proj_value,attention.permute(0,2,1) )
out = out.view(m_batchsize,C,width,height)
out = self.gamma*out + x
return out
| 37.893082
| 157
| 0.611065
|
4a12d9b2d0eda8e77d342d718e3dc3283e3ed190
| 303
|
py
|
Python
|
pyNastran/utils/test/test_log.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | null | null | null |
pyNastran/utils/test/test_log.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | 1
|
2021-06-07T16:33:59.000Z
|
2021-06-07T16:33:59.000Z
|
pyNastran/utils/test/test_log.py
|
JohannesSeidel/pyNastran
|
91ccd2756b201a7a3e4bb81cc6dc53b947d43bbf
|
[
"BSD-3-Clause"
] | 1
|
2021-10-14T03:52:44.000Z
|
2021-10-14T03:52:44.000Z
|
"""tests log.py"""
import os
import unittest
from pyNastran.utils.log import make_log
class TestLog(unittest.TestCase):
def test_make_log(self):
"""tests make_log"""
make_log()
os.remove('pyNastran.log')
if __name__ == "__main__": # pragma: no cover
unittest.main()
| 18.9375
| 46
| 0.656766
|
4a12da434328670db2a8b5ce1891a9555295d37b
| 180
|
py
|
Python
|
docs/src/bigger_applications/app/main.py
|
kabirkhan/fastapi
|
9ca72f4ea1fc6f04b7d8d4e2f3c4d7da5f6c322e
|
[
"MIT"
] | 1
|
2019-05-07T14:55:24.000Z
|
2019-05-07T14:55:24.000Z
|
docs/src/bigger_applications/app/main.py
|
kabirkhan/fastapi
|
9ca72f4ea1fc6f04b7d8d4e2f3c4d7da5f6c322e
|
[
"MIT"
] | null | null | null |
docs/src/bigger_applications/app/main.py
|
kabirkhan/fastapi
|
9ca72f4ea1fc6f04b7d8d4e2f3c4d7da5f6c322e
|
[
"MIT"
] | null | null | null |
from fastapi import FastAPI
from .routers import items, users
app = FastAPI()
app.include_router(users.router)
app.include_router(items.router, prefix="/items", tags=["items"])
| 20
| 65
| 0.761111
|
4a12daf888f3493702d4d62fae55357a929ef421
| 7,741
|
py
|
Python
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/project_details_and_status_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/project_details_and_status_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-iam/huaweicloudsdkiam/v3/model/project_details_and_status_result.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class ProjectDetailsAndStatusResult:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'domain_id': 'str',
'is_domain': 'bool',
'parent_id': 'str',
'name': 'str',
'description': 'str',
'id': 'str',
'enabled': 'bool',
'status': 'str'
}
attribute_map = {
'domain_id': 'domain_id',
'is_domain': 'is_domain',
'parent_id': 'parent_id',
'name': 'name',
'description': 'description',
'id': 'id',
'enabled': 'enabled',
'status': 'status'
}
def __init__(self, domain_id=None, is_domain=None, parent_id=None, name=None, description=None, id=None, enabled=None, status=None):
"""ProjectDetailsAndStatusResult - a model defined in huaweicloud sdk"""
self._domain_id = None
self._is_domain = None
self._parent_id = None
self._name = None
self._description = None
self._id = None
self._enabled = None
self._status = None
self.discriminator = None
self.domain_id = domain_id
self.is_domain = is_domain
self.parent_id = parent_id
self.name = name
self.description = description
self.id = id
self.enabled = enabled
self.status = status
@property
def domain_id(self):
"""Gets the domain_id of this ProjectDetailsAndStatusResult.
        ID of the account that the project belongs to.
:return: The domain_id of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._domain_id
@domain_id.setter
def domain_id(self, domain_id):
"""Sets the domain_id of this ProjectDetailsAndStatusResult.
        ID of the account that the project belongs to.
:param domain_id: The domain_id of this ProjectDetailsAndStatusResult.
:type: str
"""
self._domain_id = domain_id
@property
def is_domain(self):
"""Gets the is_domain of this ProjectDetailsAndStatusResult.
false.
:return: The is_domain of this ProjectDetailsAndStatusResult.
:rtype: bool
"""
return self._is_domain
@is_domain.setter
def is_domain(self, is_domain):
"""Sets the is_domain of this ProjectDetailsAndStatusResult.
false.
:param is_domain: The is_domain of this ProjectDetailsAndStatusResult.
:type: bool
"""
self._is_domain = is_domain
@property
def parent_id(self):
"""Gets the parent_id of this ProjectDetailsAndStatusResult.
        If this project was created by the user, the ID of the region-level project it belongs to is returned here. If this is a built-in system project, such as cn-north-4, the account ID is returned.
:return: The parent_id of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._parent_id
@parent_id.setter
def parent_id(self, parent_id):
"""Sets the parent_id of this ProjectDetailsAndStatusResult.
        If this project was created by the user, the ID of the region-level project it belongs to is returned here. If this is a built-in system project, such as cn-north-4, the account ID is returned.
:param parent_id: The parent_id of this ProjectDetailsAndStatusResult.
:type: str
"""
self._parent_id = parent_id
@property
def name(self):
"""Gets the name of this ProjectDetailsAndStatusResult.
        Project name.
:return: The name of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this ProjectDetailsAndStatusResult.
        Project name.
:param name: The name of this ProjectDetailsAndStatusResult.
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this ProjectDetailsAndStatusResult.
        Project description.
:return: The description of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this ProjectDetailsAndStatusResult.
        Project description.
:param description: The description of this ProjectDetailsAndStatusResult.
:type: str
"""
self._description = description
@property
def id(self):
"""Gets the id of this ProjectDetailsAndStatusResult.
        Project ID.
:return: The id of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this ProjectDetailsAndStatusResult.
        Project ID.
:param id: The id of this ProjectDetailsAndStatusResult.
:type: str
"""
self._id = id
@property
def enabled(self):
"""Gets the enabled of this ProjectDetailsAndStatusResult.
        Whether the project is enabled.
:return: The enabled of this ProjectDetailsAndStatusResult.
:rtype: bool
"""
return self._enabled
@enabled.setter
def enabled(self, enabled):
"""Sets the enabled of this ProjectDetailsAndStatusResult.
        Whether the project is enabled.
:param enabled: The enabled of this ProjectDetailsAndStatusResult.
:type: bool
"""
self._enabled = enabled
@property
def status(self):
"""Gets the status of this ProjectDetailsAndStatusResult.
        Project status.
:return: The status of this ProjectDetailsAndStatusResult.
:rtype: str
"""
return self._status
@status.setter
def status(self, status):
"""Sets the status of this ProjectDetailsAndStatusResult.
        Project status.
:param status: The status of this ProjectDetailsAndStatusResult.
:type: str
"""
self._status = status
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ProjectDetailsAndStatusResult):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
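# Usage sketch (hypothetical field values):
#   result = ProjectDetailsAndStatusResult(domain_id='d-123', is_domain=False,
#                                          parent_id='p-456', name='cn-north-4',
#                                          description='demo', id='prj-789',
#                                          enabled=True, status='normal')
#   result.to_dict()   # -> a plain dict of the attributes above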
| 26.152027
| 136
| 0.585583
|
4a12db93d513a3bd1df205326c965d457cb18198
| 52
|
py
|
Python
|
app/controllers/helpers/__init__.py
|
taeram/ineffable
|
fcaf1cc405f9e4dc3346fa91338b75b195325f84
|
[
"MIT"
] | 7
|
2015-05-08T21:37:27.000Z
|
2019-01-01T22:11:07.000Z
|
app/controllers/helpers/__init__.py
|
taeram/ineffable
|
fcaf1cc405f9e4dc3346fa91338b75b195325f84
|
[
"MIT"
] | null | null | null |
app/controllers/helpers/__init__.py
|
taeram/ineffable
|
fcaf1cc405f9e4dc3346fa91338b75b195325f84
|
[
"MIT"
] | null | null | null |
__all__ = ["cachebuster", "favicon", "letsencrypt"]
| 26
| 51
| 0.692308
|
4a12dd1af28c98d1d614cb7ea7fb97b0b1b84dee
| 25,952
|
py
|
Python
|
pandas/core/sparse/array.py
|
buntwo/pandas
|
8e6b09ff3a09de58e82da6dcabbfddba61a743d6
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/sparse/array.py
|
buntwo/pandas
|
8e6b09ff3a09de58e82da6dcabbfddba61a743d6
|
[
"BSD-3-Clause"
] | null | null | null |
pandas/core/sparse/array.py
|
buntwo/pandas
|
8e6b09ff3a09de58e82da6dcabbfddba61a743d6
|
[
"BSD-3-Clause"
] | 1
|
2018-10-05T04:56:16.000Z
|
2018-10-05T04:56:16.000Z
|
"""
SparseArray data structure
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231
import numpy as np
import warnings
import pandas as pd
from pandas.core.base import PandasObject
from pandas import compat
from pandas.compat import range
from pandas.compat.numpy import function as nv
from pandas.core.dtypes.generic import (
ABCSparseArray, ABCSparseSeries)
from pandas.core.dtypes.common import (
_ensure_platform_int,
is_float, is_integer,
is_integer_dtype,
is_bool_dtype,
is_list_like,
is_string_dtype,
is_scalar, is_dtype_equal)
from pandas.core.dtypes.cast import (
maybe_convert_platform, maybe_promote,
astype_nansafe, find_common_type)
from pandas.core.dtypes.missing import isna, notna, na_value_for_dtype
import pandas._libs.sparse as splib
from pandas._libs.sparse import SparseIndex, BlockIndex, IntIndex
from pandas._libs import index as libindex
import pandas.core.algorithms as algos
import pandas.core.ops as ops
import pandas.io.formats.printing as printing
from pandas.util._decorators import Appender
from pandas.core.indexes.base import _index_shared_docs
_sparray_doc_kwargs = dict(klass='SparseArray')
def _arith_method(op, name, str_rep=None, default_axis=None, fill_zeros=None,
**eval_kwargs):
"""
Wrapper function for Series arithmetic operations, to avoid
code duplication.
"""
def wrapper(self, other):
if isinstance(other, np.ndarray):
if len(self) != len(other):
raise AssertionError("length mismatch: %d vs. %d" %
(len(self), len(other)))
if not isinstance(other, ABCSparseArray):
dtype = getattr(other, 'dtype', None)
other = SparseArray(other, fill_value=self.fill_value,
dtype=dtype)
return _sparse_array_op(self, other, op, name)
elif is_scalar(other):
with np.errstate(all='ignore'):
fill = op(_get_fill(self), np.asarray(other))
result = op(self.sp_values, other)
return _wrap_result(name, result, self.sp_index, fill)
else: # pragma: no cover
raise TypeError('operation with %s not supported' % type(other))
if name.startswith("__"):
name = name[2:-2]
wrapper.__name__ = name
return wrapper
def _get_fill(arr):
# coerce fill_value to arr dtype if possible
# int64 SparseArray can have NaN as fill_value if there is no missing
try:
return np.asarray(arr.fill_value, dtype=arr.dtype)
except ValueError:
return np.asarray(arr.fill_value)
def _sparse_array_op(left, right, op, name, series=False):
if series and is_integer_dtype(left) and is_integer_dtype(right):
# series coerces to float64 if result should have NaN/inf
if name in ('floordiv', 'mod') and (right.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
elif name in ('rfloordiv', 'rmod') and (left.values == 0).any():
left = left.astype(np.float64)
right = right.astype(np.float64)
# dtype used to find corresponding sparse method
if not is_dtype_equal(left.dtype, right.dtype):
dtype = find_common_type([left.dtype, right.dtype])
left = left.astype(dtype)
right = right.astype(dtype)
else:
dtype = left.dtype
# dtype the result must have
result_dtype = None
if left.sp_index.ngaps == 0 or right.sp_index.ngaps == 0:
with np.errstate(all='ignore'):
result = op(left.get_values(), right.get_values())
fill = op(_get_fill(left), _get_fill(right))
if left.sp_index.ngaps == 0:
index = left.sp_index
else:
index = right.sp_index
elif left.sp_index.equals(right.sp_index):
with np.errstate(all='ignore'):
result = op(left.sp_values, right.sp_values)
fill = op(_get_fill(left), _get_fill(right))
index = left.sp_index
else:
if name[0] == 'r':
left, right = right, left
name = name[1:]
if name in ('and', 'or') and dtype == 'bool':
opname = 'sparse_{name}_uint8'.format(name=name)
# to make template simple, cast here
left_sp_values = left.sp_values.view(np.uint8)
right_sp_values = right.sp_values.view(np.uint8)
result_dtype = np.bool
else:
opname = 'sparse_{name}_{dtype}'.format(name=name, dtype=dtype)
left_sp_values = left.sp_values
right_sp_values = right.sp_values
sparse_op = getattr(splib, opname)
with np.errstate(all='ignore'):
result, index, fill = sparse_op(left_sp_values, left.sp_index,
left.fill_value, right_sp_values,
right.sp_index, right.fill_value)
if result_dtype is None:
result_dtype = result.dtype
return _wrap_result(name, result, index, fill, dtype=result_dtype)
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
""" wrap op result to have correct dtype """
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
dtype = np.bool
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data, sparse_index=sparse_index,
fill_value=fill_value, dtype=dtype)
class SparseArray(PandasObject, np.ndarray):
"""Data structure for labeled, sparse floating point 1-D data
Parameters
----------
data : {array-like (1-D), Series, SparseSeries, dict}
kind : {'block', 'integer'}
fill_value : float
Code for missing value. Defaults depends on dtype.
0 for int dtype, False for bool dtype, and NaN for other dtypes
sparse_index : {BlockIndex, IntIndex}, optional
Only if you have one. Mainly used internally
Notes
-----
SparseArray objects are immutable via the typical Python means. If you
must change values, convert to dense, make your changes, then convert back
to sparse
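
    Examples
    --------
    Illustrative sketch (assumes ``np`` is NumPy; only non-fill values are
    stored):

    >>> sp = SparseArray([1.0, np.nan, 2.0])  # NaN acts as the fill_value
    >>> sp.sp_index.npoints
    2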
"""
__array_priority__ = 15
_typ = 'array'
_subtyp = 'sparse_array'
sp_index = None
fill_value = None
def __new__(cls, data, sparse_index=None, index=None, kind='integer',
fill_value=None, dtype=None, copy=False):
if index is not None:
if data is None:
data = np.nan
if not is_scalar(data):
raise Exception("must only pass scalars with an index ")
values = np.empty(len(index), dtype='float64')
values.fill(data)
data = values
if isinstance(data, ABCSparseSeries):
data = data.values
is_sparse_array = isinstance(data, SparseArray)
if dtype is not None:
dtype = np.dtype(dtype)
if is_sparse_array:
sparse_index = data.sp_index
values = data.sp_values
fill_value = data.fill_value
else:
# array-like
if sparse_index is None:
if dtype is not None:
data = np.asarray(data, dtype=dtype)
res = make_sparse(data, kind=kind, fill_value=fill_value)
values, sparse_index, fill_value = res
else:
values = _sanitize_values(data)
if len(values) != sparse_index.npoints:
raise AssertionError("Non array-like type {0} must have"
" the same length as the"
" index".format(type(values)))
# Create array, do *not* copy data by default
if copy:
subarr = np.array(values, dtype=dtype, copy=True)
else:
subarr = np.asarray(values, dtype=dtype)
# Change the class of the array to be the subclass type.
return cls._simple_new(subarr, sparse_index, fill_value)
@classmethod
def _simple_new(cls, data, sp_index, fill_value):
if not isinstance(sp_index, SparseIndex):
# caller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
if fill_value is None:
if sp_index.ngaps > 0:
# has missing hole
fill_value = np.nan
else:
fill_value = na_value_for_dtype(data.dtype)
if (is_integer_dtype(data) and is_float(fill_value) and
sp_index.ngaps > 0):
# if float fill_value is being included in dense repr,
# convert values to float
data = data.astype(float)
result = data.view(cls)
if not isinstance(sp_index, SparseIndex):
# caller must pass SparseIndex
raise ValueError('sp_index must be a SparseIndex')
result.sp_index = sp_index
result._fill_value = fill_value
return result
@property
def _constructor(self):
return lambda x: SparseArray(x, fill_value=self.fill_value,
kind=self.kind)
@property
def kind(self):
if isinstance(self.sp_index, BlockIndex):
return 'block'
elif isinstance(self.sp_index, IntIndex):
return 'integer'
def __array_wrap__(self, out_arr, context=None):
"""
NumPy calls this method when ufunc is applied
Parameters
----------
out_arr : ndarray
ufunc result (note that ufunc is only applied to sp_values)
context : tuple of 3 elements (ufunc, signature, domain)
for example, following is a context when np.sin is applied to
SparseArray,
(<ufunc 'sin'>, (SparseArray,), 0))
See http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
"""
if isinstance(context, tuple) and len(context) == 3:
ufunc, args, domain = context
# to apply ufunc only to fill_value (to avoid recursive call)
args = [getattr(a, 'fill_value', a) for a in args]
with np.errstate(all='ignore'):
fill_value = ufunc(self.fill_value, *args[1:])
else:
fill_value = self.fill_value
return self._simple_new(out_arr, sp_index=self.sp_index,
fill_value=fill_value)
def __array_finalize__(self, obj):
"""
Gets called after any ufunc or other array operations, necessary
to pass on the index.
"""
self.sp_index = getattr(obj, 'sp_index', None)
self._fill_value = getattr(obj, 'fill_value', None)
def __reduce__(self):
"""Necessary for making this object picklable"""
object_state = list(np.ndarray.__reduce__(self))
subclass_state = self.fill_value, self.sp_index
object_state[2] = (object_state[2], subclass_state)
return tuple(object_state)
def __setstate__(self, state):
"""Necessary for making this object picklable"""
nd_state, own_state = state
np.ndarray.__setstate__(self, nd_state)
fill_value, sp_index = own_state[:2]
self.sp_index = sp_index
self._fill_value = fill_value
def __len__(self):
try:
return self.sp_index.length
        except AttributeError:  # sp_index may not be set yet
return 0
def __unicode__(self):
return '%s\nFill: %s\n%s' % (printing.pprint_thing(self),
printing.pprint_thing(self.fill_value),
printing.pprint_thing(self.sp_index))
def disable(self, other):
raise NotImplementedError('inplace binary ops not supported')
# Inplace operators
__iadd__ = disable
__isub__ = disable
__imul__ = disable
__itruediv__ = disable
__ifloordiv__ = disable
__ipow__ = disable
# Python 2 division operators
if not compat.PY3:
__idiv__ = disable
@property
def values(self):
"""
Dense values
"""
output = np.empty(len(self), dtype=self.dtype)
int_index = self.sp_index.to_int_index()
output.fill(self.fill_value)
output.put(int_index.indices, self)
return output
@property
def sp_values(self):
# caching not an option, leaks memory
return self.view(np.ndarray)
@property
def fill_value(self):
return self._fill_value
@fill_value.setter
def fill_value(self, value):
if not is_scalar(value):
raise ValueError('fill_value must be a scalar')
# if the specified value triggers type promotion, raise ValueError
new_dtype, fill_value = maybe_promote(self.dtype, value)
if is_dtype_equal(self.dtype, new_dtype):
self._fill_value = fill_value
else:
msg = 'unable to set fill_value {0} to {1} dtype'
raise ValueError(msg.format(value, self.dtype))
def get_values(self, fill=None):
""" return a dense representation """
return self.to_dense(fill=fill)
def to_dense(self, fill=None):
"""
Convert SparseArray to a NumPy array.
Parameters
----------
fill: float, default None
.. deprecated:: 0.20.0
This argument is not respected by this function.
Returns
-------
arr : NumPy array
"""
if fill is not None:
warnings.warn(("The 'fill' parameter has been deprecated and "
"will be removed in a future version."),
FutureWarning, stacklevel=2)
return self.values
def __iter__(self):
for i in range(len(self)):
yield self._get_val_at(i)
def __getitem__(self, key):
"""
"""
if is_integer(key):
return self._get_val_at(key)
elif isinstance(key, tuple):
data_slice = self.values[key]
else:
if isinstance(key, SparseArray):
if is_bool_dtype(key):
key = key.to_dense()
else:
key = np.asarray(key)
if hasattr(key, '__len__') and len(self) != len(key):
return self.take(key)
else:
data_slice = self.values[key]
return self._constructor(data_slice)
def __getslice__(self, i, j):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j)
return self.__getitem__(slobj)
def _get_val_at(self, loc):
n = len(self)
if loc < 0:
loc += n
if loc >= n or loc < 0:
raise IndexError('Out of bounds access')
sp_loc = self.sp_index.lookup(loc)
if sp_loc == -1:
return self.fill_value
else:
return libindex.get_value_at(self, sp_loc)
@Appender(_index_shared_docs['take'] % _sparray_doc_kwargs)
def take(self, indices, axis=0, allow_fill=True,
fill_value=None, **kwargs):
"""
Sparse-compatible version of ndarray.take
Returns
-------
taken : ndarray
"""
nv.validate_take(tuple(), kwargs)
if axis:
raise ValueError("axis must be 0, input was {0}".format(axis))
if is_integer(indices):
# return scalar
return self[indices]
indices = _ensure_platform_int(indices)
n = len(self)
if allow_fill and fill_value is not None:
# allow -1 to indicate self.fill_value,
# self.fill_value may not be NaN
if (indices < -1).any():
msg = ('When allow_fill=True and fill_value is not None, '
'all indices must be >= -1')
raise ValueError(msg)
elif (n <= indices).any():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.format(n))
else:
if ((indices < -n) | (n <= indices)).any():
msg = 'index is out of bounds for size {0}'
raise IndexError(msg.format(n))
indices = indices.astype(np.int32)
if not (allow_fill and fill_value is not None):
indices = indices.copy()
indices[indices < 0] += n
locs = self.sp_index.lookup_array(indices)
indexer = np.arange(len(locs), dtype=np.int32)
mask = locs != -1
if mask.any():
indexer = indexer[mask]
new_values = self.sp_values.take(locs[mask])
else:
indexer = np.empty(shape=(0, ), dtype=np.int32)
new_values = np.empty(shape=(0, ), dtype=self.sp_values.dtype)
sp_index = _make_index(len(indices), indexer, kind=self.sp_index)
return self._simple_new(new_values, sp_index, self.fill_value)
def __setitem__(self, key, value):
# if is_integer(key):
# self.values[key] = value
# else:
# raise Exception("SparseArray does not support seting non-scalars
# via setitem")
raise TypeError(
"SparseArray does not support item assignment via setitem")
def __setslice__(self, i, j, value):
if i < 0:
i = 0
if j < 0:
j = 0
slobj = slice(i, j) # noqa
# if not is_scalar(value):
# raise Exception("SparseArray does not support seting non-scalars
# via slices")
# x = self.values
# x[slobj] = value
# self.values = x
raise TypeError("SparseArray does not support item assignment via "
"slices")
def astype(self, dtype=None, copy=True):
dtype = np.dtype(dtype)
sp_values = astype_nansafe(self.sp_values, dtype, copy=copy)
try:
if is_bool_dtype(dtype):
# to avoid np.bool_ dtype
fill_value = bool(self.fill_value)
else:
fill_value = dtype.type(self.fill_value)
except ValueError:
msg = 'unable to coerce current fill_value {0} to {1} dtype'
raise ValueError(msg.format(self.fill_value, dtype))
return self._simple_new(sp_values, self.sp_index,
fill_value=fill_value)
def copy(self, deep=True):
"""
Make a copy of the SparseArray. Only the actual sparse values need to
be copied.
"""
if deep:
values = self.sp_values.copy()
else:
values = self.sp_values
return SparseArray(values, sparse_index=self.sp_index,
dtype=self.dtype, fill_value=self.fill_value)
def count(self):
"""
Compute sum of non-NA/null observations in SparseArray. If the
fill_value is not NaN, the "sparse" locations will be included in the
observation count.
Returns
-------
nobs : int
"""
sp_values = self.sp_values
valid_spvals = np.isfinite(sp_values).sum()
if self._null_fill_value:
return valid_spvals
else:
return valid_spvals + self.sp_index.ngaps
@property
def _null_fill_value(self):
return isna(self.fill_value)
@property
def _valid_sp_values(self):
sp_vals = self.sp_values
mask = notna(sp_vals)
return sp_vals[mask]
@Appender(_index_shared_docs['fillna'] % _sparray_doc_kwargs)
def fillna(self, value, downcast=None):
if downcast is not None:
raise NotImplementedError
if issubclass(self.dtype.type, np.floating):
value = float(value)
new_values = np.where(isna(self.sp_values), value, self.sp_values)
fill_value = value if self._null_fill_value else self.fill_value
return self._simple_new(new_values, self.sp_index,
fill_value=fill_value)
def sum(self, axis=0, *args, **kwargs):
"""
Sum of non-NA/null values
Returns
-------
sum : float
"""
nv.validate_sum(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
if self._null_fill_value:
return sp_sum
else:
nsparse = self.sp_index.ngaps
return sp_sum + self.fill_value * nsparse
def cumsum(self, axis=0, *args, **kwargs):
"""
Cumulative sum of non-NA/null values.
When performing the cumulative summation, any non-NA/null values will
be skipped. The resulting SparseArray will preserve the locations of
NaN values, but the fill value will be `np.nan` regardless.
Parameters
----------
axis : int or None
Axis over which to perform the cumulative summation. If None,
perform cumulative summation over flattened array.
Returns
-------
cumsum : SparseArray
"""
nv.validate_cumsum(args, kwargs)
if axis is not None and axis >= self.ndim: # Mimic ndarray behaviour.
raise ValueError("axis(={axis}) out of bounds".format(axis=axis))
if not self._null_fill_value:
return SparseArray(self.to_dense()).cumsum()
return SparseArray(self.sp_values.cumsum(), sparse_index=self.sp_index,
fill_value=self.fill_value)
def mean(self, axis=0, *args, **kwargs):
"""
Mean of non-NA/null values
Returns
-------
mean : float
"""
nv.validate_mean(args, kwargs)
valid_vals = self._valid_sp_values
sp_sum = valid_vals.sum()
ct = len(valid_vals)
if self._null_fill_value:
return sp_sum / ct
else:
nsparse = self.sp_index.ngaps
return (sp_sum + self.fill_value * nsparse) / (ct + nsparse)
def value_counts(self, dropna=True):
"""
Returns a Series containing counts of unique values.
Parameters
----------
dropna : boolean, default True
Don't include counts of NaN, even if NaN is in sp_values.
Returns
-------
counts : Series
"""
keys, counts = algos._value_counts_arraylike(self.sp_values,
dropna=dropna)
fcounts = self.sp_index.ngaps
if fcounts > 0:
if self._null_fill_value and dropna:
pass
else:
if self._null_fill_value:
mask = pd.isna(keys)
else:
mask = keys == self.fill_value
if mask.any():
counts[mask] += fcounts
else:
keys = np.insert(keys, 0, self.fill_value)
counts = np.insert(counts, 0, fcounts)
if not isinstance(keys, pd.Index):
keys = pd.Index(keys)
result = pd.Series(counts, index=keys)
return result
def _maybe_to_dense(obj):
""" try to convert to dense """
if hasattr(obj, 'to_dense'):
return obj.to_dense()
return obj
def _maybe_to_sparse(array):
""" array must be SparseSeries or SparseArray """
if isinstance(array, ABCSparseSeries):
array = array.values.copy()
return array
def _sanitize_values(arr):
"""
return an ndarray for our input,
in a platform independent manner
"""
if hasattr(arr, 'values'):
arr = arr.values
else:
# scalar
if is_scalar(arr):
arr = [arr]
# ndarray
if isinstance(arr, np.ndarray):
pass
elif is_list_like(arr) and len(arr) > 0:
arr = maybe_convert_platform(arr)
else:
arr = np.asarray(arr)
return arr
def make_sparse(arr, kind='block', fill_value=None):
"""
Convert ndarray to sparse format
Parameters
----------
arr : ndarray
kind : {'block', 'integer'}
fill_value : NaN or another value
Returns
-------
(sparse_values, index) : (ndarray, SparseIndex)
"""
arr = _sanitize_values(arr)
if arr.ndim > 1:
raise TypeError("expected dimension <= 1 data")
if fill_value is None:
fill_value = na_value_for_dtype(arr.dtype)
if isna(fill_value):
mask = notna(arr)
else:
# For str arrays in NumPy 1.12.0, operator!= below isn't
# element-wise but just returns False if fill_value is not str,
# so cast to object comparison to be safe
if is_string_dtype(arr):
arr = arr.astype(object)
mask = arr != fill_value
length = len(arr)
if length != mask.size:
# the arr is a SparseArray
indices = mask.sp_index.indices
else:
indices = mask.nonzero()[0].astype(np.int32)
index = _make_index(length, indices, kind)
sparsified_values = arr[mask]
return sparsified_values, index, fill_value
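# Illustrative: make_sparse(np.array([0., np.nan, 3.])) keeps the values
# [0., 3.], builds an index over positions [0, 2], and returns a NaN
# fill_value; kind='block' groups consecutive positions into blocks instead.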
def _make_index(length, indices, kind):
if kind == 'block' or isinstance(kind, BlockIndex):
locs, lens = splib.get_blocks(indices)
index = BlockIndex(length, locs, lens)
elif kind == 'integer' or isinstance(kind, IntIndex):
index = IntIndex(length, indices)
else: # pragma: no cover
raise ValueError('must be block or integer type')
return index
ops.add_special_arithmetic_methods(SparseArray, arith_method=_arith_method,
comp_method=_arith_method,
bool_method=_arith_method,
use_numexpr=False)
| 32.07911
| 79
| 0.579609
|
4a12dd4e2ba866630104305dcae0a0a2e769a40a
| 18,540
|
py
|
Python
|
ref_utils/LFDataset.py
|
IndigoPurple/EFENet
|
e88234486f19534274a0a20badc251788ac67e31
|
[
"MIT"
] | 11
|
2021-10-18T02:14:37.000Z
|
2022-02-10T13:17:07.000Z
|
ref_utils/LFDataset.py
|
IndigoPurple/EFENet
|
e88234486f19534274a0a20badc251788ac67e31
|
[
"MIT"
] | null | null | null |
ref_utils/LFDataset.py
|
IndigoPurple/EFENet
|
e88234486f19534274a0a20badc251788ac67e31
|
[
"MIT"
] | null | null | null |
import sys
import matplotlib.pyplot as plt
import numpy as np
import h5py
from ulti import blend, grayscale, my_imshow, psnr
from Dataset import Dataset
import time
import scipy
import scipy.io
import scipy.misc
import scipy.io as sio
import os
# dataset_test = ChairsSDHomDataset(filename = '/fileserver/haitian/dataset/ChairsSDHom/test_bicubic.h5')
# for i in range(0,100):
# print 'next batch'
# dataset_test.nextBatch(batchsize=4, shuffle=False, view_mode = 'Random', residue = False, augmentation = False, index_inc = False)
# dataset_test.nextBatch(batchsize=4, shuffle=False, view_mode = 'Random', residue = False, augmentation = False, index_inc = True)
# dataset_train = Dataset(filename = '../HT_sr/flowdata/LF_video_Dataset/train_bicubic.h5', scale = 8)
# dataset_train.nextBatch(batchsize=8,shuffle=True,view_mode = 'Random',residue = True, augmentation = True)
# dataset_train.nextBatch(batchsize=8,shuffle=True,view_mode = 'Random',residue = True, augmentation = True)
# # dataset_train.nextBatch(batchsize=8,shuffle=False,view_mode = 'Random',residue = False, augmentation = True)
# # dataset_test = Dataset(filename = '../HT_sr/flowdata/LF_video_Dataset/test_bicubic.h5')
class LFDataset(Dataset):
def loadArrays(self, filename, scale = 8, MDSR_as_bilinear = False):
f = h5py.File(filename,'r')
self.scale = scale
self.arrays['img_HR'] = f.get('/img_HR')
self.arrays['img_LR'] = f.get('/img_LR_'+str(scale))
if MDSR_as_bilinear == False:
self.arrays['img_LR_upsample'] = f.get('/img_LR_'+str(scale)+'_upsample')
else:
            print 'warning: using MDSR output as the flownet input'
self.arrays['img_LR_upsample'] = f.get('/MDSR_'+str(scale))
self.arrays['img_MDSR'] = f.get('/MDSR_'+str(scale))
#print 'loading dataset: ', filename
#print 'img_HR ', self.arrays['img_HR'].shape
#print 'img_LR ', self.arrays['img_LR'].shape
#print 'img_LR_upsample ', self.arrays['img_LR_upsample'].shape
#if not self.arrays['img_MDSR'] is None:
# print 'img_MDSR ', self.arrays['img_MDSR'].shape
#else:
# print 'cannot find img_MDSR'
self.size_N = self.arrays['img_HR'].shape[0]
self.size_C = self.arrays['img_HR'].shape[3]
self.size_H = self.arrays['img_HR'].shape[4]
self.size_W = self.arrays['img_HR'].shape[5]
#edit by ytan
self.iter_count = 0
return
def __genViewPosition(self, view_mode, specified_view = None):
if (view_mode == 'Random'):
rnd_y = self.rng_viewpoint.randint(0,7)
rnd_x = self.rng_viewpoint.randint(0,7)
while True:
rnd_y_ref = self.rng_viewpoint.randint(0,7)
rnd_x_ref = self.rng_viewpoint.randint(0,7)
if (rnd_y_ref!=rnd_y) or (rnd_x_ref!=rnd_x):
break
        elif (view_mode == 'Random_FixOffset'): # disparity = (-3,-3)
rnd_y = self.rng_viewpoint.randint(6,7)
rnd_x = self.rng_viewpoint.randint(6,7)
rnd_y_ref = rnd_y-6
rnd_x_ref = rnd_x-6
elif (view_mode == 'Fixed'):
rnd_y = 0
rnd_x = 0
rnd_y_ref = 3
rnd_x_ref = 3
elif (view_mode == 'Fixed-inv'):
rnd_y = 3
rnd_x = 3
rnd_y_ref = 0
rnd_x_ref = 0
elif (view_mode == 'Fixed-inv-large'):
rnd_y = 7
rnd_x = 7
rnd_y_ref = 0
rnd_x_ref = 0
elif (view_mode == 'specified'):
rnd_y, rnd_x, rnd_y_ref, rnd_x_ref = (specified_view, specified_view, 0, 0)
return rnd_y,rnd_x,rnd_y_ref,rnd_x_ref
    def genViewPosition(self, view_mode, specified_view = None):
        # public alias: delegate to the private implementation above
        return self.__genViewPosition(view_mode, specified_view = specified_view)
def debugDataset(self): # test the average psnr of 'random' view and 'Fixed' view
sum_PSNR_fixed = 0
sum_PSNR_random = 0
sum_distance_random = 0
import math
for idx_img in range(268):
idx_y, idx_x, idx_y_ref, idx_x_ref = self.__genViewPosition('Random')
            buffer_HR = np.asarray(self.arrays['img_HR'][idx_img,idx_y,idx_x,:,:,:], dtype = np.float32) / 255.0
            buffer_REF = np.asarray(self.arrays['img_HR'][idx_img,idx_y_ref,idx_x_ref,:,:,:], dtype = np.float32) / 255.0
            sum_PSNR_random += psnr(buffer_HR, buffer_REF)
            sum_distance_random += math.sqrt( (idx_y-idx_y_ref)*(idx_y-idx_y_ref) + (idx_x-idx_x_ref)*(idx_x-idx_x_ref) )
            idx_y, idx_x, idx_y_ref, idx_x_ref = self.__genViewPosition('Fixed')
            buffer_HR = np.asarray(self.arrays['img_HR'][idx_img,idx_y,idx_x,:,:,:], dtype = np.float32) / 255.0
            buffer_REF = np.asarray(self.arrays['img_HR'][idx_img,idx_y_ref,idx_x_ref,:,:,:], dtype = np.float32) / 255.0
            sum_PSNR_fixed += psnr(buffer_HR, buffer_REF)
print 'psnr: ',sum_PSNR_fixed/(idx_img+1), sum_PSNR_random/(idx_img+1)
print 'distance: ', 4.2426, sum_distance_random/(idx_img+1)
def nextBatch_new_fake(self, batchsize = 8, shuffle = False, view_mode = 'Random', specified_view = None, augmentation = False, offset_augmentation = False, index_inc = True, crop_shape = None, SR=True, Dual = False):
# nextBatch_new(batchsize = 8, shuffle = False, view_mode = 'Random', augmentation = False, index_inc = True)
# generate a dictionary that contains HR, LR and SR images of two views
idx_list = self.genIndex_list(batchsize, shuffle, index_inc = index_inc)
for k in range(batchsize):
# generate img number
idx_img = idx_list[k]
# generate view position
y1, x1, y2, x2 = self.__genViewPosition(view_mode, specified_view = specified_view)
# data augmentation
if augmentation:
augmentation_config = self.augmentation_array_config()
            if offset_augmentation:
                dx = self.rng_viewpoint_augmentation.randint(-30, 30)
                dy = self.rng_viewpoint_augmentation.randint(-30, 30)
                # dry run: use a dummy buffer so the offset shuffle can execute
                # without any HDF5 IO (the original referenced an unset name)
                input_img2_HR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
                input_img2_HR[:,:,max(0, 0+dy):min(self.size_H, self.size_H+dy), max(0, 0+dx):min(self.size_W, self.size_W+dx)] = input_img2_HR[:,:,0:min(self.size_H, self.size_H+dy)-max(0, 0+dy), 0:min(self.size_W, self.size_W+dx)-max(0, 0+dx)]
return False
def nextBatch_new(self, batchsize = 8, shuffle = False, view_mode = 'Random', specified_view = None, augmentation = False, offset_augmentation = False, index_inc = True, crop_shape = None, SR=True, Dual = False, checkpoint=0):
# nextBatch_new(batchsize = 8, shuffle = False, view_mode = 'Random', augmentation = False, index_inc = True)
# generate a dictionary that contains HR, LR and SR images of two views
buff = dict()
#edited by ytan on 20181210
if self.iter_count == 0:
            print ('skipping the first %d batches (checkpoint)'%(checkpoint))
for i in range(checkpoint):
idx_list = self.genIndex_list(batchsize, shuffle, index_inc = index_inc)
#print (idx_list)
for j in range(len(idx_list)):
y1, x1, y2, x2 = self.__genViewPosition(view_mode, specified_view = specified_view)
augmentation_config = self.augmentation_array_config()
self.iter_count += 1
# init
input_img1_LR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
input_img2_LR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
# input_img1_LR = np.zeros([batchsize,3,320,512], dtype = np.float32)
input_img1_HR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
input_img2_HR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
if SR:
input_img1_SR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
if Dual:
input_img2_SR = np.zeros([batchsize,3,self.size_H,self.size_W], dtype = np.float32)
t_read = time.time()
idx_list = self.genIndex_list(batchsize, shuffle, index_inc = index_inc)
for k in range(batchsize):
# generate img number
idx_img = idx_list[k]
# generate view position
y1, x1, y2, x2 = self.__genViewPosition(view_mode, specified_view = specified_view)
# LR
input_img1_LR[k,:,:,:] = np.asarray(self.arrays['img_LR_upsample'][idx_img,y1,x1,:,:,:], dtype = np.float32) / 255.0
if Dual:
input_img2_LR[k,:,:,:] = np.asarray(self.arrays['img_LR_upsample'][idx_img,y2,x2,:,:,:], dtype = np.float32) / 255.0
# HR
input_img1_HR[k,:,:,:] = np.asarray(self.arrays['img_HR'][idx_img,y1,x1,:,:,:], dtype = np.float32) / 255.0
input_img2_HR[k,:,:,:] = np.asarray(self.arrays['img_HR'][idx_img,y2,x2,:,:,:], dtype = np.float32) / 255.0
# SR
if SR:
input_img1_SR[k,:,:,:] = np.asarray(self.arrays['img_MDSR'][idx_img,y1,x1,:,:,:], dtype = np.float32) / 255.0
if Dual:
input_img2_SR[k,:,:,:] = np.asarray(self.arrays['img_MDSR'][idx_img,y2,x2,:,:,:], dtype = np.float32) / 255.0
t_aug = time.time()
# data augmentation
if augmentation:
augmentation_config = self.augmentation_array_config()
input_img1_LR = self.augmentation_array(input_img1_LR, augmentation_config)
if Dual:
input_img2_LR = self.augmentation_array(input_img2_LR, augmentation_config)
input_img1_HR = self.augmentation_array(input_img1_HR, augmentation_config)
input_img2_HR = self.augmentation_array(input_img2_HR, augmentation_config)
if SR:
input_img1_SR = self.augmentation_array(input_img1_SR, augmentation_config)
if Dual:
input_img2_SR = self.augmentation_array(input_img2_SR, augmentation_config)
t_crop = time.time()
# if offset_augmentation:
# # dx = self.rng_viewpoint_augmentation.randint(-20, 21)
# # dy = self.rng_viewpoint_augmentation.randint(-20, 21)
# dx = self.rng_viewpoint_augmentation.randint(-14, 15)
# dy = self.rng_viewpoint_augmentation.randint(-14, 15)
# input_img2_HR_aug = np.array(input_img2_HR, copy=True)
# input_img2_HR_aug[:,:,max(0, 0+dy):min(self.size_H, self.size_H+dy), max(0, 0+dx):min(self.size_W, self.size_W+dx)] = input_img2_HR[:,:,0:min(self.size_H, self.size_H+dy)-max(0, 0+dy), 0:min(self.size_W, self.size_W+dx)-max(0, 0+dx)]
# plt.subplot(1,1,1)
# my_imshow(input_img2_HR[np.newaxis,0,:,:,:])
# plt.pause(1.0)
# plt.subplot(1,1,1)
# my_imshow(input_img2_HR_aug[np.newaxis,0,:,:,:])
# plt.show()
if offset_augmentation:
dx = self.rng_viewpoint_augmentation.randint(-30, 30)
dy = self.rng_viewpoint_augmentation.randint(-30, 30)
# print dx, dy
input_img2_HR[:,:,max(0, 0+dy):min(self.size_H, self.size_H+dy), max(0, 0+dx):min(self.size_W, self.size_W+dx)] = input_img2_HR[:,:,0:min(self.size_H, self.size_H+dy)-max(0, 0+dy), 0:min(self.size_W, self.size_W+dx)-max(0, 0+dx)]
# crop image
if not crop_shape is None:
input_img1_LR = input_img1_LR[:,:,0:crop_shape[0],0:crop_shape[1]]
if Dual:
input_img2_LR = input_img2_LR[:,:,0:crop_shape[0],0:crop_shape[1]]
input_img1_HR = input_img1_HR[:,:,0:crop_shape[0],0:crop_shape[1]]
input_img2_HR = input_img2_HR[:,:,0:crop_shape[0],0:crop_shape[1]]
if SR:
input_img1_SR = input_img1_SR[:,:,0:crop_shape[0],0:crop_shape[1]]
if Dual:
input_img2_SR = input_img2_SR[:,:,0:crop_shape[0],0:crop_shape[1]]
t_end = time.time()
# print 'g_read time: ', t_aug - t_read, 'g_aug time: ', t_crop - t_aug, 'g_aug time: ', t_end - t_crop
# pack buffer
buff['input_img1_LR'] = input_img1_LR
if Dual:
buff['input_img2_LR'] = input_img2_LR
buff['input_img1_HR'] = input_img1_HR
buff['input_img2_HR'] = input_img2_HR
if SR:
buff['input_img1_SR'] = input_img1_SR
if Dual:
buff['input_img2_SR'] = input_img2_SR
return buff
def generate_compare_experiment_data(self, save_path):
crop_shape = [320,512]
# init
input_img1_LR = np.zeros([3,self.size_H,self.size_W], dtype = np.float32)
input_img1_HR = np.zeros([3,self.size_H,self.size_W], dtype = np.float32)
input_img2_HR = np.zeros([3,self.size_H,self.size_W], dtype = np.float32)
input_img1_SR = np.zeros([3,self.size_H,self.size_W], dtype = np.float32)
for v in range(1,8):
folder = save_path+'/LR('+str(v)+','+str(v)+')-REF(0,0)'
print folder
if not os.path.exists(folder):
os.mkdir(folder)
if not os.path.exists(folder+'/LR/'):
os.mkdir(folder+'/LR/')
if not os.path.exists(folder+'/GT/'):
os.mkdir(folder+'/GT/')
if not os.path.exists(folder+'/REF/'):
os.mkdir(folder+'/REF/')
if not os.path.exists(folder+'/MDSR/'):
os.mkdir(folder+'/MDSR/')
if not os.path.exists(folder+'/LR_bicubic/'):
os.mkdir(folder+'/LR_bicubic/')
for idx_img in range(self.size_N):
y1, x1, y2, x2 = (v, v, 0, 0)
img_LR_upsample = np.asarray(self.arrays['img_LR_upsample'][idx_img,y1,x1,:,0:crop_shape[0],0:crop_shape[1]], dtype = np.float32) / 255.0
img_LR = np.asarray(self.arrays['img_LR'][idx_img,y1,x1,:,0:crop_shape[0]/self.scale,0:crop_shape[1]/self.scale], dtype = np.float32) / 255.0
input_img1_HR = np.asarray(self.arrays['img_HR'][idx_img,y1,x1,:,0:crop_shape[0],0:crop_shape[1]], dtype = np.float32) / 255.0
input_img2_HR = np.asarray(self.arrays['img_HR'][idx_img,y2,x2,:,0:crop_shape[0],0:crop_shape[1]], dtype = np.float32) / 255.0
input_img1_SR = np.asarray(self.arrays['img_MDSR'][idx_img,y1,x1,:,0:crop_shape[0],0:crop_shape[1]], dtype = np.float32) / 255.0
scipy.misc.toimage(np.squeeze(np.transpose(img_LR_upsample,axes=(1,2,0))), cmin=0.0, cmax=1.0).save(folder+'/LR_bicubic/'+str(idx_img)+'.png')
scipy.misc.toimage(np.squeeze(np.transpose(img_LR,axes=(1,2,0))), cmin=0.0, cmax=1.0).save(folder+'/LR/'+str(idx_img)+'.png')
scipy.misc.toimage(np.squeeze(np.transpose(input_img1_HR,axes=(1,2,0))), cmin=0.0, cmax=1.0).save(folder+'/GT/'+str(idx_img)+'.png')
scipy.misc.toimage(np.squeeze(np.transpose(input_img2_HR,axes=(1,2,0))), cmin=0.0, cmax=1.0).save(folder+'/REF/'+str(idx_img)+'.png')
scipy.misc.toimage(np.squeeze(np.transpose(input_img1_SR,axes=(1,2,0))), cmin=0.0, cmax=1.0).save(folder+'/MDSR/'+str(idx_img)+'.png')
def generate_MDSR_input_data(self, save_path=''):
crop_shape = [320,512]
# init
input_img1_LR = np.zeros([3,self.size_H,self.size_W], dtype = np.float32)
for idx_img in range(self.size_N): #self.size_N
print idx_img, ' of ', self.size_N
for idx_y in range(8):
for idx_x in range(8):
filename = save_path+'/'+str(idx_img)+'_'+str(idx_y)+'_'+str(idx_x)+'.png'
if os.path.exists(filename) and imghdr.what(filename)=='png':
continue
img_LR = np.asarray(self.arrays['img_LR'][idx_img,idx_y,idx_x,:,:,:], dtype = np.float32) / 255.0
scipy.misc.toimage(img_LR, cmin=0.0, cmax=1.0).save(filename)
# dataset_train = LFDataset(filename = '/fileserver/haitian/dataset/lf_video_dataset/train_x4_x8.h5', scale = 4)
# dataset_train.generate_MDSR_input_data('/fileserver/haitian/dataset/lf_video_dataset/MDSR/LFDataset_train_LR_x4')
# dataset_train = LFDataset(filename = '/fileserver/haitian/dataset/lf_video_dataset/test_x4_x8.h5', scale = 4)
# dataset_train.generate_MDSR_input_data('/fileserver/haitian/dataset/lf_video_dataset/MDSR/LFDataset_test_LR_x4')
# dataset_train = LFDataset(filename = '/fileserver/haitian/dataset/lf_video_dataset/test_x4_x8.h5', scale = 8)
# dataset_train.generate_compare_experiment_data('/fileserver/haitian/haitian_backup/ECCV_RefSR_exp/LF_dataset_x8')
# dataset_train = LFDataset(filename = '/fileserver/haitian/dataset/lf_video_dataset/test_x4_x8.h5', scale = 4)
# dataset_train.generate_compare_experiment_data('/fileserver/haitian/haitian_backup/ECCV_RefSR_exp/LF_dataset_x4')
# dataset_test = LFDataset(filename = '/fileserver/haitian/dataset/lf_video_dataset/test_x4_x8.h5')
# for i in range(100):
# dataset_test.nextBatch_new(batchsize=8,shuffle=True,view_mode = 'Random', augmentation = True, offset_augmentation = True, crop_shape = (320,512))
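# Minimal usage sketch (the HDF5 path and scale are placeholders; assumes a
# file with the layout loadArrays expects):
#   ds = LFDataset(filename = '/path/to/test_x4_x8.h5', scale = 4)
#   batch = ds.nextBatch_new(batchsize = 4, shuffle = False, view_mode = 'Fixed', crop_shape = (320, 512))
#   lr, hr, ref = batch['input_img1_LR'], batch['input_img1_HR'], batch['input_img2_HR']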
| 51.787709
| 247
| 0.61068
|
4a12de7a9778cef173d8c04bfb12379ba8bf6396
| 7,489
|
py
|
Python
|
Tutorials/Slither/Slither-Source-Code.py
|
Charles0115/Python-PyGame-Projects
|
43ae8527f8e584d1363f1e24a4e455127594ead8
|
[
"MIT"
] | 1
|
2020-06-14T12:47:39.000Z
|
2020-06-14T12:47:39.000Z
|
Tutorials/Slither/Slither-Source-Code.py
|
Charles0115/Python-Pygame-Projects
|
43ae8527f8e584d1363f1e24a4e455127594ead8
|
[
"MIT"
] | null | null | null |
Tutorials/Slither/Slither-Source-Code.py
|
Charles0115/Python-Pygame-Projects
|
43ae8527f8e584d1363f1e24a4e455127594ead8
|
[
"MIT"
] | null | null | null |
import pygame
import time
import random
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
green = (0, 155, 0)
display_width = 800
display_height = 600
gameDisplay = pygame.display.set_mode((display_width, display_height))
pygame.display.set_caption('Slither')
icon = pygame.image.load('apple2.png')
pygame.display.set_icon(icon)
img = pygame.image.load('snakehead2.png')
appleimg = pygame.image.load('apple2.png')
clock = pygame.time.Clock()
AppleThickness = 30
block_size = 20
FPS = 15
direction = "right"
smallfont = pygame.font.SysFont("comicsansms", 25)
medfont = pygame.font.SysFont("comicsansms", 50)
largefont = pygame.font.SysFont("comicsansms", 80)
def pause():
paused = True
message_to_screen("Paused",
black,
-100,
size="large")
message_to_screen("Press C to continue or Q to quit.",
black,
25)
pygame.display.update()
while paused:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
paused = False
elif event.key == pygame.K_q:
pygame.quit()
quit()
# gameDisplay.fill(white)
clock.tick(5)
def score(score):
text = smallfont.render("Score: " + str(score), True, black)
gameDisplay.blit(text, [0, 0])
def randAppleGen():
randAppleX = round(random.randrange(0, display_width - AppleThickness)) # /10.0)*10.0
randAppleY = round(random.randrange(0, display_height - AppleThickness)) # /10.0)*10.0
return randAppleX, randAppleY
def game_intro():
intro = True
while intro:
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
quit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_c:
intro = False
if event.key == pygame.K_q:
pygame.quit()
quit()
gameDisplay.fill(white)
message_to_screen("Welcome to Slither",
green,
-100,
"large")
message_to_screen("The objective of the game is to eat red apples",
black,
-30)
message_to_screen("The more apples you eat, the longer you get",
black,
10)
message_to_screen("If you run into yourself, or the edges, you die!",
black,
50)
message_to_screen("Press C to play, P to pause or Q to quit.",
black,
180)
pygame.display.update()
clock.tick(15)
def snake(block_size, snakelist):
if direction == "right":
head = pygame.transform.rotate(img, 270)
if direction == "left":
head = pygame.transform.rotate(img, 90)
if direction == "up":
head = img
if direction == "down":
head = pygame.transform.rotate(img, 180)
gameDisplay.blit(head, (snakelist[-1][0], snakelist[-1][1]))
for XnY in snakelist[:-1]:
pygame.draw.rect(gameDisplay, green, [XnY[0], XnY[1], block_size, block_size])
def text_objects(text, color, size):
if size == "small":
textSurface = smallfont.render(text, True, color)
elif size == "medium":
textSurface = medfont.render(text, True, color)
elif size == "large":
textSurface = largefont.render(text, True, color)
return textSurface, textSurface.get_rect()
def message_to_screen(msg, color, y_displace=0, size="small"):
textSurf, textRect = text_objects(msg, color, size)
textRect.center = (display_width / 2), (display_height / 2) + y_displace
gameDisplay.blit(textSurf, textRect)
def gameLoop():
global direction
direction = 'right'
gameExit = False
gameOver = False
lead_x = display_width / 2
lead_y = display_height / 2
lead_x_change = 10
lead_y_change = 0
snakeList = []
snakeLength = 1
randAppleX, randAppleY = randAppleGen()
while not gameExit:
if gameOver:
message_to_screen("Game over",
red,
y_displace=-50,
size="large")
message_to_screen("Press C to play again or Q to quit",
black,
50,
size="medium")
pygame.display.update()
while gameOver:
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameOver = False
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
gameExit = True
gameOver = False
if event.key == pygame.K_c:
gameLoop()
for event in pygame.event.get():
if event.type == pygame.QUIT:
gameExit = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_LEFT:
direction = "left"
lead_x_change = -block_size
lead_y_change = 0
elif event.key == pygame.K_RIGHT:
direction = "right"
lead_x_change = block_size
lead_y_change = 0
elif event.key == pygame.K_UP:
direction = "up"
lead_y_change = -block_size
lead_x_change = 0
elif event.key == pygame.K_DOWN:
direction = "down"
lead_y_change = block_size
lead_x_change = 0
elif event.key == pygame.K_p:
pause()
if lead_x >= display_width or lead_x < 0 or lead_y >= display_height or lead_y < 0:
gameOver = True
lead_x += lead_x_change
lead_y += lead_y_change
gameDisplay.fill(white)
# pygame.draw.rect(gameDisplay, red, [randAppleX, randAppleY, AppleThickness, AppleThickness])
gameDisplay.blit(appleimg, (randAppleX, randAppleY))
snakeHead = [lead_x, lead_y]
snakeList.append(snakeHead)
if len(snakeList) > snakeLength:
del snakeList[0]
for eachSegment in snakeList[:-1]:
if eachSegment == snakeHead:
gameOver = True
snake(block_size, snakeList)
score(snakeLength - 1)
pygame.display.update()
if randAppleX < lead_x < randAppleX + AppleThickness or randAppleX < lead_x + block_size < randAppleX + AppleThickness:
if randAppleY < lead_y < randAppleY + AppleThickness:
randAppleX, randAppleY = randAppleGen()
snakeLength += 1
elif randAppleY < lead_y + block_size < randAppleY + AppleThickness:
randAppleX, randAppleY = randAppleGen()
snakeLength += 1
clock.tick(FPS)
pygame.quit()
quit()
game_intro()
gameLoop()
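# Note: running this script requires pygame plus the two image assets loaded
# above ('apple2.png' and 'snakehead2.png') in the working directory.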
| 27.332117
| 127
| 0.528375
|
4a12df7ee6f62c095aa1451c14fc7fbf4d9fafeb
| 10,554
|
py
|
Python
|
evology/research/MCarloLongRuns/archiv/PopulationDynamics.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | null | null | null |
evology/research/MCarloLongRuns/archiv/PopulationDynamics.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | 2
|
2022-01-10T02:10:56.000Z
|
2022-01-14T03:41:42.000Z
|
evology/research/MCarloLongRuns/archiv/PopulationDynamics.py
|
aymericvie/evology
|
8f00d94dee7208be5a5bdd0375a9d6ced25097f4
|
[
"Apache-2.0"
] | null | null | null |
"""
This experiment investigates how learning rates and reinvestment rates affect population dynamics.
It takes a fixed initial condition (wealth coordinates), time horizon and population size.
"""
# Imports
import numpy as np
import pandas as pd
import sys
if sys.platform == "darwin":
sys.path.append("/Users/aymericvie/Documents/GitHub/evology/evology/code")
# Need to be executed from cd to MCarloLongRuns
if sys.platform == "linux":
sys.path.append("/home/vie/Documents/GitHub/evology/evology/code")
from main import main as evology
import multiprocessing as mp
# Fixed parameters
TimeHorizon = (
252 * 100 + 3 * 21
) # 100 Years + 3 months to compensate early period without recording data.
PopulationSize = 100
Coordinates = [1 / 3, 1 / 3, 1 / 3]
seed = 8
reps = 150
# Config: coords, popsize, time, selection rate, mutation rate, reinvestment rate
Config1 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 1] # Static
Config2 = [Coordinates, PopulationSize, TimeHorizon, 1 / 252, 1 / 252, 1] # Learning 1Y
Config3 = [Coordinates, PopulationSize, TimeHorizon, 1 / 252, 0, 0] # Imitation-only 1Y
Config4 = [Coordinates, PopulationSize, TimeHorizon, 0, 1 / 252, 0]  # Mutation-only 1Y (selection 0, mutation 1/252; reinvestment assumed 0 as in Config3)
Config5 = [
Coordinates,
PopulationSize,
TimeHorizon,
1 / (252 * 2),
1 / (252 * 2),
0,
] # Learning 2Y
Config6 = [
Coordinates,
PopulationSize,
TimeHorizon,
1 / (252 * 3),
1 / (252 * 3),
0,
] # Learning 3Y
Config7 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 1.2] # Reinvestment high
Config8 = [Coordinates, PopulationSize, TimeHorizon, 0, 0, 0.8] # Reinvestment low
"""
def main(
space,
solver,
wealth_coordinates,
POPULATION_SIZE,
MAX_GENERATIONS,
PROBA_SELECTION,
MUTATION_RATE,
ReinvestmentRate,
tqdm_display,
reset_wealth
):
"""
def job1(iteration, Config=Config1):
    # config is bound as a default argument at definition time; reusing one
    # module-level `Config` variable would make every job read its last value
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 1 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job2(iteration, Config=Config2):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 2 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job3(iteration, Config=Config3):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 3 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job4(iteration, Config=Config4):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 4 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job5(iteration, Config=Config5):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 5 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job6(iteration, Config=Config6):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 6 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job7(iteration, Config=Config7):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 7 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def job8(iteration, Config=Config8):  # config bound at definition time
np.random.seed()
try:
df, pop = evology(
"scholl",
"esl.true",
Config[0],
Config[1],
Config[2],
Config[3],
Config[4],
Config[5],
True,
False,
)
return df["WShare_NT"], df["WShare_VI"], df["WShare_TF"]
    except Exception:
print("Job 8 failed and passed.")
array = np.zeros((TimeHorizon - 3 * 21))
return pd.Series(array), pd.Series(array), pd.Series(array)
def run_config(job):
    # run `reps` independent repetitions of one configuration in parallel
    p = mp.Pool()
    data = p.map(job, [i for i in range(reps)])
    p.close()
    data = np.array(list(data))
    return data
if __name__ == "__main__":
    jobs = [job1, job2, job3, job4, job5, job6, job7, job8]
    for config_idx, job in enumerate(jobs, start=1):
        data = run_config(job)
        dfNT = pd.DataFrame()
        dfVI = pd.DataFrame()
        dfTF = pd.DataFrame()
        for i in range(reps):
            name = "Rep%s" % i
            dfNT[name] = data[i, 0]
            dfVI[name] = data[i, 1]
            dfTF[name] = data[i, 2]
        folder = "data_config%s" % config_idx
        dfNT.to_csv(folder + "/MC_NT.csv")
        dfVI.to_csv(folder + "/MC_VI.csv")
        dfTF.to_csv(folder + "/MC_TF.csv")
| 23.40133
| 99
| 0.538185
|
4a12dfc674fa58ed8fcc00061bd8d36161e9f45f
| 994
|
py
|
Python
|
all_functions/scrapy/tutorial/tutorial/spiders/dmoz_spider.py
|
Heroku-elasa/-heroku-buildpack-python-ieee-new
|
06ec2fda04d9e478ed2506400e460489b0ca91ab
|
[
"MIT"
] | null | null | null |
all_functions/scrapy/tutorial/tutorial/spiders/dmoz_spider.py
|
Heroku-elasa/-heroku-buildpack-python-ieee-new
|
06ec2fda04d9e478ed2506400e460489b0ca91ab
|
[
"MIT"
] | 15
|
2021-03-18T20:23:25.000Z
|
2022-03-11T23:16:16.000Z
|
all_functions/scrapy/tutorial/tutorial/spiders/dmoz_spider.py
|
Heroku-elasa/heroku-buildpack-python-ieee-new
|
06ec2fda04d9e478ed2506400e460489b0ca91ab
|
[
"MIT"
] | 1
|
2017-03-04T16:48:55.000Z
|
2017-03-04T16:48:55.000Z
|
import scrapy
from scrapy.selector import HtmlXPathSelector
# item imports are assumptions: the classes below are referenced but never
# imported; these module paths follow the standard Scrapy tutorial layouts
from tutorial.items import DmozItem
from craigslist_sample.items import CraigslistSampleItem
class DmozSpider(scrapy.Spider):
name = "dmoz"
allowed_domains = ["dmoz.org"]
start_urls = [
"http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
"http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/"
]
def parse(self, response):
for sel in response.xpath('//ul/li'):
item = DmozItem()
item['title'] = sel.xpath('a/text()').extract()
item['link'] = sel.xpath('a/@href').extract()
item['desc'] = sel.xpath('text()').extract()
yield item
def parse_items(self, response):
hxs = HtmlXPathSelector(response)
titles = hxs.select('//span[@class="pl"]')
items = []
        for title in titles:
            item = CraigslistSampleItem()
            item["title"] = title.select("a/text()").extract()
            item["link"] = title.select("a/@href").extract()
items.append(item)
        return items
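# Usage sketch: run from the Scrapy project root with the standard CLI, e.g.
#   scrapy crawl dmoz -o items.json
# (the output path is illustrative)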
| 32.064516
| 79
| 0.561368
|
4a12dfcfbb428328c4ebf3dacaa7f9c9201aa56a
| 399
|
py
|
Python
|
web_project/asgi.py
|
TIJMacLean/FlyMyPlane
|
3e3f069b37482456857146383e3c5d8bbcdfdb0e
|
[
"Apache-2.0"
] | 1
|
2020-11-19T06:11:25.000Z
|
2020-11-19T06:11:25.000Z
|
web_project/asgi.py
|
TIJMacLean/FlyMyPlane
|
3e3f069b37482456857146383e3c5d8bbcdfdb0e
|
[
"Apache-2.0"
] | 7
|
2021-04-08T21:31:50.000Z
|
2022-01-13T03:05:02.000Z
|
web_project/asgi.py
|
AhmadTariqAlflahat/Web-Project-Django
|
80d8ba5cd04df9781e94a65122aa703999c1bdb8
|
[
"MIT"
] | null | null | null |
"""
ASGI config for web_project project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_project.settings')
application = get_asgi_application()
| 23.470588
| 78
| 0.789474
|
4a12e13515b4bfe127e3e8efd494388be34de178
| 23,537
|
py
|
Python
|
imagemounter/volume_system.py
|
pombredanne/imagemounter
|
5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f
|
[
"MIT"
] | 89
|
2015-05-08T08:29:18.000Z
|
2022-03-05T00:03:44.000Z
|
imagemounter/volume_system.py
|
personx000/imagemounter
|
5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f
|
[
"MIT"
] | 32
|
2015-04-08T19:59:51.000Z
|
2022-03-24T15:06:02.000Z
|
imagemounter/volume_system.py
|
personx000/imagemounter
|
5d8ba6f2b4cd61b9ad259d5265f6fd7f74a0e82f
|
[
"MIT"
] | 47
|
2015-03-04T19:04:11.000Z
|
2022-02-27T18:22:24.000Z
|
import inspect
import logging
import os
import subprocess
import sys
from collections import defaultdict
import re
from imagemounter import _util, dependencies
from imagemounter.exceptions import ArgumentError, SubsystemError, ModuleNotFoundError, PrerequisiteFailedError
logger = logging.getLogger(__name__)
class VolumeSystem(object):
"""A VolumeSystem is a collection of volumes. Every :class:`Disk` contains exactly one VolumeSystem. Each
    system contains several :class:`Volume` objects, which, in turn, may contain additional volume systems.
"""
def __init__(self, parent, vstype='', volume_detector=''):
"""Creates a VolumeSystem.
:param parent: the parent may either be a :class:`Disk` or a :class:`Volume` that contains this VolumeSystem.
:param str vstype: the volume system type to use.
:param str volume_detector: the volume system detection method to use
"""
self.parent = parent
self.disk = parent.disk if hasattr(parent, 'disk') else parent
if vstype:
self.vstype = vstype
elif self.parent.index in self.disk.parser.vstypes:
self.vstype = self.disk.parser.vstypes[self.parent.index]
elif '*' in self.disk.parser.vstypes:
self.vstype = self.disk.parser.vstypes['*']
else:
self.vstype = "detect"
if volume_detector == 'auto' or not volume_detector:
self.volume_detector = VolumeSystem._determine_auto_detection_method()
else:
self.volume_detector = volume_detector
self.volume_source = ""
self.volumes = []
self.has_detected = False
self._disktype = defaultdict(dict)
def __iter__(self):
yield from self.volumes
def __len__(self):
return len(self.volumes)
def __getitem__(self, item):
item_suffix = ".{}".format(item)
for v in self.volumes:
if v.index.endswith(item_suffix) or v.index == str(item):
return v
        raise KeyError(item)
def _make_subvolume(self, **args):
"""Creates a subvolume, adds it to this class and returns it."""
from imagemounter.volume import Volume
v = Volume(disk=self.disk, parent=self.parent,
volume_detector=self.volume_detector,
**args) # vstype is not passed down, let it decide for itself.
self.volumes.append(v)
return v
def _make_single_subvolume(self, only_one=True, **args):
"""Creates a subvolume, adds it to this class, sets the volume index to 0 and returns it.
:param bool only_one: if this volume system already has at least one volume, it is returned instead.
"""
if only_one and self.volumes:
return self.volumes[0]
if self.parent.index is None:
index = '0'
else:
index = '{0}.0'.format(self.parent.index)
volume = self._make_subvolume(index=index, **args)
return volume
def detect_volumes(self, vstype=None, method=None, force=False):
"""Iterator for detecting volumes within this volume system.
:param str vstype: The volume system type to use. If None, uses :attr:`vstype`
        :param str method: The detection method to use. If None, uses :attr:`volume_detector`
        :param bool force: Specify if you want to force running the detection even if has_detected is True.
"""
if self.has_detected and not force:
logger.warning("Detection already ran.")
return
if vstype is None:
vstype = self.vstype
if method is None:
method = self.volume_detector
if method == 'auto':
method = VolumeSystem._determine_auto_detection_method()
if method in ALL_VOLUME_SYSTEM_DETECTORS:
yield from ALL_VOLUME_SYSTEM_DETECTORS[method].detect(self, vstype)
else:
logger.error("No viable detection method found")
raise ArgumentError("No viable detection method found")
self.has_detected = True
@staticmethod
def _determine_auto_detection_method():
"""Return the detection method to use when the detection method is 'auto'"""
if dependencies.pytsk3.is_available:
return 'pytsk3'
elif dependencies.mmls.is_available:
return 'mmls'
elif dependencies.parted.is_available:
return 'parted'
else:
raise PrerequisiteFailedError("No valid detection method is installed.")
def preload_volume_data(self):
"""Preloads volume data. It is used to call internal methods that contain information about a volume."""
self._load_disktype_data()
def _load_disktype_data(self):
"""Calls the :command:`disktype` command and obtains the disk GUID from GPT volume systems. As we
are running the tool anyway, the label is also extracted from the tool if it is not yet set.
The disktype data is only loaded and not assigned to volumes yet.
"""
if not _util.command_exists('disktype'):
logger.warning("disktype not installed, could not detect volume type")
return None
disktype = _util.check_output_(['disktype', self.parent.get_raw_path()]).strip()
current_partition = None
for line in disktype.splitlines():
if not line:
continue
# noinspection PyBroadException
try:
line = line.strip()
find_partition_nr = re.match(r"^Partition (\d+):", line)
if find_partition_nr:
current_partition = int(find_partition_nr.group(1))
elif current_partition is not None:
if line.startswith("Type ") and "GUID" in line:
self._disktype[current_partition]['guid'] = \
line[line.index('GUID') + 5:-1].strip() # output is between ()
elif line.startswith("Partition Name "):
self._disktype[current_partition]['label'] = \
line[line.index('Name ') + 6:-1].strip() # output is between ""
except Exception:
logger.exception("Error while parsing disktype output")
return
def _assign_disktype_data(self, volume, slot=None):
"""Assigns cached disktype data to a volume."""
if slot is None:
slot = volume.slot
if slot in self._disktype:
data = self._disktype[slot]
if not volume.info.get('guid') and 'guid' in data:
volume.info['guid'] = data['guid']
if not volume.info.get('label') and 'label' in data:
volume.info['label'] = data['label']
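# Usage sketch (assumes `disk` is an imagemounter Disk backing a mounted
# image; printed fields are illustrative):
#
#   vs = VolumeSystem(parent=disk, vstype='detect', volume_detector='auto')
#   for volume in vs.detect_volumes():
#       print(volume.index, volume.flag, volume.info.get('fsdescription'))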
class VolumeDetector(object):
type = None
special = False
def detect(self, volume_system, vstype='detect'):
"""Finds and mounts all volumes based on parted.
:param VolumeSystem volume_system: The volume system.
"""
raise NotImplementedError()
def _format_index(self, volume_system, idx):
"""Returns a formatted index given the disk index idx."""
if volume_system.parent.index is not None:
return '{0}.{1}'.format(volume_system.parent.index, idx)
else:
return str(idx)
class SingleVolumeDetector(VolumeDetector):
type = 'single'
special = True
def detect(self, volume_system, vstype='detect'):
"""'Detects' a single volume. It should not be called other than from a :class:`Disk`."""
volume = volume_system._make_single_subvolume(offset=0)
is_directory = os.path.isdir(volume_system.parent.get_raw_path())
if is_directory:
filesize = _util.check_output_(['du', '-scDb', volume_system.parent.get_raw_path()]).strip()
if filesize:
volume.size = int(filesize.splitlines()[-1].split()[0])
else:
description = _util.check_output_(['file', '-sL', volume_system.parent.get_raw_path()]).strip()
if description:
# description is the part after the :, until the first comma
volume.info['fsdescription'] = description.split(': ', 1)[1].split(',', 1)[0].strip()
if 'size' in description:
volume.size = int(re.findall(r'size:? (\d+)', description)[0])
else:
volume.size = os.path.getsize(volume_system.parent.get_raw_path())
volume.flag = 'alloc'
volume_system.volume_source = 'single'
volume_system._assign_disktype_data(volume)
yield volume
class Pytsk3VolumeDetector(VolumeDetector):
type = 'pytsk3'
def _find_volumes(self, volume_system, vstype='detect'):
"""Finds all volumes based on the pytsk3 library."""
try:
# noinspection PyUnresolvedReferences
import pytsk3
except ImportError:
logger.error("pytsk3 not installed, could not detect volumes")
raise ModuleNotFoundError("pytsk3")
baseimage = None
try:
# ewf raw image is now available on base mountpoint
# either as ewf1 file or as .dd file
raw_path = volume_system.parent.get_raw_path()
# noinspection PyBroadException
try:
baseimage = pytsk3.Img_Info(raw_path)
except Exception:
logger.error("Failed retrieving image info (possible empty image).", exc_info=True)
return []
try:
volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_' + vstype.upper()),
volume_system.parent.offset // volume_system.disk.block_size)
volume_system.volume_source = 'multi'
return volumes
except Exception as e:
# some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
if "(GPT or DOS at 0)" in str(e) and vstype != 'gpt':
volume_system.vstype = 'gpt'
# noinspection PyBroadException
try:
logger.warning("Error in retrieving volume info: TSK couldn't decide between GPT and DOS, "
"choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
volumes = pytsk3.Volume_Info(baseimage, getattr(pytsk3, 'TSK_VS_TYPE_GPT'))
volume_system.volume_source = 'multi'
return volumes
except Exception as e:
logger.exception("Failed retrieving image info (possible empty image).")
raise SubsystemError(e)
else:
logger.exception("Failed retrieving image info (possible empty image).")
raise SubsystemError(e)
finally:
if baseimage:
baseimage.close()
del baseimage
@dependencies.require(dependencies.pytsk3)
def detect(self, volume_system, vstype='detect'):
"""Generator that mounts every partition of this image and yields the mountpoint."""
# Loop over all volumes in image.
for p in self._find_volumes(volume_system, vstype):
import pytsk3
volume = volume_system._make_subvolume(
index=self._format_index(volume_system, p.addr),
offset=p.start * volume_system.disk.block_size,
size=p.len * volume_system.disk.block_size
)
# Fill volume with more information
volume.info['fsdescription'] = p.desc.strip().decode('utf-8')
if p.flags == pytsk3.TSK_VS_PART_FLAG_ALLOC:
volume.flag = 'alloc'
volume.slot = _util.determine_slot(p.table_num, p.slot_num)
volume_system._assign_disktype_data(volume)
logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(p.start, p.len,
volume.info['fsdescription']))
elif p.flags == pytsk3.TSK_VS_PART_FLAG_UNALLOC:
volume.flag = 'unalloc'
logger.info("Found unallocated space: block offset: {0}, length: {1} ".format(p.start, p.len))
elif p.flags == pytsk3.TSK_VS_PART_FLAG_META:
volume.flag = 'meta'
logger.info("Found meta volume: block offset: {0}, length: {1} ".format(p.start, p.len))
yield volume
class PartedVolumeDetector(VolumeDetector):
type = 'parted'
@dependencies.require(dependencies.parted)
def detect(self, volume_system, vstype='detect'):
"""Finds and mounts all volumes based on parted.
:param VolumeSystem volume_system: The volume system.
"""
# for some reason, parted does not properly return extended volume types in its machine
# output, so we need to execute it twice.
meta_volumes = []
# noinspection PyBroadException
try:
output = _util.check_output_(['parted', volume_system.parent.get_raw_path(), 'print'], stdin=subprocess.PIPE)
for line in output.splitlines():
if 'extended' in line:
meta_volumes.append(int(line.split()[0]))
except Exception:
logger.exception("Failed executing parted command.")
# skip detection of meta volumes
# noinspection PyBroadException
try:
# parted does not support passing in the vstype. It either works, or it doesn't.
cmd = ['parted', volume_system.parent.get_raw_path(), '-sm', 'unit s', 'print free']
output = _util.check_output_(cmd, stdin=subprocess.PIPE)
volume_system.volume_source = 'multi'
except Exception as e:
logger.exception("Failed executing parted command")
raise SubsystemError(e)
num = 0
for line in output.splitlines():
            if line.startswith("Warning") or not line or ':' not in line or line.startswith(volume_system.parent.get_raw_path()):
continue
line = line[:-1] # remove last ;
try:
slot, start, end, length, description = line.split(':', 4)
if ':' in description:
description, label, flags = description.split(':', 2)
else:
description, label, flags = description, '', ''
try:
slot = int(slot)
except ValueError:
continue
volume = volume_system._make_subvolume(
index=self._format_index(volume_system, num),
offset=int(start[:-1]) * volume_system.disk.block_size, # remove last s
size=int(length[:-1]) * volume_system.disk.block_size)
volume.info['fsdescription'] = description
if label:
volume.info['label'] = label
if flags:
volume.info['parted_flags'] = flags
# TODO: detection of meta volumes
if description == 'free':
volume.flag = 'unalloc'
logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start[:-1],
length[:-1]))
elif slot in meta_volumes:
volume.flag = 'meta'
volume.slot = slot
logger.info("Found meta volume: block offset: {0}, length: {1}".format(start[:-1], length[:-1]))
else:
volume.flag = 'alloc'
volume.slot = slot
volume_system._assign_disktype_data(volume)
logger.info("Found allocated {2}: block offset: {0}, length: {1} "
.format(start[:-1], length[:-1], volume.info['fsdescription']))
except AttributeError:
logger.exception("Error while parsing parted output")
continue
num += 1
yield volume
class MmlsVolumeDetector(VolumeDetector):
type = 'mmls'
@dependencies.require(dependencies.mmls)
def detect(self, volume_system, vstype='detect'):
"""Finds and mounts all volumes based on mmls."""
try:
cmd = ['mmls']
if volume_system.parent.offset:
cmd.extend(['-o', str(volume_system.parent.offset // volume_system.disk.block_size)])
if vstype in ('dos', 'mac', 'bsd', 'sun', 'gpt'):
cmd.extend(['-t', vstype])
cmd.append(volume_system.parent.get_raw_path())
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
# some bug in sleuthkit makes detection sometimes difficult, so we hack around it:
if hasattr(e, 'output') and "(GPT or DOS at 0)" in e.output.decode() and vstype != 'gpt':
volume_system.vstype = 'gpt'
# noinspection PyBroadException
try:
logger.warning("Error in retrieving volume info: mmls couldn't decide between GPT and DOS, "
"choosing GPT for you. Use --vstype=dos to force DOS.", exc_info=True)
                cmd = ['mmls', '-t', 'gpt', volume_system.parent.get_raw_path()]
output = _util.check_output_(cmd, stderr=subprocess.STDOUT)
volume_system.volume_source = 'multi'
except Exception as e:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
else:
logger.exception("Failed executing mmls command")
raise SubsystemError(e)
output = output.split("Description", 1)[-1]
for line in output.splitlines():
if not line:
continue
# noinspection PyBroadException
try:
values = line.split(None, 5)
# sometimes there are only 5 elements available
description = ''
index, slot, start, end, length = values[0:5]
if len(values) > 5:
description = values[5]
volume = volume_system._make_subvolume(
index=self._format_index(volume_system, int(index[:-1])),
offset=int(start) * volume_system.disk.block_size,
size=int(length) * volume_system.disk.block_size
)
volume.info['fsdescription'] = description
except Exception:
logger.exception("Error while parsing mmls output")
continue
if slot.lower() == 'meta':
volume.flag = 'meta'
logger.info("Found meta volume: block offset: {0}, length: {1}".format(start, length))
elif slot.lower().startswith('-----'):
volume.flag = 'unalloc'
logger.info("Found unallocated space: block offset: {0}, length: {1}".format(start, length))
else:
volume.flag = 'alloc'
if ":" in slot:
volume.slot = _util.determine_slot(*slot.split(':'))
else:
volume.slot = _util.determine_slot(-1, slot)
volume_system._assign_disktype_data(volume)
logger.info("Found allocated {2}: block offset: {0}, length: {1} ".format(start, length,
volume.info['fsdescription']))
yield volume
class VssVolumeDetector(VolumeDetector):
type = 'vss'
special = True
@dependencies.require(dependencies.vshadowmount)
def detect(self, volume_system, vstype='detect'):
"""Detect volume shadow copy volumes in the specified path."""
path = volume_system.parent._paths['vss']
try:
volume_info = _util.check_output_(["vshadowinfo", "-o", str(volume_system.parent.offset),
volume_system.parent.get_raw_path()])
except Exception as e:
logger.exception("Failed obtaining info from the volume shadow copies.")
raise SubsystemError(e)
current_store = None
for line in volume_info.splitlines():
line = line.strip()
if line.startswith("Store:"):
idx = line.split(":")[-1].strip()
current_store = volume_system._make_subvolume(
index=self._format_index(volume_system, idx), flag='alloc', offset=0
)
current_store._paths['vss_store'] = os.path.join(path, 'vss' + idx)
current_store.info['fsdescription'] = 'VSS Store'
elif line.startswith("Volume size"):
current_store.size = int(line.split(":")[-1].strip().split()[0])
elif line.startswith("Creation time"):
current_store.info['creation_time'] = line.split(":")[-1].strip()
return volume_system.volumes
class LvmVolumeDetector(VolumeDetector):
type = 'lvm'
special = True
@dependencies.require(dependencies.lvm)
def detect(self, volume_system, vstype='detect'):
"""Gather information about lvolumes, gathering their label, size and raw path"""
volume_group = volume_system.parent.info.get('volume_group')
result = _util.check_output_(["lvm", "lvdisplay", volume_group])
cur_v = None
for line in result.splitlines():
if "--- Logical volume ---" in line:
cur_v = volume_system._make_subvolume(
index=self._format_index(volume_system, len(volume_system)),
flag='alloc'
)
cur_v.info['fsdescription'] = 'Logical Volume'
if "LV Name" in line:
cur_v.info['label'] = line.replace("LV Name", "").strip()
if "LV Size" in line:
size, unit = line.replace("LV Size", "").strip().split(" ", 1)
cur_v.size = int(float(size.replace(',', '.')) * {'KiB': 1024, 'MiB': 1024 ** 2,
'GiB': 1024 ** 3, 'TiB': 1024 ** 4}.get(unit, 1))
if "LV Path" in line:
cur_v._paths['lv'] = line.replace("LV Path", "").strip()
cur_v.offset = 0
logger.info("{0} volumes found".format(len(volume_system)))
volume_system.volume_source = 'multi'
return volume_system.volumes
# Populate the VOLUME_SYSTEM_DETECTORS
VOLUME_SYSTEM_DETECTORS = {}
ALL_VOLUME_SYSTEM_DETECTORS = {}
for _, cls in inspect.getmembers(sys.modules[__name__], inspect.isclass):
if issubclass(cls, VolumeDetector) and cls != VolumeDetector and cls.type is not None:
ALL_VOLUME_SYSTEM_DETECTORS[cls.type] = cls()
if not cls.special:
VOLUME_SYSTEM_DETECTORS[cls.type] = cls()
| 42.256732
| 121
| 0.572036
|
4a12e18a37322088249560a01d2cf73b3094cf7d
| 840
|
py
|
Python
|
resolucao/numpy/subplot.py
|
rafaelbes/numericalAnalysis
|
31f2b9cd5fb62cee9a649ac0257024de757eede4
|
[
"MIT"
] | null | null | null |
resolucao/numpy/subplot.py
|
rafaelbes/numericalAnalysis
|
31f2b9cd5fb62cee9a649ac0257024de757eede4
|
[
"MIT"
] | null | null | null |
resolucao/numpy/subplot.py
|
rafaelbes/numericalAnalysis
|
31f2b9cd5fb62cee9a649ac0257024de757eede4
|
[
"MIT"
] | 4
|
2020-12-15T00:31:38.000Z
|
2021-09-09T14:52:19.000Z
|
import matplotlib.pyplot as plt
import numpy as np
fig, axs = plt.subplots(2)
for ax in axs:
ax.axhline(y=0, color='k')
ax.grid(False)
axs[0].axvline(x=2.45, color='k')
axs[0].axvline(x=4.22, color='k')
axs[1].axvline(x=2.45, color='gray', alpha=0.2)
axs[1].axvline(x=4.22, color='gray', alpha=0.2)
x = np.linspace(1.8, 5.2, 100)
f = lambda x: x**3-10*x*x+31*x-30
axs[0].plot(x, f(x), label='Function')
axs[1].plot(x, 3*x**2-20*x+31, label='Derivative')
axs[0].scatter([2.45, 4.22], [f(2.45), f(4.22)])
axs[0].annotate('1st critical point', xy=(2.45, f(2.45)), xytext=(3, 2),arrowprops=dict(facecolor='black', headwidth=5.9, shrink=0.1, width=0.2))
axs[0].annotate('2nd critical point', xy=(4.22, f(4.22)), xytext=(4, -1),arrowprops=dict(facecolor='black', headwidth=5.9, shrink=0.1, width=0.2))
axs[0].legend()
axs[1].legend()
plt.show()
| 31.111111
| 144
| 0.640476
|
4a12e1da3e6b180e7f0279d711daa0714ba647ac
| 1,052
|
py
|
Python
|
tool_Diameter_decode.py
|
fertiland/pyprotosim
|
b329c060f1cd521e264da8416249a02429f432f3
|
[
"BSD-2-Clause"
] | 12
|
2015-04-23T14:23:29.000Z
|
2021-11-15T11:17:48.000Z
|
tool_Diameter_decode.py
|
fertiland/pyprotosim
|
b329c060f1cd521e264da8416249a02429f432f3
|
[
"BSD-2-Clause"
] | 1
|
2021-03-15T15:50:16.000Z
|
2021-03-15T15:50:16.000Z
|
tool_Diameter_decode.py
|
grmagalhaes/pyprotosim_python3
|
597ddf9f880c5c83465f190bf0e461fcb04e09d9
|
[
"BSD-2-Clause"
] | 12
|
2015-01-26T21:11:22.000Z
|
2021-07-09T22:24:50.000Z
|
#!/usr/bin/env python
##################################################################
# Copyright (c) 2012, Sergej Srepfler <sergej.srepfler@gmail.com>
# February 2012 - March 2012
# Version 0.3, Last change on Oct 24, 2012
# This software is distributed under the terms of BSD license.
##################################################################
# Decode diameter packet into individual AVPs
from libDiameter import *
import sys
if __name__ == "__main__":
# level for decoding are: DEBUG, INFO, WARNING, ERROR, CRITICAL
#logging.basicConfig(level=logging.DEBUG)
LoadDictionary("dictDiameter.xml")
msg=sys.argv[1]
print "="*30
H=HDRItem()
stripHdr(H,msg)
avps=splitMsgAVPs(H.msg)
cmd=dictCOMMANDcode2name(H.flags,H.cmd)
if cmd==ERROR:
print 'Unknown command',H.cmd
else:
print cmd
print "Hop-by-Hop=",H.HopByHop,"End-to-End=",H.EndToEnd,"ApplicationId=",H.appId
for avp in avps:
print "RAW AVP",avp
print "Decoded AVP",decodeAVP(avp)
print "-"*30
| 31.878788
| 84
| 0.587452
|
4a12e280c705a9e203a1ad3248a37c7280fc206c
| 575
|
py
|
Python
|
web/main.py
|
cl3m3nt666/networkInfo
|
3e77a3d5fc603d7c9c44b3ecb42e2a1714ca0dbe
|
[
"Apache-2.0"
] | null | null | null |
web/main.py
|
cl3m3nt666/networkInfo
|
3e77a3d5fc603d7c9c44b3ecb42e2a1714ca0dbe
|
[
"Apache-2.0"
] | null | null | null |
web/main.py
|
cl3m3nt666/networkInfo
|
3e77a3d5fc603d7c9c44b3ecb42e2a1714ca0dbe
|
[
"Apache-2.0"
] | null | null | null |
import bottle
from bottle import get, static_file
import requests
import json
@get("/css/<filepath:re:.*\.css>")
def css(filepath):
return static_file(filepath, root="css/")
@get("/js/<filepath:re:.*\.js>")
def css(filepath):
return static_file(filepath, root="js/")
@bottle.route("/")
@bottle.view("ipinfo.tpl")
def index() :
info = requests.get('http://ipinfo.io/')
j = json.loads(info.text)
return { "title":"IP informations", "ip" : j['ip'], "country" : j['country']}
bottle.run(bottle.app(), host='0.0.0.0', port=80, debug= True, reloader=True)
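# Usage sketch: start with `python main.py` and browse to http://localhost/ --
# binding port 80 usually needs elevated privileges; a higher port such as
# 8080 is easier for local testing.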
| 25
| 81
| 0.650435
|
4a12e32e6eef9b495882b26287f77e8ab7191968
| 2,297
|
py
|
Python
|
experiments/karla/diplomski-rad/blade/ont/datasets/n20-f-bacterium-racon-hax/finished-experiments/model-n20-ont-f-bac-racon-hax-11.py
|
lvrcek/consensus-net
|
560957f315751822e1ddf8c097eb7b712ceadff3
|
[
"MIT"
] | null | null | null |
experiments/karla/diplomski-rad/blade/ont/datasets/n20-f-bacterium-racon-hax/finished-experiments/model-n20-ont-f-bac-racon-hax-11.py
|
lvrcek/consensus-net
|
560957f315751822e1ddf8c097eb7b712ceadff3
|
[
"MIT"
] | null | null | null |
experiments/karla/diplomski-rad/blade/ont/datasets/n20-f-bacterium-racon-hax/finished-experiments/model-n20-ont-f-bac-racon-hax-11.py
|
lvrcek/consensus-net
|
560957f315751822e1ddf8c097eb7b712ceadff3
|
[
"MIT"
] | 1
|
2018-12-23T13:50:29.000Z
|
2018-12-23T13:50:29.000Z
|
from comet_ml import Experiment
experiment = Experiment(api_key="oda8KKpxlDgWmJG5KsYrrhmIV", project_name="consensusnet")
import numpy as np
from keras.models import Model
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization, Input
from keras.layers import Conv1D, MaxPooling1D, Conv2D, MaxPool2D
from keras.callbacks import LearningRateScheduler, EarlyStopping, TensorBoard
import sys
module_path = '/home/diplomski-rad/consensus-net/src/python/utils/'
if module_path not in sys.path:
print('Adding utils module.')
sys.path.append(module_path)
from args_parsers import parse_train_args
def main(args):
args = parse_train_args(args)
X_train = np.load(args.X_train)
X_validate = np.load(args.X_validate)
y_train = np.load(args.y_train)
y_validate = np.load(args.y_validate)
model_save_path = args.model_save_path
tensorboard_output_dir = args.tensorboard_output_dir
def lr_schedule(epoch, lr):
if epoch > 50:
if epoch % 10 == 0:
return lr * 0.95
return lr
lr_callback = LearningRateScheduler(lr_schedule)
callbacks = [lr_callback,
EarlyStopping(monitor='val_loss', patience=3),
TensorBoard(log_dir=tensorboard_output_dir, write_images=True, histogram_freq=0)]
input_shape = X_train.shape[1:]
num_output_classes = y_train.shape[1]
input_layer = Input(shape=input_shape)
conv_1 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu')(input_layer)
pool_1 = MaxPooling1D(pool_size=(2))(conv_1)
conv_2 = Conv1D(filters=40, kernel_size=3, padding='same', activation='relu')(pool_1)
bn_1 = BatchNormalization()(conv_2)
flatten = Flatten()(bn_1)
predictions = Dense(num_output_classes, activation='softmax')(flatten)
model = Model(input_layer, predictions)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
print(model.summary())
batch_size = 10000
epochs = 150
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(X_validate, y_validate), callbacks=callbacks)
model.save(model_save_path)
if __name__ == '__main__':
main(sys.argv[1:])
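# Usage sketch (the exact flag names are assumptions about args_parsers.parse_train_args):
#   python model-n20-ont-f-bac-racon-hax-11.py \
#       --X_train X_train.npy --X_validate X_val.npy \
#       --y_train y_train.npy --y_validate y_val.npy \
#       --model_save_path model.h5 --tensorboard_output_dir ./tb-logs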
| 33.289855
| 132
| 0.708315
|
4a12e346254725c0a2fc7c54c9ef99d0d6ea3e88
| 7,090
|
py
|
Python
|
website/project/tasks.py
|
RabiaAnne/osf.io
|
614d425c03f05113918adfea77e05610d936c9f9
|
[
"Apache-2.0"
] | null | null | null |
website/project/tasks.py
|
RabiaAnne/osf.io
|
614d425c03f05113918adfea77e05610d936c9f9
|
[
"Apache-2.0"
] | null | null | null |
website/project/tasks.py
|
RabiaAnne/osf.io
|
614d425c03f05113918adfea77e05610d936c9f9
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import apps
import logging
from future.moves.urllib.parse import urljoin
import random
import requests
from framework.celery_tasks import app as celery_app
from website import settings, mails
from website.util.share import GraphNode, format_contributor
logger = logging.getLogger(__name__)
@celery_app.task(ignore_results=True)
def on_node_updated(node_id, user_id, first_save, saved_fields, request_headers=None):
# WARNING: Only perform Read-Only operations in an asynchronous task, until Repeatable Read/Serializable
# transactions are implemented in View and Task application layers.
AbstractNode = apps.get_model('osf.AbstractNode')
node = AbstractNode.load(node_id)
if node.is_collection or node.archiving or node.is_quickfiles:
return
need_update = bool(node.SEARCH_UPDATE_FIELDS.intersection(saved_fields))
# due to async nature of call this can issue a search update for a new record (acceptable trade-off)
if bool({'spam_status', 'is_deleted', 'deleted'}.intersection(saved_fields)):
need_update = True
elif not node.is_public and 'is_public' not in saved_fields:
need_update = False
if need_update:
node.update_search()
update_node_share(node)
update_collecting_metadata(node, saved_fields)
if node.get_identifier_value('doi') and bool(node.IDENTIFIER_UPDATE_FIELDS.intersection(saved_fields)):
node.request_identifier_update(category='doi')
def update_collecting_metadata(node, saved_fields):
from website.search.search import update_collected_metadata
if node.is_collected:
if node.is_public:
update_collected_metadata(node._id)
else:
update_collected_metadata(node._id, op='delete')
def update_node_share(node):
# Wrapper that ensures share_url and token exist
if settings.SHARE_URL:
if not settings.SHARE_API_TOKEN:
return logger.warning('SHARE_API_TOKEN not set. Could not send "{}" to SHARE.'.format(node._id))
_update_node_share(node)
def _update_node_share(node):
# Any modifications to this function may need to change _async_update_node_share
data = serialize_share_node_data(node)
resp = send_share_node_data(data)
try:
resp.raise_for_status()
except Exception:
if resp.status_code >= 500:
_async_update_node_share.delay(node._id)
else:
send_desk_share_error(node, resp, 0)
@celery_app.task(bind=True, max_retries=4, acks_late=True)
def _async_update_node_share(self, node_id):
# Any modifications to this function may need to change _update_node_share
# Takes node_id to ensure async retries push fresh data
AbstractNode = apps.get_model('osf.AbstractNode')
node = AbstractNode.load(node_id)
data = serialize_share_node_data(node)
resp = send_share_node_data(data)
try:
resp.raise_for_status()
except Exception as e:
if resp.status_code >= 500:
if self.request.retries == self.max_retries:
send_desk_share_error(node, resp, self.request.retries)
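            # Exponential backoff with jitter: the countdown grows with the retry
            # count and is capped at ten minutes.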
raise self.retry(
exc=e,
countdown=(random.random() + 1) * min(60 + settings.CELERY_RETRY_BACKOFF_BASE ** self.request.retries, 60 * 10)
)
else:
send_desk_share_error(node, resp, self.request.retries)
def send_share_node_data(data):
resp = requests.post('{}api/normalizeddata/'.format(settings.SHARE_URL), json=data, headers={'Authorization': 'Bearer {}'.format(settings.SHARE_API_TOKEN), 'Content-Type': 'application/vnd.api+json'})
logger.debug(resp.content)
return resp
def serialize_share_node_data(node):
return {
'data': {
'type': 'NormalizedData',
'attributes': {
'tasks': [],
'raw': None,
'data': {'@graph': format_registration(node) if node.is_registration else format_node(node)}
}
}
}
def format_node(node):
is_qa_node = bool(set(settings.DO_NOT_INDEX_LIST['tags']).intersection(node.tags.all().values_list('name', flat=True))) \
or any(substring in node.title for substring in settings.DO_NOT_INDEX_LIST['titles'])
return [
{
'@id': '_:123',
'@type': 'workidentifier',
'creative_work': {'@id': '_:789', '@type': 'project'},
'uri': '{}{}/'.format(settings.DOMAIN, node._id),
}, {
'@id': '_:789',
'@type': 'project',
'is_deleted': not node.is_public or node.is_deleted or node.is_spammy or is_qa_node
}
]
def format_registration(node):
is_qa_node = bool(set(settings.DO_NOT_INDEX_LIST['tags']).intersection(node.tags.all().values_list('name', flat=True))) \
or any(substring in node.title for substring in settings.DO_NOT_INDEX_LIST['titles'])
registration_graph = GraphNode('registration', **{
'title': node.title,
'description': node.description or '',
'is_deleted': not node.is_public or node.is_deleted or is_qa_node,
'date_published': node.registered_date.isoformat() if node.registered_date else None,
'registration_type': node.registered_schema.first().name if node.registered_schema else None,
'withdrawn': node.is_retracted,
'justification': node.retraction.justification if node.retraction else None,
})
to_visit = [
registration_graph,
GraphNode('workidentifier', creative_work=registration_graph, uri=urljoin(settings.DOMAIN, node.url))
]
registration_graph.attrs['tags'] = [
GraphNode('throughtags', creative_work=registration_graph, tag=GraphNode('tag', name=tag._id))
for tag in node.tags.all() or [] if tag._id
]
to_visit.extend(format_contributor(registration_graph, user, bool(user._id in node.visible_contributor_ids), i) for i, user in enumerate(node.contributors))
to_visit.extend(GraphNode('AgentWorkRelation', creative_work=registration_graph, agent=GraphNode('institution', name=institution.name)) for institution in node.affiliated_institutions.all())
if node.parent_node:
parent = GraphNode('registration')
to_visit.extend([
parent,
GraphNode('workidentifier', creative_work=parent, uri=urljoin(settings.DOMAIN, node.parent_node.url)),
GraphNode('ispartof', subject=registration_graph, related=parent),
])
visited = set()
to_visit.extend(registration_graph.get_related())
while True:
if not to_visit:
break
n = to_visit.pop(0)
if n in visited:
continue
visited.add(n)
to_visit.extend(list(n.get_related()))
return [node_.serialize() for node_ in visited]
def send_desk_share_error(node, resp, retries):
mails.send_mail(
to_addr=settings.OSF_SUPPORT_EMAIL,
mail=mails.SHARE_ERROR_DESK,
node=node,
resp=resp,
retries=retries,
can_change_preferences=False,
)
| 39.171271
| 204
| 0.680113
|
4a12e35c9256392f5bb0245a1f0dd62f256371ae
| 162
|
py
|
Python
|
tools/skp/page_sets/__init__.py
|
mohad12211/skia
|
042a53aa094715e031ebad4da072524ace316744
|
[
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
tools/skp/page_sets/__init__.py
|
mohad12211/skia
|
042a53aa094715e031ebad4da072524ace316744
|
[
"BSD-3-Clause"
] | 250
|
2018-02-02T23:16:57.000Z
|
2022-03-21T06:09:53.000Z
|
tools/skp/page_sets/__init__.py
|
mohad12211/skia
|
042a53aa094715e031ebad4da072524ace316744
|
[
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
| 40.5
| 72
| 0.771605
|
4a12e37803ac43369c6153634ad84f67554682bf
| 16,335
|
py
|
Python
|
tools/trainval_net.py
|
VDIGPKU/A-quadrilateral-scene-text-detector
|
7e8356e1588210cca5ab6846acc0265d8b5ee029
|
[
"MIT"
] | 1
|
2021-11-05T03:29:28.000Z
|
2021-11-05T03:29:28.000Z
|
tools/trainval_net.py
|
VDIGPKU/A-quadrilateral-scene-text-detector
|
7e8356e1588210cca5ab6846acc0265d8b5ee029
|
[
"MIT"
] | null | null | null |
tools/trainval_net.py
|
VDIGPKU/A-quadrilateral-scene-text-detector
|
7e8356e1588210cca5ab6846acc0265d8b5ee029
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------
# Pytorch multi-GPU Faster R-CNN
# Licensed under The MIT License [see LICENSE for details]
# Written by Jiasen Lu, Jianwei Yang, based on code from Ross Girshick
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import _init_paths
import os
import sys
import numpy as np
import argparse
import pprint
import pdb
import time
import torch
from torch.autograd import Variable
import torch.nn as nn
import torch.optim as optim
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from lib.roi_data_layer.roidb import combined_roidb
from lib.roi_data_layer.roibatchLoader import RoibatchLoader
from lib.model.utils.config import cfg, cfg_from_file, cfg_from_list, get_output_dir
from lib.model.utils.net_utils import adjust_learning_rate, save_checkpoint, clip_gradient
from lib.model.faster_rcnn.vgg16 import vgg16
from lib.model.faster_rcnn.resnet import resnet
from lib.datasets.factory import adjust_args
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--dataset', dest='dataset',
help='training dataset',
default='pascal_voc', type=str)
parser.add_argument('--net', dest='net',
help='vgg16, res101',
default='vgg16', type=str)
parser.add_argument('--start_epoch', dest='start_epoch',
help='starting epoch',
default=1, type=int)
parser.add_argument('--epochs', dest='max_epochs',
help='number of epochs to train',
default=20, type=int)
parser.add_argument('--disp_interval', dest='disp_interval',
help='number of iterations to display',
default=10, type=int)
parser.add_argument('--checkpoint_interval', dest='checkpoint_interval',
                        help='number of iterations between checkpoints',
default=10000, type=int)
parser.add_argument('--save_dir', dest='save_dir',
help='directory to save models', default="/srv/share/jyang375/models",
nargs=argparse.REMAINDER)
parser.add_argument('--nw', dest='num_workers',
help='number of worker to load data',
default=-1, type=int)
    parser.add_argument('--cuda', dest='cuda',
                        help='whether to use CUDA',
                        action='store_true')
    parser.add_argument('--ls', dest='large_scale',
                        help='whether to use a large image scale',
                        action='store_true')
    parser.add_argument('--mGPUs', dest='mGPUs',
                        help='whether to use multiple GPUs',
                        action='store_true')
parser.add_argument('--bs', dest='batch_size',
help='batch_size',
default=-1, type=int)
parser.add_argument('--cag', dest='class_agnostic',
                        help='whether to perform class-agnostic bbox regression',
action='store_true')
parser.add_argument('--tag', dest='tag',
help='tag of the model',
default=None, type=str)
# config optimization
parser.add_argument('--o', dest='optimizer',
help='training optimizer',
default="sgd", type=str)
parser.add_argument('--lr', dest='lr',
help='starting learning rate',
default=0.001, type=float)
parser.add_argument('--lr_decay_step', dest='lr_decay_step',
help='step to do learning rate decay, unit is epoch',
default=5, type=int)
parser.add_argument('--lr_decay_gamma', dest='lr_decay_gamma',
help='learning rate decay ratio',
default=0.1, type=float)
# set training session
parser.add_argument('--s', dest='session',
help='training session',
default=1, type=int)
# resume trained model
parser.add_argument('--r', dest='resume',
help='resume checkpoint or not',
default=False, type=bool)
parser.add_argument('--checksession', dest='checksession',
help='checksession to load model',
default=1, type=int)
parser.add_argument('--checkepoch', dest='checkepoch',
help='checkepoch to load model',
default=1, type=int)
parser.add_argument('--checkpoint', dest='checkpoint',
help='checkpoint to load model',
default=0, type=int)
    # logging and display
parser.add_argument('--use_tfboard', dest='use_tfboard',
                        help='whether to use TensorFlow TensorBoard',
default=False, type=bool)
parser.add_argument('--imdb', dest='imdb_name',
help='dataset to train on',
default='voc_2007_trainval', type=str)
parser.add_argument('--imdbval', dest='imdbval_name',
help='dataset to validate on',
default='voc_2007_test', type=str)
parser.add_argument('--set', dest='set_cfgs',
help='set config keys', default=None,
nargs=argparse.REMAINDER)
args = parser.parse_args()
return args
class sampler(Sampler):
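    """Samples whole batches as contiguous index blocks, shuffling only the
    order of the batches, so that examples grouped together in the roidb
    (e.g. by aspect ratio) stay within the same batch."""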
def __init__(self, train_size, batch_size):
self.num_data = train_size
self.num_per_batch = int(train_size / batch_size)
self.batch_size = batch_size
self.range = torch.arange(0, batch_size).view(1, batch_size).long()
self.leftover_flag = False
if train_size % batch_size:
self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()
self.leftover_flag = True
def __iter__(self):
rand_num = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
self.rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range
self.rand_num_view = self.rand_num.view(-1)
if self.leftover_flag:
self.rand_num_view = torch.cat((self.rand_num_view, self.leftover), 0)
return iter(self.rand_num_view)
def __len__(self):
return self.num_data
if __name__ == '__main__':
args = parse_args()
if args.use_tfboard:
from lib.model.utils.logger import Logger
# Set the logger
logger = Logger('./logs')
adjust_args(args)
if args.cfg_file is not None:
cfg_from_file(args.cfg_file)
if args.set_cfgs is not None:
cfg_from_list(args.set_cfgs)
if args.batch_size == -1:
args.batch_size = torch.cuda.device_count()
args.num_workers = max(1, args.batch_size // 2)
np.random.seed(cfg.RNG_SEED)
print('Called with args:')
print(args)
# torch.backends.cudnn.benchmark = True
if torch.cuda.is_available() and not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
# train set
    # -- Note: use the validation set and disable flipping to enable faster loading.
cfg.TRAIN.USE_FLIPPED = True
cfg.USE_GPU_NMS = args.cuda
imdb, roidb, ratio_list, ratio_index = combined_roidb(args.imdb_name)
train_size = len(roidb)
print('Using config:')
pprint.pprint(cfg)
print('{:d} roidb entries'.format(len(roidb)))
# output_dir = args.save_dir + "/" + args.net + "/" + args.dataset
output_dir = get_output_dir(imdb, args.tag)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
sampler_batch = sampler(train_size, args.batch_size)
dataset = RoibatchLoader(roidb, ratio_list, ratio_index, args.batch_size,
imdb.num_classes, training=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
sampler=sampler_batch, num_workers=args.num_workers)
    # initialize the tensor holders here.
im_data = torch.FloatTensor(1)
im_info = torch.FloatTensor(1)
num_boxes = torch.LongTensor(1)
gt_boxes = torch.FloatTensor(1)
# ship to cuda
if args.cuda:
im_data = im_data.cuda()
im_info = im_info.cuda()
num_boxes = num_boxes.cuda()
gt_boxes = gt_boxes.cuda()
# make variable
im_data = Variable(im_data)
im_info = Variable(im_info)
num_boxes = Variable(num_boxes)
gt_boxes = Variable(gt_boxes)
if args.cuda:
cfg.CUDA = True
    # initialize the network here.
if args.net == 'vgg16':
fasterRCNN = vgg16(imdb.classes, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res101':
fasterRCNN = resnet(imdb.classes, 101, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res50':
fasterRCNN = resnet(imdb.classes, 50, pretrained=True, class_agnostic=args.class_agnostic)
elif args.net == 'res152':
fasterRCNN = resnet(imdb.classes, 152, pretrained=True, class_agnostic=args.class_agnostic)
else:
print("network is not defined")
pdb.set_trace()
fasterRCNN.create_architecture()
lr = cfg.TRAIN.LEARNING_RATE
# lr = args.lr
# tr_momentum = cfg.TRAIN.MOMENTUM
# tr_momentum = args.momentum
params = []
if args.resume:
load_name = os.path.join(output_dir,
'faster_rcnn_epoch_{}.pth'.format(args.checkepoch))
print("loading checkpoint %s" % (load_name))
checkpoint = torch.load(load_name)
args.session = checkpoint['session']
args.start_epoch = checkpoint['epoch']
fasterRCNN.load_state_dict(checkpoint['model'])
if 'pooling_mode' in checkpoint.keys():
cfg.POOLING_MODE = checkpoint['pooling_mode']
print("loaded checkpoint %s" % (load_name))
if args.checkepoch > args.lr_decay_step:
lr *= args.lr_decay_gamma
for key, value in dict(fasterRCNN.named_parameters()).items():
if value.requires_grad:
if 'bias' in key:
params += [{'params': [value], 'lr': lr * (cfg.TRAIN.DOUBLE_BIAS + 1), \
'weight_decay': cfg.TRAIN.BIAS_DECAY and cfg.TRAIN.WEIGHT_DECAY or 0}]
else:
params += [{'params': [value], 'lr': lr, 'weight_decay': cfg.TRAIN.WEIGHT_DECAY}]
if args.optimizer == "adam":
lr = lr * 0.1
optimizer = torch.optim.Adam(params)
elif args.optimizer == "sgd":
optimizer = torch.optim.SGD(params, momentum=cfg.TRAIN.MOMENTUM)
# optimizer.load_state_dict(checkpoint['optimizer'])
# lr = optimizer.param_groups[0]['lr']
if args.mGPUs:
fasterRCNN = nn.DataParallel(fasterRCNN)
if args.cuda:
fasterRCNN.cuda()
iters_per_epoch = int(train_size / args.batch_size)
for epoch in range(args.start_epoch, args.max_epochs + 1):
# setting to train mode
fasterRCNN.train()
loss_temp = 0
start = time.time()
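        # Decay the learning rate once every (lr_decay_step + 1) epochs.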
if epoch % (args.lr_decay_step + 1) == 0:
adjust_learning_rate(optimizer, args.lr_decay_gamma)
lr *= args.lr_decay_gamma
data_iter = iter(dataloader)
for step in range(iters_per_epoch):
data = next(data_iter)
im_data.data.resize_(data[0].size()).copy_(data[0])
im_info.data.resize_(data[1].size()).copy_(data[1])
gt_boxes.data.resize_(data[2].size()).copy_(data[2])
num_boxes.data.resize_(data[3].size()).copy_(data[3])
fasterRCNN.zero_grad()
rois, cls_prob, bbox_pred, \
rpn_loss_cls, rpn_loss_box, \
RCNN_loss_cls, RCNN_loss_bbox, RCNN_loss_edge, \
rois_label = fasterRCNN(im_data, im_info, gt_boxes, num_boxes)
if cfg.TRAIN.EDGE_LOSS:
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean() + RCNN_loss_edge.mean()
else:
loss = rpn_loss_cls.mean() + rpn_loss_box.mean() \
+ RCNN_loss_cls.mean() + RCNN_loss_bbox.mean()
loss_temp += loss.data[0]
# backward
optimizer.zero_grad()
loss.backward()
if args.net == "vgg16":
clip_gradient(fasterRCNN, 10.)
optimizer.step()
if step % args.disp_interval == 0:
end = time.time()
if step > 0:
loss_temp /= args.disp_interval
if args.mGPUs:
loss_rpn_cls = rpn_loss_cls.mean().data[0]
loss_rpn_box = rpn_loss_box.mean().data[0]
loss_rcnn_cls = RCNN_loss_cls.mean().data[0]
loss_rcnn_box = RCNN_loss_bbox.mean().data[0]
if cfg.TRAIN.EDGE_LOSS:
loss_rcnn_edge = RCNN_loss_edge.mean().data[0]
hard_cnt = torch.sum(rois_label.data.eq(-1))
fg_cnt = torch.sum(rois_label.data.ne(0)) - hard_cnt
bg_cnt = rois_label.data.numel() - fg_cnt - hard_cnt
else:
loss_rpn_cls = rpn_loss_cls.data[0]
loss_rpn_box = rpn_loss_box.data[0]
loss_rcnn_cls = RCNN_loss_cls.data[0]
loss_rcnn_box = RCNN_loss_bbox.data[0]
if cfg.TRAIN.EDGE_LOSS:
loss_rcnn_edge = RCNN_loss_edge.data[0]
hard_cnt = torch.sum(rois_label.data.eq(-1))
fg_cnt = torch.sum(rois_label.data.ne(0)) - hard_cnt
bg_cnt = rois_label.data.numel() - fg_cnt - hard_cnt
print("[session %d][epoch %2d][iter %4d]\n >>> loss: %.4f, lr: %.2e" \
% (args.session, epoch, step, loss_temp, lr))
print(" >>> fg/bg=(%d/%d), time cost: %f" % (fg_cnt, bg_cnt, end - start))
print(" >>> rpn_cls: %.4f, rpn_box: %.4f\n >>> rcnn_cls: %.4f, rcnn_box %.4f" \
% (loss_rpn_cls, loss_rpn_box, loss_rcnn_cls, loss_rcnn_box))
if cfg.TRAIN.EDGE_LOSS:
print(" >>> rcnn_edge %.4f" %(loss_rcnn_edge))
if args.use_tfboard:
info = {
'loss': loss_temp,
'loss_rpn_cls': loss_rpn_cls,
'loss_rpn_box': loss_rpn_box,
'loss_rcnn_cls': loss_rcnn_cls,
'loss_rcnn_box': loss_rcnn_box
}
for tag, value in info.items():
logger.scalar_summary(tag, value, step)
loss_temp = 0
start = time.time()
if args.mGPUs:
save_name = os.path.join(output_dir, 'faster_rcnn_epoch_{}.pth'.format(epoch))
save_checkpoint({
'session': args.session,
'epoch': epoch + 1,
'model': fasterRCNN.module.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
else:
save_name = os.path.join(output_dir, 'faster_rcnn_epoch_{}.pth'.format(epoch))
save_checkpoint({
'session': args.session,
'epoch': epoch + 1,
'model': fasterRCNN.state_dict(),
'optimizer': optimizer.state_dict(),
'pooling_mode': cfg.POOLING_MODE,
'class_agnostic': args.class_agnostic,
}, save_name)
print('save model: {}'.format(save_name))
end = time.time()
print(end - start)
| 40.333333
| 101
| 0.57135
|
4a12e5ac592412832211dc42789bdf2e0c8c9870
| 2,248
|
py
|
Python
|
snn/experiments/run_pacb.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | null | null | null |
snn/experiments/run_pacb.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | null | null | null |
snn/experiments/run_pacb.py
|
KaroliShp/pacbayes-opt
|
fa30b897fd6c3763a4bdb66a9fc9518165841c18
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
sys.path.insert(0, "/snn/")
sys.path.insert(0, "/")
from snn.core import package_path
from snn.core.parse_args import CompleteParser, Interpreter
from snn.core.utils import deserialize
def run_pacb(weights_rand_init, model, test_set, epochs, learning_rate, drop_lr, lr_factor, seed, trainw):
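    """Optimize the PAC-Bayes bound starting from the trained prior weights,
    evaluate the resulting stochastic network on the test set, and save the
    outputs and logs under experiments/binary_mnist."""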
testX, testY = test_set
save_dict = {"log_post_all": True, "PACB_weights": True, "L2_PACB": False, "diff": False, "iter": 500*50,
"w*": model.get_model_weights(), "mean_weights": True, "var_weights": True, "PACBound": True,
"B_val": True, "KL_val": True, "test_acc": True, "train_acc": True, "log_prior_std": True}
# Optimize the pac bayes bound with this newly trained prior
    model.PACB_init()
    # Checkpoint the optimization run periodically.
model.optimize_PACB(weights_rand_init, epochs, learning_rate=learning_rate, drop_lr=drop_lr, lr_factor=lr_factor,
save_dict=save_dict, trainWeights=trainw)
model.evaluate_SNN_accuracy(testX, testY, weights_rand_init, N_SNN_samples=1000, save_dict=save_dict)
path = os.path.join(package_path, "experiments", "binary_mnist",
("model_mean_opt{}_LR{}_seed{}.pickle".format(trainw, learning_rate, seed)))
model.save_output(path=path)
path_log = os.path.join(package_path, "experiments", "binary_mnist",
("model_mean_opt{}_LR{}_seed{}.csv".format(trainw, learning_rate, seed)))
model.save_logging_info(path_log)
if __name__ == '__main__':
complete_args = CompleteParser().parse()
_, _, save_path, _ = Interpreter(complete_args).interpret()
deserialization_path = os.path.join(package_path, "experiments", save_path)
print("Loading model weights saved in ", deserialization_path)
model_weights, weights_rand_init = deserialize(deserialization_path)
print("Model weights loaded!")
model, test_set, _, _ = Interpreter(complete_args).interpret(model_weights)
run_pacb(weights_rand_init, model, test_set, complete_args["pacb_epochs"], complete_args["lr"],
complete_args["drop_lr"], complete_args["lr_factor"], complete_args["seed"], complete_args["trainw"])
print("PAC-Bayes run complete!")
| 51.090909
| 117
| 0.70952
|
4a12e5df61df72fc53097fa119cfa87b647c36ef
| 4,956
|
py
|
Python
|
src/rvp.py
|
yochai-safrai/Automatic-RVP
|
fc975e869bd8c1562056dff94e64434ab375af92
|
[
"MIT"
] | null | null | null |
src/rvp.py
|
yochai-safrai/Automatic-RVP
|
fc975e869bd8c1562056dff94e64434ab375af92
|
[
"MIT"
] | null | null | null |
src/rvp.py
|
yochai-safrai/Automatic-RVP
|
fc975e869bd8c1562056dff94e64434ab375af92
|
[
"MIT"
] | null | null | null |
import sys
import concurrent.futures
import os
import numpy as np
import pandas as pd
from itertools import repeat
from stabilization import stabilization, verify_data, StabilizationFail, StabilizationSuccess
from pade import pade, save_results
from clustering import clustering
def run_pade(data_in, start, finish, dir_name):
curr_result = pade(data_in)
if curr_result is None:
return None
save_results(data_in, curr_result, os.path.join(dir_name, f'pade_output_from_{start}_to_{finish}.dat'))
return curr_result.loc[curr_result['imag'] < 0, ['real', 'imag', 'alpha', 'theta', 'imag_err']]
def auto_rvp(
input_file,
threshold=1.3,
minimum_stable_zone_points=10,
interpolation_percentage=0.4,
stabilization_output_size=25,
maximum_derivative=1.0,
skip_stabilization=False,
stabilization_smooth_only=False,
min_pade_input_size=8,
max_pade_input_size=35,
plot=True,
):
"""
:param input_file: Path to input file.
:param threshold: The threshold to determine if a point will be in the stable zone or not. Default is 1.3.
:param minimum_stable_zone_points: The minimum number of points in the stable zone. Default is 10.
:param interpolation_percentage: The percentage of data used when calculating the stable zone. Default is 0.4.
:param stabilization_output_size: The number of points in the output. Default is 25.
:param maximum_derivative: The absolute value of the maximum derivative allowed when calculating the stable zone.
If this derivative is reached, then a stable zone can not be found. Default is 1.0.
:param skip_stabilization: If this option is set to True, then the stabilization phase will be skipped. This option is mutually exclusive
with the stabilization_smooth_only option, and an error will occur if both are True. Default is False.
:param stabilization_smooth_only: If this option is set to True, then the data will be reduced to the given output size by means
of interpolation without trying to find a stable zone. Default is False.
:param min_pade_input_size: The minimum number of points passed in one iteration to pade. Default is 8.
:param max_pade_input_size: The maximum number of points passed in one iteration to pade. Default is 35.
:param plot: Indicator if to plot the stabilization results. Default is True.
"""
if not os.path.exists(input_file):
print('Input file not found', file=sys.stderr)
sys.exit()
if min_pade_input_size < 8:
print('Illegal number for pade minimum input size. Must be 8 or higher', file=sys.stderr)
sys.exit()
if stabilization_output_size < min_pade_input_size:
        print('Stable part output size cannot be lower than minimum input size for pade', file=sys.stderr)
sys.exit()
if max_pade_input_size < min_pade_input_size:
        print('Maximum input size for pade cannot be lower than minimum input size for pade', file=sys.stderr)
sys.exit()
if skip_stabilization and stabilization_smooth_only:
        print('The arguments skip_stabilization and stabilization_smooth_only cannot both be True', file=sys.stderr)
sys.exit()
data = np.genfromtxt(input_file)
error = verify_data(data, threshold, stabilization_output_size, minimum_stable_zone_points,
maximum_derivative, interpolation_percentage)
if error is not None:
print(error, file=sys.stderr)
sys.exit()
stabilization_result = stabilization(data, threshold, stabilization_output_size, stabilization_output_size, interpolation_percentage,
minimum_stable_zone_points, stabilization_smooth_only) if not skip_stabilization \
else StabilizationSuccess(data, data)
if isinstance(stabilization_result, StabilizationFail):
print(stabilization_result, file=sys.stderr)
sys.exit()
stable_zone = stabilization_result.get_results()
dir_name = 'results' + '_' + input_file.split('.')[0]
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
# save results to output dir
np.savetxt(os.path.join(dir_name, 'stabilization_output.dat'), stable_zone, fmt='%.15f')
# copy original data to output dir
np.savetxt(os.path.join(dir_name, input_file), data, fmt='%.15f')
sliced_data = []
left = []
right = []
    for i in range(len(stable_zone) - min_pade_input_size + 1):
for j in range(min_pade_input_size, max_pade_input_size + 1):
if i + j > len(stable_zone):
continue
left.append(i + 1)
right.append(i + j)
sliced_data.append(stable_zone[i:i + j])
with concurrent.futures.ProcessPoolExecutor() as executor:
results = executor.map(run_pade, sliced_data, left, right, repeat(dir_name))
result_df = pd.DataFrame()
for r in results:
if r is not None:
result_df = pd.concat([result_df, r])
result_df.to_csv(os.path.join(dir_name, 'clustering_input.csv'), index=False)
clustering_results = clustering(result_df)
if clustering_results is None:
print('Failed to find a cluster')
else:
clustering_results.save_results(dir_name)
if not skip_stabilization and plot:
stabilization_result.plot_results(dir_name)
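if __name__ == '__main__':
    # Minimal usage sketch: 'input.dat' is a hypothetical data file; all other
    # parameters fall back to the defaults documented in the docstring above.
    auto_rvp('input.dat')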
| 39.648
| 138
| 0.770178
|
4a12e64f5ba5b8652bb486e1c24b0d68ad2ca46c
| 28,116
|
py
|
Python
|
trac/config.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
trac/config.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
trac/config.py
|
wiraqutra/photrackjp
|
e120cba2a5d5d30f99ad084c6521e61f09694ee6
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2005-2009 Edgewall Software
# Copyright (C) 2005-2007 Christopher Lenz <cmlenz@gmx.de>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from ConfigParser import ConfigParser
from copy import deepcopy
import os.path
from trac.admin import AdminCommandError, IAdminCommandProvider
from trac.core import *
from trac.util import AtomicFile, as_bool
from trac.util.compat import any
from trac.util.text import printout, to_unicode, CRLF
from trac.util.translation import _, N_
__all__ = ['Configuration', 'Option', 'BoolOption', 'IntOption', 'FloatOption',
'ListOption', 'ChoiceOption', 'PathOption', 'ExtensionOption',
'OrderedExtensionsOption', 'ConfigurationError']
# Retained for backward-compatibility, use as_bool() instead
_TRUE_VALUES = ('yes', 'true', 'enabled', 'on', 'aye', '1', 1, True)
_use_default = object()
def _to_utf8(basestr):
return to_unicode(basestr).encode('utf-8')
class ConfigurationError(TracError):
"""Exception raised when a value in the configuration file is not valid."""
title = N_('Configuration Error')
class Configuration(object):
"""Thin layer over `ConfigParser` from the Python standard library.
In addition to providing some convenience methods, the class remembers
the last modification time of the configuration file, and reparses it
when the file has changed.
"""
def __init__(self, filename):
self.filename = filename
self.parser = ConfigParser()
self._old_sections = {}
self.parents = []
self._lastmtime = 0
self._sections = {}
self.parse_if_needed(force=True)
def __contains__(self, name):
"""Return whether the configuration contains a section of the given
name.
"""
return name in self.sections()
def __getitem__(self, name):
"""Return the configuration section with the specified name."""
if name not in self._sections:
self._sections[name] = Section(self, name)
return self._sections[name]
def __repr__(self):
return '<%s %r>' % (self.__class__.__name__, self.filename)
def get(self, section, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
return self[section].get(key, default)
def getbool(self, section, key, default=''):
"""Return the specified option as boolean value.
If the value of the option is one of "yes", "true", "enabled", "on",
or "1", this method wll return `True`, otherwise `False`.
Valid default input is a string or a bool. Returns a bool.
(since Trac 0.9.3, "enabled" added in 0.11)
"""
return self[section].getbool(key, default)
def getint(self, section, key, default=''):
"""Return the value of the specified option as integer.
If the specified option can not be converted to an integer, a
`ConfigurationError` exception is raised.
Valid default input is a string or an int. Returns an int.
(since Trac 0.10)
"""
return self[section].getint(key, default)
def getfloat(self, section, key, default=''):
"""Return the value of the specified option as float.
If the specified option can not be converted to a float, a
`ConfigurationError` exception is raised.
Valid default input is a string, float or int. Returns a float.
(since Trac 0.12)
"""
return self[section].getfloat(key, default)
def getlist(self, section, key, default='', sep=',', keep_empty=False):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `True`, empty elements are
included in the list.
        Valid default input is a string or a list. Returns a list.
(since Trac 0.10)
"""
return self[section].getlist(key, default, sep, keep_empty)
def getpath(self, section, key, default=''):
"""Return a configuration value as an absolute path.
Relative paths are resolved relative to the location of this
configuration file.
Valid default input is a string. Returns a normalized path.
(enabled since Trac 0.11.5)
"""
return self[section].getpath(key, default)
def set(self, section, key, value):
"""Change a configuration value.
These changes are not persistent unless saved with `save()`.
"""
self[section].set(key, value)
def defaults(self, compmgr=None):
"""Returns a dictionary of the default configuration values
(''since 0.10'').
If `compmgr` is specified, return only options declared in components
that are enabled in the given `ComponentManager`.
"""
defaults = {}
for (section, key), option in Option.get_registry(compmgr).items():
defaults.setdefault(section, {})[key] = option.default
return defaults
def options(self, section, compmgr=None):
"""Return a list of `(name, value)` tuples for every option in the
specified section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
return self[section].options(compmgr)
def remove(self, section, key):
"""Remove the specified option."""
self[section].remove(key)
def sections(self, compmgr=None, defaults=True):
"""Return a list of section names.
If `compmgr` is specified, only the section names corresponding to
options declared in components that are enabled in the given
`ComponentManager` are returned.
"""
sections = set([to_unicode(s) for s in self.parser.sections()])
for parent in self.parents:
sections.update(parent.sections(compmgr, defaults=False))
if defaults:
sections.update(self.defaults(compmgr))
return sorted(sections)
def has_option(self, section, option, defaults=True):
"""Returns True if option exists in section in either the project
trac.ini or one of the parents, or is available through the Option
registry.
(since Trac 0.11)
"""
section_str = _to_utf8(section)
if self.parser.has_section(section_str):
if _to_utf8(option) in self.parser.options(section_str):
return True
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
return True
return defaults and (section, option) in Option.registry
def save(self):
"""Write the configuration options to the primary file."""
if not self.filename:
return
# Only save options that differ from the defaults
sections = []
for section in self.sections():
section_str = _to_utf8(section)
options = []
for option in self[section]:
default_str = None
for parent in self.parents:
if parent.has_option(section, option, defaults=False):
default_str = _to_utf8(parent.get(section, option))
break
option_str = _to_utf8(option)
current_str = False
if self.parser.has_option(section_str, option_str):
current_str = self.parser.get(section_str, option_str)
if current_str is not False and current_str != default_str:
options.append((option_str, current_str))
if options:
sections.append((section_str, sorted(options)))
# At this point, all the strings in `sections` are UTF-8 encoded `str`
try:
fileobj = AtomicFile(self.filename, 'w')
try:
fileobj.write('# -*- coding: utf-8 -*-\n\n')
for section, options in sections:
fileobj.write('[%s]\n' % section)
for key_str, val_str in options:
if to_unicode(key_str) in self[section].overridden:
fileobj.write('# %s = <inherited>\n' % key_str)
else:
val_str = val_str.replace(CRLF, '\n') \
.replace('\n', '\n ')
fileobj.write('%s = %s\n' % (key_str, val_str))
fileobj.write('\n')
finally:
fileobj.close()
self._old_sections = deepcopy(self.parser._sections)
except Exception:
# Revert all changes to avoid inconsistencies
self.parser._sections = deepcopy(self._old_sections)
raise
def parse_if_needed(self, force=False):
if not self.filename or not os.path.isfile(self.filename):
return False
changed = False
modtime = os.path.getmtime(self.filename)
if force or modtime > self._lastmtime:
self._sections = {}
self.parser._sections = {}
self.parser.read(self.filename)
self._lastmtime = modtime
self._old_sections = deepcopy(self.parser._sections)
changed = True
if changed:
self.parents = []
if self.parser.has_option('inherit', 'file'):
for filename in self.parser.get('inherit', 'file').split(','):
filename = to_unicode(filename.strip())
if not os.path.isabs(filename):
filename = os.path.join(os.path.dirname(self.filename),
filename)
self.parents.append(Configuration(filename))
else:
for parent in self.parents:
changed |= parent.parse_if_needed(force=force)
if changed:
self._cache = {}
return changed
def touch(self):
if self.filename and os.path.isfile(self.filename) \
and os.access(self.filename, os.W_OK):
os.utime(self.filename, None)
def set_defaults(self, compmgr=None):
"""Retrieve all default values and store them explicitly in the
configuration, so that they can be saved to file.
Values already set in the configuration are not overridden.
"""
for section, default_options in self.defaults(compmgr).items():
for name, value in default_options.items():
if not self.parser.has_option(_to_utf8(section),
_to_utf8(name)):
if any(parent[section].contains(name, defaults=False)
for parent in self.parents):
value = None
self.set(section, name, value)
class Section(object):
"""Proxy for a specific configuration section.
Objects of this class should not be instantiated directly.
"""
__slots__ = ['config', 'name', 'overridden', '_cache']
def __init__(self, config, name):
self.config = config
self.name = name
self.overridden = {}
self._cache = {}
def contains(self, key, defaults=True):
if self.config.parser.has_option(_to_utf8(self.name), _to_utf8(key)):
return True
for parent in self.config.parents:
if parent[self.name].contains(key, defaults=False):
return True
return defaults and Option.registry.has_key((self.name, key))
__contains__ = contains
def iterate(self, compmgr=None, defaults=True):
"""Iterate over the options in this section.
If `compmgr` is specified, only return default option values for
components that are enabled in the given `ComponentManager`.
"""
options = set()
name_str = _to_utf8(self.name)
if self.config.parser.has_section(name_str):
for option_str in self.config.parser.options(name_str):
option = to_unicode(option_str)
options.add(option.lower())
yield option
for parent in self.config.parents:
for option in parent[self.name].iterate(defaults=False):
loption = option.lower()
if loption not in options:
options.add(loption)
yield option
if defaults:
for section, option in Option.get_registry(compmgr).keys():
if section == self.name and option.lower() not in options:
yield option
__iter__ = iterate
def __repr__(self):
return '<Section [%s]>' % (self.name)
def get(self, key, default=''):
"""Return the value of the specified option.
Valid default input is a string. Returns a string.
"""
cached = self._cache.get(key, _use_default)
if cached is not _use_default:
return cached
name_str = _to_utf8(self.name)
key_str = _to_utf8(key)
if self.config.parser.has_option(name_str, key_str):
value = self.config.parser.get(name_str, key_str)
else:
for parent in self.config.parents:
value = parent[self.name].get(key, _use_default)
if value is not _use_default:
break
else:
if default is not _use_default:
option = Option.registry.get((self.name, key))
value = option and option.default or _use_default
else:
value = _use_default
if value is _use_default:
return default
if not value:
value = u''
elif isinstance(value, basestring):
value = to_unicode(value)
self._cache[key] = value
return value
def getbool(self, key, default=''):
"""Return the value of the specified option as boolean.
This method returns `True` if the option value is one of "yes", "true",
"enabled", "on", or non-zero numbers, ignoring case. Otherwise `False`
is returned.
Valid default input is a string or a bool. Returns a bool.
"""
return as_bool(self.get(key, default))
def getint(self, key, default=''):
"""Return the value of the specified option as integer.
If the specified option can not be converted to an integer, a
`ConfigurationError` exception is raised.
Valid default input is a string or an int. Returns an int.
"""
value = self.get(key, default)
if not value:
return 0
try:
return int(value)
except ValueError:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected integer, got %(value)s',
section=self.name, entry=key, value=repr(value)))
def getfloat(self, key, default=''):
"""Return the value of the specified option as float.
If the specified option can not be converted to a float, a
`ConfigurationError` exception is raised.
Valid default input is a string, float or int. Returns a float.
"""
value = self.get(key, default)
if not value:
return 0.0
try:
return float(value)
except ValueError:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected float, got %(value)s',
section=self.name, entry=key, value=repr(value)))
def getlist(self, key, default='', sep=',', keep_empty=True):
"""Return a list of values that have been specified as a single
comma-separated option.
A different separator can be specified using the `sep` parameter. If
the `keep_empty` parameter is set to `False`, empty elements are omitted
from the list.
Valid default input is a string or a list. Returns a list.
"""
value = self.get(key, default)
if not value:
return []
if isinstance(value, basestring):
items = [item.strip() for item in value.split(sep)]
else:
items = list(value)
if not keep_empty:
items = filter(None, items)
return items
def getpath(self, key, default=''):
"""Return the value of the specified option as a path, relative to
the location of this configuration file.
Valid default input is a string. Returns a normalized path.
"""
path = self.get(key, default)
if not path:
return default
if not os.path.isabs(path):
path = os.path.join(os.path.dirname(self.config.filename), path)
return os.path.normcase(os.path.realpath(path))
def options(self, compmgr=None):
"""Return `(key, value)` tuples for every option in the section.
This includes options that have default values that haven't been
overridden. If `compmgr` is specified, only return default option
values for components that are enabled in the given `ComponentManager`.
"""
for key in self.iterate(compmgr):
yield key, self.get(key)
def set(self, key, value):
"""Change a configuration value.
These changes are not persistent unless saved with `save()`.
"""
self._cache.pop(key, None)
name_str = _to_utf8(self.name)
key_str = _to_utf8(key)
if not self.config.parser.has_section(name_str):
self.config.parser.add_section(name_str)
if value is None:
self.overridden[key] = True
value_str = ''
else:
value_str = _to_utf8(value)
return self.config.parser.set(name_str, key_str, value_str)
def remove(self, key):
"""Delete a key from this section.
Like for `set()`, the changes won't persist until `save()` gets called.
"""
name_str = _to_utf8(self.name)
if self.config.parser.has_section(name_str):
self._cache.pop(key, None)
self.config.parser.remove_option(_to_utf8(self.name), _to_utf8(key))
class Option(object):
"""Descriptor for configuration options on `Configurable` subclasses."""
registry = {}
accessor = Section.get
@staticmethod
def get_registry(compmgr=None):
"""Return the option registry, as a `dict` mapping `(section, key)`
tuples to `Option` objects.
If `compmgr` is specified, only return options for components that are
enabled in the given `ComponentManager`.
"""
if compmgr is None:
return Option.registry
from trac.core import ComponentMeta
components = {}
for cls in ComponentMeta._components:
for attr in cls.__dict__.itervalues():
if isinstance(attr, Option):
components[attr] = cls
return dict(each for each in Option.registry.items()
if each[1] not in components
or compmgr.is_enabled(components[each[1]]))
def __init__(self, section, name, default=None, doc=''):
"""Create the configuration option.
@param section: the name of the configuration section this option
belongs to
@param name: the name of the option
@param default: the default value for the option
@param doc: documentation of the option
"""
self.section = section
self.name = name
self.default = default
self.registry[(self.section, self.name)] = self
self.__doc__ = doc
def __get__(self, instance, owner):
if instance is None:
return self
config = getattr(instance, 'config', None)
if config and isinstance(config, Configuration):
section = config[self.section]
value = self.accessor(section, self.name, self.default)
return value
return None
def __set__(self, instance, value):
raise AttributeError, 'can\'t set attribute'
def __repr__(self):
return '<%s [%s] "%s">' % (self.__class__.__name__, self.section,
self.name)
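# Usage sketch (hypothetical component; `Option` is used as a class attribute
# descriptor on `Component` subclasses, as implemented above):
#
#     class MyComponent(Component):
#         greeting = Option('mycomponent', 'greeting', 'hello',
#             """Greeting text used by MyComponent.""")
#
# Reading `self.greeting` on an enabled component then returns the value from
# trac.ini, falling back to the declared default.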
class BoolOption(Option):
"""Descriptor for boolean configuration options."""
accessor = Section.getbool
class IntOption(Option):
"""Descriptor for integer configuration options."""
accessor = Section.getint
class FloatOption(Option):
"""Descriptor for float configuration options."""
accessor = Section.getfloat
class ListOption(Option):
"""Descriptor for configuration options that contain multiple values
separated by a specific character.
"""
def __init__(self, section, name, default=None, sep=',', keep_empty=False,
doc=''):
Option.__init__(self, section, name, default, doc)
self.sep = sep
self.keep_empty = keep_empty
def accessor(self, section, name, default):
return section.getlist(name, default, self.sep, self.keep_empty)
class ChoiceOption(Option):
"""Descriptor for configuration options providing a choice among a list
of items.
The default value is the first choice in the list.
"""
def __init__(self, section, name, choices, doc=''):
Option.__init__(self, section, name, _to_utf8(choices[0]), doc)
self.choices = set(_to_utf8(choice).strip() for choice in choices)
def accessor(self, section, name, default):
value = section.get(name, default)
if value not in self.choices:
raise ConfigurationError(
_('[%(section)s] %(entry)s: expected one of '
'(%(choices)s), got %(value)s',
section=section.name, entry=name, value=repr(value),
choices=', '.join('"%s"' % c
for c in sorted(self.choices))))
return value
class PathOption(Option):
"""Descriptor for file system path configuration options."""
accessor = Section.getpath
class ExtensionOption(Option):
def __init__(self, section, name, interface, default=None, doc=''):
Option.__init__(self, section, name, default, doc)
self.xtnpt = ExtensionPoint(interface)
def __get__(self, instance, owner):
if instance is None:
return self
value = Option.__get__(self, instance, owner)
for impl in self.xtnpt.extensions(instance):
if impl.__class__.__name__ == value:
return impl
raise AttributeError('Cannot find an implementation of the "%s" '
'interface named "%s". Please update the option '
'%s.%s in trac.ini.'
% (self.xtnpt.interface.__name__, value,
self.section, self.name))
class OrderedExtensionsOption(ListOption):
"""A comma separated, ordered, list of components implementing `interface`.
Can be empty.
If `include_missing` is true (the default) all components implementing the
interface are returned, with those specified by the option ordered first."""
def __init__(self, section, name, interface, default=None,
include_missing=True, doc=''):
ListOption.__init__(self, section, name, default, doc=doc)
self.xtnpt = ExtensionPoint(interface)
self.include_missing = include_missing
def __get__(self, instance, owner):
if instance is None:
return self
order = ListOption.__get__(self, instance, owner)
components = []
for impl in self.xtnpt.extensions(instance):
if self.include_missing or impl.__class__.__name__ in order:
components.append(impl)
def compare(x, y):
x, y = x.__class__.__name__, y.__class__.__name__
if x not in order:
return int(y in order)
if y not in order:
return -int(x in order)
return cmp(order.index(x), order.index(y))
components.sort(compare)
return components
class ConfigurationAdmin(Component):
"""trac-admin command provider for trac.ini administration."""
implements(IAdminCommandProvider)
# IAdminCommandProvider methods
def get_admin_commands(self):
yield ('config get', '<section> <option>',
'Get the value of the given option in "trac.ini"',
self._complete_config, self._do_get)
yield ('config remove', '<section> <option>',
'Remove the specified option from "trac.ini"',
self._complete_config, self._do_remove)
yield ('config set', '<section> <option> <value>',
'Set the value for the given option in "trac.ini"',
self._complete_config, self._do_set)
def _complete_config(self, args):
if len(args) == 1:
return self.config.sections()
elif len(args) == 2:
return [name for (name, value) in self.config[args[0]].options()]
def _do_get(self, section, option):
if not self.config.has_option(section, option):
raise AdminCommandError(
_("Option '%(option)s' doesn't exist in section '%(section)s'",
option=option, section=section))
printout(self.config.get(section, option))
def _do_set(self, section, option, value):
self.config.set(section, option, value)
self.config.save()
if section == 'inherit' and option == 'file':
self.config.parse_if_needed(force=True) # Full reload
def _do_remove(self, section, option):
if not self.config.has_option(section, option):
raise AdminCommandError(
_("Option '%(option)s' doesn't exist in section '%(section)s'",
option=option, section=section))
self.config.remove(section, option)
self.config.save()
if section == 'inherit' and option == 'file':
self.config.parse_if_needed(force=True) # Full reload
| 38.620879
| 82
| 0.577358
|
4a12e6641e642560f9a8c724ee05ea437c67f5b8
| 32,584
|
py
|
Python
|
tools/idf_monitor.py
|
ebertn/esp-idf-nes
|
9dc7e9a258fd378d36408a1348253b373974b431
|
[
"Apache-2.0"
] | 2
|
2020-06-23T08:05:58.000Z
|
2020-06-24T01:25:51.000Z
|
tools/idf_monitor.py
|
ebertn/esp-idf-nes
|
9dc7e9a258fd378d36408a1348253b373974b431
|
[
"Apache-2.0"
] | 2
|
2022-03-29T05:16:50.000Z
|
2022-03-29T05:16:50.000Z
|
vendors/espressif/esp-idf/tools/idf_monitor.py
|
ictk-solution-dev/amazon-freertos
|
cc76512292ddfb70bba3030dbcb740ef3c6ead8b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
#
# esp-idf serial output monitor tool. Does some helpful things:
# - Looks up hex addresses in ELF file with addr2line
# - Reset ESP32 via serial RTS line (Ctrl-T Ctrl-R)
# - Run "make (or idf.py) flash" (Ctrl-T Ctrl-F)
# - Run "make (or idf.py) app-flash" (Ctrl-T Ctrl-A)
# - If gdbstub output is detected, gdb is automatically loaded
#
# Copyright 2015-2016 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Contains elements taken from miniterm "Very simple serial terminal" which
# is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# Originally released under BSD-3-Clause license.
#
from __future__ import print_function, division
from __future__ import unicode_literals
from builtins import chr
from builtins import object
from builtins import bytes
import subprocess
import argparse
import codecs
import re
import os
try:
import queue
except ImportError:
import Queue as queue
import shlex
import time
import sys
import serial
import serial.tools.miniterm as miniterm
import threading
import ctypes
import types
from distutils.version import StrictVersion
key_description = miniterm.key_description
# Control-key characters
CTRL_A = '\x01'
CTRL_B = '\x02'
CTRL_F = '\x06'
CTRL_H = '\x08'
CTRL_R = '\x12'
CTRL_T = '\x14'
CTRL_Y = '\x19'
CTRL_P = '\x10'
CTRL_X = '\x18'
CTRL_RBRACKET = '\x1d' # Ctrl+]
# ANSI terminal codes (if changed, regular expressions in LineMatcher need to be updated)
ANSI_RED = '\033[1;31m'
ANSI_YELLOW = '\033[0;33m'
ANSI_NORMAL = '\033[0m'
def color_print(message, color):
""" Print a message to stderr with colored highlighting """
sys.stderr.write("%s%s%s\n" % (color, message, ANSI_NORMAL))
def yellow_print(message):
color_print(message, ANSI_YELLOW)
def red_print(message):
color_print(message, ANSI_RED)
__version__ = "1.1"
# Tags for tuples in queues
TAG_KEY = 0
TAG_SERIAL = 1
TAG_SERIAL_FLUSH = 2
# regex matches a potential PC value (0x4xxxxxxx)
MATCH_PCADDR = re.compile(r'0x4[0-9a-f]{7}', re.IGNORECASE)
DEFAULT_TOOLCHAIN_PREFIX = "xtensa-esp32-elf-"
DEFAULT_PRINT_FILTER = ""
class StoppableThread(object):
"""
Provide a Thread-like class which can be 'cancelled' via a subclass-provided
cancellation method.
Can be started and stopped multiple times.
Isn't an instance of type Thread because Python Thread objects can only be run once
"""
def __init__(self):
self._thread = None
@property
def alive(self):
"""
Is 'alive' whenever the internal thread object exists
"""
return self._thread is not None
def start(self):
if self._thread is None:
self._thread = threading.Thread(target=self._run_outer)
self._thread.start()
def _cancel(self):
pass # override to provide cancellation functionality
def run(self):
pass # override for the main thread behaviour
def _run_outer(self):
try:
self.run()
finally:
self._thread = None
def stop(self):
if self._thread is not None:
old_thread = self._thread
self._thread = None
self._cancel()
old_thread.join()
class ConsoleReader(StoppableThread):
""" Read input keys from the console and push them to the queue,
until stopped.
"""
def __init__(self, console, event_queue, test_mode):
super(ConsoleReader, self).__init__()
self.console = console
self.event_queue = event_queue
self.test_mode = test_mode
def run(self):
self.console.setup()
try:
while self.alive:
try:
if os.name == 'nt':
                        # Windows kludge: the console.cancel() method doesn't seem to
                        # unblock getkey() on the Windows implementation, so we only
                        # call getkey() if we know there's a key waiting for us.
import msvcrt
while not msvcrt.kbhit() and self.alive:
time.sleep(0.1)
if not self.alive:
break
                    elif self.test_mode:
                        # In testing mode stdin is connected to a PTY but is not actually
                        # used for input. For a PTY, cancellation via fcntl.ioctl does not
                        # work and self.console.getkey() would hang, so we avoid calling it.
while self.alive:
time.sleep(0.1)
break
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if c is not None:
self.event_queue.put((TAG_KEY, c), False)
finally:
self.console.cleanup()
def _cancel(self):
if os.name == 'posix' and not self.test_mode:
# this is the way cancel() is implemented in pyserial 3.3 or newer,
# older pyserial (3.1+) has cancellation implemented via 'select',
# which does not work when console sends an escape sequence response
#
# even older pyserial (<3.1) does not have this method
#
# on Windows there is a different (also hacky) fix, applied above.
#
# note that TIOCSTI is not implemented in WSL / bash-on-Windows.
# TODO: introduce some workaround to make it work there.
#
# Note: This would throw exception in testing mode when the stdin is connected to PTY.
import fcntl
import termios
fcntl.ioctl(self.console.fd, termios.TIOCSTI, b'\0')
class SerialReader(StoppableThread):
""" Read serial data from the serial port and push to the
event queue, until stopped.
"""
def __init__(self, serial, event_queue):
super(SerialReader, self).__init__()
self.baud = serial.baudrate
self.serial = serial
self.event_queue = event_queue
if not hasattr(self.serial, 'cancel_read'):
# enable timeout for checking alive flag,
# if cancel_read not available
self.serial.timeout = 0.25
def run(self):
if not self.serial.is_open:
self.serial.baudrate = self.baud
self.serial.rts = True # Force an RTS reset on open
self.serial.open()
self.serial.rts = False
try:
while self.alive:
data = self.serial.read(self.serial.in_waiting or 1)
if len(data):
self.event_queue.put((TAG_SERIAL, data), False)
finally:
self.serial.close()
def _cancel(self):
if hasattr(self.serial, 'cancel_read'):
try:
self.serial.cancel_read()
except Exception:
pass
class LineMatcher(object):
"""
Assembles a dictionary of filtering rules based on the --print_filter
argument of idf_monitor. Then later it is used to match lines and
determine whether they should be shown on screen or not.
"""
LEVEL_N = 0
LEVEL_E = 1
LEVEL_W = 2
LEVEL_I = 3
LEVEL_D = 4
LEVEL_V = 5
level = {'N': LEVEL_N, 'E': LEVEL_E, 'W': LEVEL_W, 'I': LEVEL_I, 'D': LEVEL_D,
'V': LEVEL_V, '*': LEVEL_V, '': LEVEL_V}
def __init__(self, print_filter):
self._dict = dict()
self._re = re.compile(r'^(?:\033\[[01];?[0-9]+m?)?([EWIDV]) \([0-9]+\) ([^:]+): ')
items = print_filter.split()
if len(items) == 0:
self._dict["*"] = self.LEVEL_V # default is to print everything
for f in items:
s = f.split(r':')
if len(s) == 1:
# specifying no warning level defaults to verbose level
lev = self.LEVEL_V
elif len(s) == 2:
if len(s[0]) == 0:
raise ValueError('No tag specified in filter ' + f)
try:
lev = self.level[s[1].upper()]
except KeyError:
raise ValueError('Unknown warning level in filter ' + f)
else:
raise ValueError('Missing ":" in filter ' + f)
self._dict[s[0]] = lev
def match(self, line):
try:
m = self._re.search(line)
if m:
lev = self.level[m.group(1)]
if m.group(2) in self._dict:
return self._dict[m.group(2)] >= lev
return self._dict.get("*", self.LEVEL_N) >= lev
except (KeyError, IndexError):
# Regular line written with something else than ESP_LOG*
# or an empty line.
pass
        # We need something more than "*:N" for printing.
return self._dict.get("*", self.LEVEL_N) > self.LEVEL_N
class SerialStopException(Exception):
"""
This exception is used for stopping the IDF monitor in testing mode.
"""
pass
class Monitor(object):
"""
Monitor application main class.
This was originally derived from miniterm.Miniterm, but it turned out to be easier to write from scratch for this
purpose.
Main difference is that all event processing happens in the main thread, not the worker threads.
"""
def __init__(self, serial_instance, elf_file, print_filter, make="make", toolchain_prefix=DEFAULT_TOOLCHAIN_PREFIX, eol="CRLF"):
super(Monitor, self).__init__()
self.event_queue = queue.Queue()
self.console = miniterm.Console()
if os.name == 'nt':
sys.stderr = ANSIColorConverter(sys.stderr, decode_output=True)
self.console.output = ANSIColorConverter(self.console.output)
self.console.byte_output = ANSIColorConverter(self.console.byte_output)
if StrictVersion(serial.VERSION) < StrictVersion('3.3.0'):
# Use Console.getkey implementation from 3.3.0 (to be in sync with the ConsoleReader._cancel patch above)
def getkey_patched(self):
c = self.enc_stdin.read(1)
if c == chr(0x7f):
c = chr(8) # map the BS key (which yields DEL) to backspace
return c
self.console.getkey = types.MethodType(getkey_patched, self.console)
        socket_mode = serial_instance.port.startswith("socket://")  # testing hook - data from serial can make the monitor exit
self.serial = serial_instance
self.console_reader = ConsoleReader(self.console, self.event_queue, socket_mode)
self.serial_reader = SerialReader(self.serial, self.event_queue)
self.elf_file = elf_file
if not os.path.exists(make):
            self.make = shlex.split(make)  # allow for the possibility that the "make" arg encodes a list of arguments (for idf.py)
else:
self.make = make
self.toolchain_prefix = toolchain_prefix
self.menu_key = CTRL_T
self.exit_key = CTRL_RBRACKET
self.translate_eol = {
"CRLF": lambda c: c.replace("\n", "\r\n"),
"CR": lambda c: c.replace("\n", "\r"),
"LF": lambda c: c.replace("\r", "\n"),
}[eol]
# internal state
self._pressed_menu_key = False
self._last_line_part = b""
self._gdb_buffer = b""
self._pc_address_buffer = b""
self._line_matcher = LineMatcher(print_filter)
self._invoke_processing_last_line_timer = None
self._force_line_print = False
self._output_enabled = True
self._serial_check_exit = socket_mode
def invoke_processing_last_line(self):
self.event_queue.put((TAG_SERIAL_FLUSH, b''), False)
def main_loop(self):
self.console_reader.start()
self.serial_reader.start()
try:
while self.console_reader.alive and self.serial_reader.alive:
(event_tag, data) = self.event_queue.get()
if event_tag == TAG_KEY:
self.handle_key(data)
elif event_tag == TAG_SERIAL:
self.handle_serial_input(data)
if self._invoke_processing_last_line_timer is not None:
self._invoke_processing_last_line_timer.cancel()
self._invoke_processing_last_line_timer = threading.Timer(0.1, self.invoke_processing_last_line)
self._invoke_processing_last_line_timer.start()
                    # If no further data is received within the next short period
                    # of time then the _invoke_processing_last_line_timer
                    # generates an event which will result in finishing the
                    # last line. This is a fix for handling lines sent
                    # without EOL.
elif event_tag == TAG_SERIAL_FLUSH:
self.handle_serial_input(data, finalize_line=True)
else:
raise RuntimeError("Bad event data %r" % ((event_tag,data),))
except SerialStopException:
sys.stderr.write(ANSI_NORMAL + "Stopping condition has been received\n")
finally:
try:
self.console_reader.stop()
self.serial_reader.stop()
# Cancelling _invoke_processing_last_line_timer is not
# important here because receiving empty data doesn't matter.
self._invoke_processing_last_line_timer = None
except Exception:
pass
sys.stderr.write(ANSI_NORMAL + "\n")
def handle_key(self, key):
if self._pressed_menu_key:
self.handle_menu_key(key)
self._pressed_menu_key = False
elif key == self.menu_key:
self._pressed_menu_key = True
elif key == self.exit_key:
self.console_reader.stop()
self.serial_reader.stop()
else:
try:
key = self.translate_eol(key)
self.serial.write(codecs.encode(key))
except serial.SerialException:
                pass  # this shouldn't happen, but sometimes the port has already been closed by the serial thread
            except UnicodeEncodeError:
                pass  # this can happen if a non-ascii character was passed; ignore it
def handle_serial_input(self, data, finalize_line=False):
sp = data.split(b'\n')
if self._last_line_part != b"":
# add unprocessed part from previous "data" to the first line
sp[0] = self._last_line_part + sp[0]
self._last_line_part = b""
if sp[-1] != b"":
# last part is not a full line
self._last_line_part = sp.pop()
for line in sp:
if line != b"":
if self._serial_check_exit and line == self.exit_key.encode('latin-1'):
raise SerialStopException()
if self._output_enabled and (self._force_line_print or self._line_matcher.match(line.decode(errors="ignore"))):
self.console.write_bytes(line + b'\n')
self.handle_possible_pc_address_in_line(line)
self.check_gdbstub_trigger(line)
self._force_line_print = False
        # Now we have the last part (incomplete line) in _last_line_part. By
        # default we don't touch it and just wait for the rest of the line to
        # arrive. But if it does not arrive within a short time, we need to
        # make a decision.
if self._last_line_part != b"":
if self._force_line_print or (finalize_line and self._line_matcher.match(self._last_line_part.decode(errors="ignore"))):
self._force_line_print = True
if self._output_enabled:
self.console.write_bytes(self._last_line_part)
self.handle_possible_pc_address_in_line(self._last_line_part)
self.check_gdbstub_trigger(self._last_line_part)
# It is possible that the incomplete line cuts in half the PC
# address. A small buffer is kept and will be used the next time
# handle_possible_pc_address_in_line is invoked to avoid this problem.
# MATCH_PCADDR matches 10 character long addresses. Therefore, we
# keep the last 9 characters.
self._pc_address_buffer = self._last_line_part[-9:]
# GDB sequence can be cut in half also. GDB sequence is 7
# characters long, therefore, we save the last 6 characters.
self._gdb_buffer = self._last_line_part[-6:]
self._last_line_part = b""
# else: keeping _last_line_part and it will be processed the next time
# handle_serial_input is invoked
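    # Illustration (hypothetical chunks) of why 9 characters are buffered above:
    # MATCH_PCADDR addresses are 10 characters long ("0x4" plus 7 hex digits), so
    # an address can arrive split across two reads:
    #   chunk 1: b"Backtrace: 0x4008"   -> tail kept in _pc_address_buffer
    #   chunk 2: b"1234:0x3ffb0000"     -> buffer + chunk re-forms "0x40081234"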
def handle_possible_pc_address_in_line(self, line):
line = self._pc_address_buffer + line
self._pc_address_buffer = b""
for m in re.finditer(MATCH_PCADDR, line.decode(errors="ignore")):
self.lookup_pc_address(m.group())
def handle_menu_key(self, c):
if c == self.exit_key or c == self.menu_key: # send verbatim
self.serial.write(codecs.encode(c))
elif c in [CTRL_H, 'h', 'H', '?']:
red_print(self.get_help_text())
elif c == CTRL_R: # Reset device via RTS
self.serial.setRTS(True)
time.sleep(0.2)
self.serial.setRTS(False)
self.output_enable(True)
elif c == CTRL_F: # Recompile & upload
self.run_make("flash")
elif c == CTRL_A: # Recompile & upload app only
self.run_make("app-flash")
elif c == CTRL_Y: # Toggle output display
self.output_toggle()
elif c == CTRL_P:
yellow_print("Pause app (enter bootloader mode), press Ctrl-T Ctrl-R to restart")
            # quick way to trigger the pause without pressing the menu key again
self.serial.setDTR(False) # IO0=HIGH
self.serial.setRTS(True) # EN=LOW, chip in reset
time.sleep(1.3) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.1
self.serial.setDTR(True) # IO0=LOW
self.serial.setRTS(False) # EN=HIGH, chip out of reset
time.sleep(0.45) # timeouts taken from esptool.py, includes esp32r0 workaround. defaults: 0.05
self.serial.setDTR(False) # IO0=HIGH, done
elif c in [CTRL_X, 'x', 'X']: # Exiting from within the menu
self.console_reader.stop()
self.serial_reader.stop()
else:
            red_print('--- unknown menu character {} ---'.format(key_description(c)))
def get_help_text(self):
return """
--- idf_monitor ({version}) - ESP-IDF monitor tool
--- based on miniterm from pySerial
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {reset:7} Reset target board via RTS line
--- {makecmd:7} Build & flash project
--- {appmake:7} Build & flash app only
--- {output:7} Toggle output display
--- {pause:7} Reset target into bootloader to pause app via RTS line
--- {menuexit:7} Exit program
""".format(version=__version__,
exit=key_description(self.exit_key),
menu=key_description(self.menu_key),
reset=key_description(CTRL_R),
makecmd=key_description(CTRL_F),
appmake=key_description(CTRL_A),
output=key_description(CTRL_Y),
pause=key_description(CTRL_P),
menuexit=key_description(CTRL_X) + ' (or X)')
def __enter__(self):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.serial_reader.stop()
self.console_reader.stop()
def __exit__(self, *args, **kwargs):
""" Use 'with self' to temporarily disable monitoring behaviour """
self.console_reader.start()
self.serial_reader.start()
def prompt_next_action(self, reason):
self.console.setup() # set up console to trap input characters
try:
red_print("""
--- {}
--- Press {} to exit monitor.
--- Press {} to build & flash project.
--- Press {} to build & flash app.
--- Press any other key to resume monitor (resets target).""".format(reason,
key_description(self.exit_key),
key_description(CTRL_F),
key_description(CTRL_A)))
k = CTRL_T # ignore CTRL-T here, so people can muscle-memory Ctrl-T Ctrl-F, etc.
while k == CTRL_T:
k = self.console.getkey()
finally:
self.console.cleanup()
if k == self.exit_key:
self.event_queue.put((TAG_KEY, k))
elif k in [CTRL_F, CTRL_A]:
self.event_queue.put((TAG_KEY, self.menu_key))
self.event_queue.put((TAG_KEY, k))
def run_make(self, target):
with self:
if isinstance(self.make, list):
popen_args = self.make + [target]
else:
popen_args = [self.make, target]
yellow_print("Running %s..." % " ".join(popen_args))
p = subprocess.Popen(popen_args)
try:
p.wait()
except KeyboardInterrupt:
p.wait()
if p.returncode != 0:
self.prompt_next_action("Build failed")
else:
self.output_enable(True)
def lookup_pc_address(self, pc_addr):
cmd = ["%saddr2line" % self.toolchain_prefix,
"-pfiaC", "-e", self.elf_file, pc_addr]
try:
translation = subprocess.check_output(cmd, cwd=".")
if b"?? ??:0" not in translation:
yellow_print(translation.decode())
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
def check_gdbstub_trigger(self, line):
line = self._gdb_buffer + line
self._gdb_buffer = b""
m = re.search(b"\\$(T..)#(..)", line) # look for a gdb "reason" for a break
if m is not None:
try:
chsum = sum(ord(bytes([p])) for p in m.group(1)) & 0xFF
calc_chsum = int(m.group(2), 16)
except ValueError:
return # payload wasn't valid hex digits
if chsum == calc_chsum:
self.run_gdb()
else:
red_print("Malformed gdb message... calculated checksum %02x received %02x" % (chsum, calc_chsum))
def run_gdb(self):
with self: # disable console control
sys.stderr.write(ANSI_NORMAL)
try:
cmd = ["%sgdb" % self.toolchain_prefix,
"-ex", "set serial baud %d" % self.serial.baudrate,
"-ex", "target remote %s" % self.serial.port,
"-ex", "interrupt", # monitor has already parsed the first 'reason' command, need a second
self.elf_file]
process = subprocess.Popen(cmd, cwd=".")
process.wait()
except OSError as e:
red_print("%s: %s" % (" ".join(cmd), e))
except KeyboardInterrupt:
pass # happens on Windows, maybe other OSes
finally:
try:
# on Linux, maybe other OSes, gdb sometimes seems to be alive even after wait() returns...
process.terminate()
except Exception:
pass
try:
# also on Linux, maybe other OSes, gdb sometimes exits uncleanly and breaks the tty mode
subprocess.call(["stty", "sane"])
except Exception:
pass # don't care if there's no stty, we tried...
self.prompt_next_action("gdb exited")
def output_enable(self, enable):
self._output_enabled = enable
def output_toggle(self):
self._output_enabled = not self._output_enabled
yellow_print("\nToggle output display: {}, Type Ctrl-T Ctrl-Y to show/disable output again.".format(self._output_enabled))
def main():
parser = argparse.ArgumentParser("idf_monitor - a serial output monitor for esp-idf")
parser.add_argument(
'--port', '-p',
help='Serial port device',
default=os.environ.get('ESPTOOL_PORT', '/dev/ttyUSB0')
)
parser.add_argument(
'--baud', '-b',
help='Serial port baud rate',
type=int,
default=os.environ.get('MONITOR_BAUD', 115200))
parser.add_argument(
'--make', '-m',
help='Command to run make',
type=str, default='make')
parser.add_argument(
'--toolchain-prefix',
help="Triplet prefix to add before cross-toolchain names",
default=DEFAULT_TOOLCHAIN_PREFIX)
parser.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="End of line to use when sending to the serial port",
default='CR')
parser.add_argument(
'elf_file', help='ELF file of application',
type=argparse.FileType('rb'))
parser.add_argument(
'--print_filter',
help="Filtering string",
default=DEFAULT_PRINT_FILTER)
args = parser.parse_args()
if args.port.startswith("/dev/tty."):
args.port = args.port.replace("/dev/tty.", "/dev/cu.")
yellow_print("--- WARNING: Serial ports accessed as /dev/tty.* will hang gdb if launched.")
yellow_print("--- Using %s instead..." % args.port)
serial_instance = serial.serial_for_url(args.port, args.baud,
do_not_open=True)
serial_instance.dtr = False
serial_instance.rts = False
args.elf_file.close() # don't need this as a file
# remove the parallel jobserver arguments from MAKEFLAGS, as any
# parent make is only running 1 job (monitor), so we can re-spawn
# all of the child makes we need (the -j argument remains part of
# MAKEFLAGS)
try:
makeflags = os.environ["MAKEFLAGS"]
makeflags = re.sub(r"--jobserver[^ =]*=[0-9,]+ ?", "", makeflags)
os.environ["MAKEFLAGS"] = makeflags
except KeyError:
pass # not running a make jobserver
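    # For instance (hypothetical value), MAKEFLAGS=" -j --jobserver-fds=3,4 -l8"
    # becomes " -j -l8" after the substitution above: the -j flag survives, the
    # jobserver file descriptors do not.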
monitor = Monitor(serial_instance, args.elf_file.name, args.print_filter, args.make, args.toolchain_prefix, args.eol)
yellow_print('--- idf_monitor on {p.name} {p.baudrate} ---'.format(
p=serial_instance))
yellow_print('--- Quit: {} | Menu: {} | Help: {} followed by {} ---'.format(
key_description(monitor.exit_key),
key_description(monitor.menu_key),
key_description(monitor.menu_key),
key_description(CTRL_H)))
if args.print_filter != DEFAULT_PRINT_FILTER:
yellow_print('--- Print filter: {} ---'.format(args.print_filter))
monitor.main_loop()
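# Typical invocation of main() above (hypothetical paths), matching its argparse setup:
#   python idf_monitor.py --port /dev/ttyUSB0 --baud 115200 build/my_app.elf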
if os.name == 'nt':
# Windows console stuff
STD_OUTPUT_HANDLE = -11
STD_ERROR_HANDLE = -12
# wincon.h values
FOREGROUND_INTENSITY = 8
FOREGROUND_GREY = 7
# matches the ANSI color change sequences that IDF sends
RE_ANSI_COLOR = re.compile(b'\033\\[([01]);3([0-7])m')
# list mapping the 8 ANSI colors (the indexes) to Windows Console colors
ANSI_TO_WINDOWS_COLOR = [0, 4, 2, 6, 1, 5, 3, 7]
GetStdHandle = ctypes.windll.kernel32.GetStdHandle
SetConsoleTextAttribute = ctypes.windll.kernel32.SetConsoleTextAttribute
class ANSIColorConverter(object):
"""Class to wrap a file-like output stream, intercept ANSI color codes,
and convert them into calls to Windows SetConsoleTextAttribute.
Doesn't support all ANSI terminal code escape sequences, only the sequences IDF uses.
Ironically, in Windows this console output is normally wrapped by winpty which will then detect the console text
    color changes and convert these back to ANSI color codes for MSYS' terminal to display. However, this is the
least-bad working solution, as winpty doesn't support any "passthrough" mode for raw output.
"""
def __init__(self, output=None, decode_output=False):
self.output = output
self.decode_output = decode_output
self.handle = GetStdHandle(STD_ERROR_HANDLE if self.output == sys.stderr else STD_OUTPUT_HANDLE)
self.matched = b''
def _output_write(self, data):
try:
if self.decode_output:
self.output.write(data.decode())
else:
self.output.write(data)
except IOError:
# Windows 10 bug since the Fall Creators Update, sometimes writing to console randomly throws
# an exception (however, the character is still written to the screen)
# Ref https://github.com/espressif/esp-idf/issues/1136
pass
def write(self, data):
if isinstance(data, bytes):
data = bytearray(data)
else:
data = bytearray(data, 'utf-8')
for b in data:
b = bytes([b])
length = len(self.matched)
if b == b'\033': # ESC
self.matched = b
elif (length == 1 and b == b'[') or (1 < length < 7):
self.matched += b
if self.matched == ANSI_NORMAL.encode('latin-1'): # reset console
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, FOREGROUND_GREY)
self.matched = b''
elif len(self.matched) == 7: # could be an ANSI sequence
m = re.match(RE_ANSI_COLOR, self.matched)
if m is not None:
color = ANSI_TO_WINDOWS_COLOR[int(m.group(2))]
if m.group(1) == b'1':
color |= FOREGROUND_INTENSITY
# Flush is required only with Python3 - switching color before it is printed would mess up the console
self.flush()
SetConsoleTextAttribute(self.handle, color)
else:
self._output_write(self.matched) # not an ANSI color code, display verbatim
self.matched = b''
else:
self._output_write(b)
self.matched = b''
def flush(self):
self.output.flush()
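    # Worked example (hypothetical sequence) for the ANSI parsing above: the bold-red
    # sequence b"\033[1;31m" matches RE_ANSI_COLOR with groups (b'1', b'1'), giving
    # ANSI_TO_WINDOWS_COLOR[1] | FOREGROUND_INTENSITY == 4 | 8 == 12, i.e. bright red
    # passed to SetConsoleTextAttribute.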
if __name__ == "__main__":
main()
| 40.679151
| 133
| 0.570894
|
4a12e773efb93bc0f672c84e283ef896ff6b103d
| 598
|
py
|
Python
|
lutin_dollar-generate-form.py
|
atria-soft/dollar
|
63ecc46b10dcfb9e9949bee0765abdb693cbfa3b
|
[
"Apache-2.0"
] | null | null | null |
lutin_dollar-generate-form.py
|
atria-soft/dollar
|
63ecc46b10dcfb9e9949bee0765abdb693cbfa3b
|
[
"Apache-2.0"
] | null | null | null |
lutin_dollar-generate-form.py
|
atria-soft/dollar
|
63ecc46b10dcfb9e9949bee0765abdb693cbfa3b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import realog.debug as debug
import lutin.tools as tools
def get_type():
return "BINARY"
def get_sub_type():
return "TOOL"
def get_desc():
return "Dollar generator of all needed form to have the best text recognition"
def get_licence():
return "APACHE-2"
def get_compagny_type():
return "com"
def get_compagny_name():
return "atria-soft"
def get_maintainer():
return "authors.txt"
def configure(target, my_module):
my_module.add_src_file([
'tool/generate-form/main.cpp'
])
my_module.add_depend([
'dollar',
'test-debug',
])
return True
| 16.162162
| 79
| 0.704013
|
4a12e7cc1b654f19cdb5b12868c5b4ccfd304e15
| 5,785
|
py
|
Python
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/list_scaling_policy_execute_logs_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | 1
|
2021-11-03T07:54:50.000Z
|
2021-11-03T07:54:50.000Z
|
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/list_scaling_policy_execute_logs_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
huaweicloud-sdk-as/huaweicloudsdkas/v1/model/list_scaling_policy_execute_logs_response.py
|
wuchen-huawei/huaweicloud-sdk-python-v3
|
3683d703f4320edb2b8516f36f16d485cff08fc2
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
import pprint
import re
import six
from huaweicloudsdkcore.sdk_response import SdkResponse
class ListScalingPolicyExecuteLogsResponse(SdkResponse):
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'total_number': 'int',
'start_number': 'int',
'limit': 'int',
'scaling_policy_execute_log': 'list[ScalingPolicyExecuteLogList]'
}
attribute_map = {
'total_number': 'total_number',
'start_number': 'start_number',
'limit': 'limit',
'scaling_policy_execute_log': 'scaling_policy_execute_log'
}
def __init__(self, total_number=None, start_number=None, limit=None, scaling_policy_execute_log=None):
"""ListScalingPolicyExecuteLogsResponse - a model defined in huaweicloud sdk"""
super(ListScalingPolicyExecuteLogsResponse, self).__init__()
self._total_number = None
self._start_number = None
self._limit = None
self._scaling_policy_execute_log = None
self.discriminator = None
if total_number is not None:
self.total_number = total_number
if start_number is not None:
self.start_number = start_number
if limit is not None:
self.limit = limit
if scaling_policy_execute_log is not None:
self.scaling_policy_execute_log = scaling_policy_execute_log
@property
def total_number(self):
"""Gets the total_number of this ListScalingPolicyExecuteLogsResponse.
        Total number of records.
:return: The total_number of this ListScalingPolicyExecuteLogsResponse.
:rtype: int
"""
return self._total_number
@total_number.setter
def total_number(self, total_number):
"""Sets the total_number of this ListScalingPolicyExecuteLogsResponse.
        Total number of records.
:param total_number: The total_number of this ListScalingPolicyExecuteLogsResponse.
:type: int
"""
self._total_number = total_number
@property
def start_number(self):
"""Gets the start_number of this ListScalingPolicyExecuteLogsResponse.
        Start row number of the query.
:return: The start_number of this ListScalingPolicyExecuteLogsResponse.
:rtype: int
"""
return self._start_number
@start_number.setter
def start_number(self, start_number):
"""Sets the start_number of this ListScalingPolicyExecuteLogsResponse.
        Start row number of the query.
:param start_number: The start_number of this ListScalingPolicyExecuteLogsResponse.
:type: int
"""
self._start_number = start_number
@property
def limit(self):
"""Gets the limit of this ListScalingPolicyExecuteLogsResponse.
        Number of records returned by the query.
:return: The limit of this ListScalingPolicyExecuteLogsResponse.
:rtype: int
"""
return self._limit
@limit.setter
def limit(self, limit):
"""Sets the limit of this ListScalingPolicyExecuteLogsResponse.
        Number of records returned by the query.
:param limit: The limit of this ListScalingPolicyExecuteLogsResponse.
:type: int
"""
self._limit = limit
@property
def scaling_policy_execute_log(self):
"""Gets the scaling_policy_execute_log of this ListScalingPolicyExecuteLogsResponse.
        List of scaling policy execution logs.
:return: The scaling_policy_execute_log of this ListScalingPolicyExecuteLogsResponse.
:rtype: list[ScalingPolicyExecuteLogList]
"""
return self._scaling_policy_execute_log
@scaling_policy_execute_log.setter
def scaling_policy_execute_log(self, scaling_policy_execute_log):
"""Sets the scaling_policy_execute_log of this ListScalingPolicyExecuteLogsResponse.
        List of scaling policy execution logs.
:param scaling_policy_execute_log: The scaling_policy_execute_log of this ListScalingPolicyExecuteLogsResponse.
:type: list[ScalingPolicyExecuteLogList]
"""
self._scaling_policy_execute_log = scaling_policy_execute_log
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, ListScalingPolicyExecuteLogsResponse):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
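# Minimal construction sketch (hypothetical values; nested ScalingPolicyExecuteLogList
# items omitted for brevity):
#   resp = ListScalingPolicyExecuteLogsResponse(total_number=2, start_number=0, limit=20)
#   resp.to_dict()
#   # -> {'total_number': 2, 'start_number': 0, 'limit': 20,
#   #     'scaling_policy_execute_log': None}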
| 30.130208
| 119
| 0.628349
|
4a12e843974c3693781889e1549b5aedf1fcc0ed
| 1,483
|
py
|
Python
|
train_mn.py
|
Erdene-Ochir0417/DialoGPT
|
ed7af892c6ecc63beba33010cda4e28b9b0d1aa9
|
[
"MIT"
] | null | null | null |
train_mn.py
|
Erdene-Ochir0417/DialoGPT
|
ed7af892c6ecc63beba33010cda4e28b9b0d1aa9
|
[
"MIT"
] | null | null | null |
train_mn.py
|
Erdene-Ochir0417/DialoGPT
|
ed7af892c6ecc63beba33010cda4e28b9b0d1aa9
|
[
"MIT"
] | null | null | null |
import subprocess as sp
import os
import sys
PROJECT_FOLDER = "."
MODEL_FOLDER = "./models"
PYTHON_EXE = 'python'
target_folder = "./models/small"
MAX_LEN = 128
data_path = './data/train.tsv'
data_db = f'{data_path[:-4]}.{MAX_LEN}len.db'
train_cmd = 'LSP_train_mn.py'
args = [
'--model_name_or_path', target_folder,
'--init_checkpoint', os.path.join(target_folder, 'pytorch_model.bin'),
    '--train_input_file', data_db,  # file from last step
'--eval_input_file', './data/test.tsv', # dummy test data
'--output_dir', os.path.join(MODEL_FOLDER, 'output_model'),
'--seed', '42',
'--max_seq_length', '128',
'--train_batch_size', '8',
'--gradient_accumulation_steps', '8',
'--eval_batch_size', '8',
'--learning_rate', '1e-5',
'--num_optim_steps', '10000',
'--valid_step', '5000',
'--warmup_steps', '4000',
'--normalize_data', 'true',
'--fp16', 'false',
'--lr_schedule', 'noam',
'--loss_scale', '0.0',
'--no_token_id', 'true',
'--pbar', 'true'
]
arg = ' '.join(args)
train_cmd = train_cmd + ' ' + arg
print(PYTHON_EXE + ' ' + train_cmd)
print('#########################################################################')
with open('./output.log', 'wb') as f:
process = sp.Popen([PYTHON_EXE] + train_cmd.split(' '), stdout=sp.PIPE, stderr=sp.STDOUT, cwd=PROJECT_FOLDER)
for line in iter(process.stdout.readline, b''):
sys.stdout.write(line.decode(sys.stdout.encoding))
f.write(line)
| 34.488372
| 113
| 0.594066
|
4a12e856f8b1c30408d46bfcfc59e71c78596be0
| 470
|
py
|
Python
|
vsco-parser.py
|
gebeto/python
|
09c00741d66a171740d15f61c6051bdde828a979
|
[
"MIT"
] | 1
|
2021-12-07T01:10:21.000Z
|
2021-12-07T01:10:21.000Z
|
vsco-parser.py
|
gebeto/python
|
09c00741d66a171740d15f61c6051bdde828a979
|
[
"MIT"
] | null | null | null |
vsco-parser.py
|
gebeto/python
|
09c00741d66a171740d15f61c6051bdde828a979
|
[
"MIT"
] | null | null | null |
import requests
filters = []
url = "https://camstore.vsco.co/2.1/camstore/ios?app_id=77357429-48A7-4E5A-A2AF-5D8C6175EFFC&app_version=v4.5.6%20%282040%29&device_model=iPhone%205s&email=bboyheadman%40gmail.com&os_version=9.2"
resp = requests.get(url).json()
for each in resp["products"]:
    try:
        for eacheach in each["presets"]:
            print(eacheach["key"])
            filters.append(str(eacheach["key"]))
    except KeyError:  # narrowed from a bare except; presumably guards products without a "presets" entry
        pass
with open('vsco.txt', 'w') as f:
    f.write(str(filters).replace("'", '"'))
| 31.333333
| 195
| 0.714894
|
4a12e98f18c18e6d091e8186fc446780d9f594b0
| 8,905
|
py
|
Python
|
pdf2word/test.py
|
hoslo/ocr
|
4f78ae7013beb2cab8fb9391ba25ba5e6e78967c
|
[
"Apache-2.0"
] | 4
|
2019-05-27T10:23:55.000Z
|
2020-01-19T10:03:14.000Z
|
pdf2word/test.py
|
dun933/ocr
|
4f78ae7013beb2cab8fb9391ba25ba5e6e78967c
|
[
"Apache-2.0"
] | null | null | null |
pdf2word/test.py
|
dun933/ocr
|
4f78ae7013beb2cab8fb9391ba25ba5e6e78967c
|
[
"Apache-2.0"
] | 3
|
2019-08-16T18:24:02.000Z
|
2020-05-15T06:35:45.000Z
|
import os
# from extract_table_1 import extract_table
from pdf2word.pdf_table import generate_table, extract_table
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import cv2
import fitz
import pickle
import numpy as np
from io import open
from PIL import Image
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
# from pdfminer.pdfinterp import process_pdf
from pdfminer.converter import TextConverter, PDFPageAggregator
from pdfminer.layout import LAParams, LTTextBoxHorizontal, LTImage, LTFigure, LTRect, LTLine
from pdfminer.pdfparser import PDFDocument, PDFParser
# from pdf_word.pdf_table import extract_table, generate_table
from pdf2word.layout_test_pdf import sort_paragraph, save2docx
# def pdf_pages(file_path):
# with open(file_path, 'rb') as file:
# praser = PDFParser(file)
#         # Create a PDF document object
#         doc = PDFDocument()
#         # Connect the parser with the document object
#         praser.set_document(doc)
#         doc.set_parser(praser)
#         # Provide the initialization password
#         # (if there is no password, use an empty string)
#         doc.initialize()
#         # print('is_extractable', doc.is_extractable)
#         # Create a PDF resource manager to manage shared resources
#         rsrcmgr = PDFResourceManager()
#         # Create a PDF device object
#         laparams = LAParams()
#         device = PDFPageAggregator(rsrcmgr, laparams=laparams)
#         # Create a PDF interpreter object
# interpreter = PDFPageInterpreter(rsrcmgr, device)
#
# page = list(doc.get_pages())
# return len(page)
def pdf_is_text(file_path):
    # TODO: the text-detection logic below is currently short-circuited by this
    # early return, so every PDF is reported as image-based for now
    return False
try:
with open(file_path, 'rb') as file:
praser = PDFParser(file)
            # Create a PDF document object
            doc = PDFDocument()
            # Connect the parser with the document object
            praser.set_document(doc)
            doc.set_parser(praser)
            # Provide the initialization password
            # (if there is no password, use an empty string)
            doc.initialize()
            # Check whether the document is encrypted
if doc.encryption:
# pdf = fitz.Document(file_path)
# pdf.save('fitz_decrypt.pdf')
with open('fitz_decrypt.pdf', 'rb') as f:
praser = PDFParser(f)
doc = PDFDocument()
praser.set_document(doc)
doc.set_parser(praser)
doc.initialize()
# print('is_extractable', doc.is_extractable)
            # Create a PDF resource manager to manage shared resources
            rsrcmgr = PDFResourceManager()
            # Create a PDF device object
            laparams = LAParams()
            device = PDFPageAggregator(rsrcmgr, laparams=laparams)
            # Create a PDF interpreter object
            interpreter = PDFPageInterpreter(rsrcmgr, device)
            # Iterate over the pages, handling one page per loop
            first_three = [0, 0, 0]
            for index, page in enumerate(doc.get_pages()):  # doc.get_pages() yields the page list
                if index < 3:
                    interpreter.process_page(page)
                    # Receive the LTPage object for this page
layout = device.get_result()
for i in layout:
if isinstance(i, LTTextBoxHorizontal):
print(i.get_text())
first_three[index] += len(i.get_text())
else:
break
# print(first_three)
            # If the first three pages have the same character count and fewer than 50 characters, treat it as an image-based PDF
if max(first_three) < 50: # == min(first_three) and first_three[0]
return False
else:
return True
except Exception as ex:
return False
def read_from_pdf(file_path, page):
with open(file_path, 'rb') as file:
praser = PDFParser(file)
        # Create a PDF document object
        doc = PDFDocument()
        # Connect the parser with the document object
        praser.set_document(doc)
        doc.set_parser(praser)
        # Provide the initialization password
        # (if there is no password, use an empty string)
        doc.initialize()
        # Check whether the document is encrypted
if doc.encryption:
pdf = fitz.Document(file_path)
pdf.save('fitz_decrypt.pdf')
with open('fitz_decrypt.pdf', 'rb') as f:
praser = PDFParser(f)
doc = PDFDocument()
praser.set_document(doc)
doc.set_parser(praser)
doc.initialize()
# print('is_extractable', doc.is_extractable)
        # Create a PDF resource manager to manage shared resources
        rsrcmgr = PDFResourceManager()
        # Create a PDF device object
        laparams = LAParams()
        device = PDFPageAggregator(rsrcmgr, laparams=laparams)
        # Create a PDF interpreter object
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        page = list(doc.get_pages())[page]
        interpreter.process_page(page)
        # Receive the LTPage object for this page
layout = device.get_result()
result_data = []
# data_all = [[], []]
im_white = np.zeros((842, 595, 3), np.uint8)
im_white = 255 - im_white
img_data = []
for x in layout:
pos = [int(x.x0), int(842.25 - x.y0), int(x.x1), int(842.25 - x.y0), int(x.x0), int(842.25 - x.y1),
int(x.x1), int(842.25 - x.y1), 0]
if isinstance(x, LTRect):
cv2.rectangle(im_white, (int(x.x0), int(842.25 - x.y0)), (int(x.x1), int(842.25 - x.y1)), (255, 0, 0), 2)
if isinstance(x, LTImage):
img_data.append([pos[0], pos[1], pos[6], pos[7]])
elif isinstance(x, LTFigure):
img_data.append([pos[0], pos[1], pos[6], pos[7]])
elif isinstance(x, LTTextBoxHorizontal):
# print(x.get_text())
for obj in x._objs:
for i in obj:
try:
x0, y0, x1, y1 = int(i.x0), int(842.25 - i.y0), int(i.x1), int(842.25 - i.y1)
result_data.append([i._text, [x0, y0, x1, y1]])
except:
pass
return result_data, img_data, im_white # data_all,
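# Note on the "842.25 - y" terms above (an inference from the hard-coded A4 sizes):
# pdfminer reports coordinates in PDF points with the origin at the bottom-left of
# the page, while the image arrays used here put the origin at the top-left, so
# subtracting y from the A4 page height (~842 pt at 72 dpi) flips the axis.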
def extract_pdf_text(pdf_path, page):
data, im_data, im_white = read_from_pdf(pdf_path, page)
data = sorted(data, key=lambda x: x[1][1])
pdf = fitz.open(pdf_path)
page_fitz = pdf[page]
trans = fitz.Matrix(3, 3).preRotate(0)
pm = page_fitz.getPixmap(matrix=trans, alpha=False)
img = Image.frombytes("RGB", [pm.width, pm.height], pm.samples)
if img.height > img.width:
img.thumbnail((595.5, 842.25), Image.ANTIALIAS)
else:
img.thumbnail((842.25, 595.5), Image.ANTIALIAS)
tables = extract_table(np.array(img))
if tables == 'not table':
tables = []
table_data = []
for i in data:
if tables:
cen_point = 0.5 * (i[1][1] + i[1][3])
flg = False
for table in tables:
table_pos = table[0][-1]
if table_pos[1] < cen_point < table_pos[1] + table_pos[3]:
flg = True
if flg:
table_data.append(i)
text_data = [i for i in data if i not in table_data]
new_text_data = []
for index, i in enumerate(text_data):
if index == 0:
new_text_data.append([i])
else:
if i[1][1] - new_text_data[-1][-1][1][1] < 2:
new_text_data[-1].append(i)
else:
new_text_data.append([i])
new_text_data = [sorted(i, key=lambda x: x[1][0]) for i in new_text_data]
new_text_data = [[''.join([j[0] for j in i]), [i[0][1][0], i[0][1][1], i[-1][1][2], i[0][1][1], i[0][1][0],
i[0][1][3], i[-1][1][2], i[0][1][3], 0]] for i in new_text_data]
new_text_data = [[[np.array(i[1]), np.array(i[1])], i[0]] for i in new_text_data]
for table in tables:
table_index = 0
for index, i in enumerate(new_text_data):
if i[0] == 'table':
if table[0][1][1] > i[1][3][1]:
table_index = index + 1
elif table[0][1][1] > i[0][0][1]:
table_index = index + 1
new_text_data.insert(table_index, ['table', generate_table(table, table_data=table_data)])
data = [new_text_data, im_data]
result = sort_paragraph(img, data)
return result, img
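# Rendering note: fitz.Matrix(3, 3) rasterizes the page at 3x zoom (presumably for
# sharper table-line detection), and the thumbnail() call then scales the bitmap
# back down to PDF point dimensions (595.5 x 842.25 for portrait A4), so pixel
# coordinates line up with the pdfminer text coordinates from read_from_pdf().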
if __name__ == '__main__':
from docx import Document
from docx.oxml.ns import qn
doc = Document()
doc.styles['Normal'].font.name = u'宋体'
doc.styles['Normal']._element.rPr.rFonts.set(qn('w:eastAsia'), u'宋体')
pdf_path = r'F:\数据\txt_pdfs\16550822.pdf'
start_page, end_page = 0, 1
for page in range(start_page, end_page): # pdf_pages(pdf_path)
print(page)
print(pdf_is_text(pdf_path))
result, img = extract_pdf_text(pdf_path, page)
print(result)
doc = save2docx(doc, result, img, page)
if page == end_page-1:
continue
doc.add_section()
doc.save('aaa2.docx')
# for i in os.listdir(r'F:\数据\txt_pdfs'):
# if not pdf_is_text(r'F:\数据\txt_pdfs/' + i):
# print(i)
# print(pdf_is_text(pdf_path))
| 34.382239
| 121
| 0.545536
|
4a12eb79f9fff950147c61c2aa182e1fc7f00ff8
| 34,679
|
py
|
Python
|
KLDD_alpha/layout/layout.py
|
shihchiehlin1224/Kuo-Lin-Dynamical-Diffraction
|
386ad440c0ca6e2bfedc17b2517ce4b95af73c22
|
[
"Zlib"
] | 2
|
2019-06-26T07:21:08.000Z
|
2019-06-26T07:44:39.000Z
|
KLDD_alpha/layout/layout.py
|
tandent1985/Kuo-Lin-Dynamical-Diffraction
|
386ad440c0ca6e2bfedc17b2517ce4b95af73c22
|
[
"Zlib"
] | null | null | null |
KLDD_alpha/layout/layout.py
|
tandent1985/Kuo-Lin-Dynamical-Diffraction
|
386ad440c0ca6e2bfedc17b2517ce4b95af73c22
|
[
"Zlib"
] | 1
|
2019-11-01T21:31:45.000Z
|
2019-11-01T21:31:45.000Z
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'KLDD_layout_n.ui'
#
# Created by: PyQt5 UI code generator 5.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName("MainWindow")
MainWindow.resize(805, 684)
MainWindow.setMinimumSize(QtCore.QSize(805, 684))
MainWindow.setMaximumSize(QtCore.QSize(805, 684))
MainWindow.setTabShape(QtWidgets.QTabWidget.Rounded)
self.KLDD_widget = QtWidgets.QWidget(MainWindow)
self.KLDD_widget.setMinimumSize(QtCore.QSize(805, 639))
self.KLDD_widget.setMaximumSize(QtCore.QSize(805, 640))
self.KLDD_widget.setObjectName("KLDD_widget")
self.gridLayout = QtWidgets.QGridLayout(self.KLDD_widget)
self.gridLayout.setObjectName("gridLayout")
self.frame_3 = QtWidgets.QFrame(self.KLDD_widget)
self.frame_3.setMinimumSize(QtCore.QSize(490, 300))
self.frame_3.setMaximumSize(QtCore.QSize(490, 300))
self.frame_3.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_3.setLineWidth(1)
self.frame_3.setObjectName("frame_3")
self.verticalLayout = QtWidgets.QVBoxLayout(self.frame_3)
self.verticalLayout.setContentsMargins(3, 0, 3, 0)
self.verticalLayout.setSpacing(0)
self.verticalLayout.setObjectName("verticalLayout")
self.widget_7 = QtWidgets.QWidget(self.frame_3)
self.widget_7.setMaximumSize(QtCore.QSize(16777215, 50))
self.widget_7.setObjectName("widget_7")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget_7)
self.horizontalLayout_3.setContentsMargins(-1, 5, -1, 5)
self.horizontalLayout_3.setSpacing(5)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.label_3 = QtWidgets.QLabel(self.widget_7)
self.label_3.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_3.setTextFormat(QtCore.Qt.RichText)
self.label_3.setScaledContents(True)
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.horizontalLayout_3.addWidget(self.label_3)
self.push_simu_REF = QtWidgets.QPushButton(self.widget_7)
self.push_simu_REF.setMinimumSize(QtCore.QSize(100, 30))
self.push_simu_REF.setMaximumSize(QtCore.QSize(100, 30))
self.push_simu_REF.setAutoFillBackground(True)
self.push_simu_REF.setObjectName("push_simu_REF")
self.horizontalLayout_3.addWidget(self.push_simu_REF)
self.bt_load_ref = QtWidgets.QPushButton(self.widget_7)
self.bt_load_ref.setMinimumSize(QtCore.QSize(120, 30))
self.bt_load_ref.setMaximumSize(QtCore.QSize(120, 30))
self.bt_load_ref.setObjectName("bt_load_ref")
self.horizontalLayout_3.addWidget(self.bt_load_ref)
spacerItem1 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem1)
self.verticalLayout.addWidget(self.widget_7)
self.gridLayout.addWidget(self.frame_3, 0, 1, 1, 1)
self.widget_14 = QtWidgets.QWidget(self.KLDD_widget)
self.widget_14.setMinimumSize(QtCore.QSize(250, 300))
self.widget_14.setMaximumSize(QtCore.QSize(270, 300))
self.widget_14.setObjectName("widget_14")
self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.widget_14)
self.verticalLayout_7.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_7.setSpacing(5)
self.verticalLayout_7.setObjectName("verticalLayout_7")
self.frame_6 = QtWidgets.QFrame(self.widget_14)
self.frame_6.setMinimumSize(QtCore.QSize(250, 0))
self.frame_6.setMaximumSize(QtCore.QSize(270, 70))
self.frame_6.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_6.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_6.setLineWidth(1)
self.frame_6.setMidLineWidth(0)
self.frame_6.setObjectName("frame_6")
self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.frame_6)
self.verticalLayout_6.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_6.setSpacing(5)
self.verticalLayout_6.setObjectName("verticalLayout_6")
self.label = QtWidgets.QLabel(self.frame_6)
self.label.setMinimumSize(QtCore.QSize(0, 27))
self.label.setMaximumSize(QtCore.QSize(16777215, 27))
self.label.setAlignment(QtCore.Qt.AlignBottom|QtCore.Qt.AlignHCenter)
self.label.setWordWrap(True)
self.label.setIndent(0)
self.label.setOpenExternalLinks(False)
self.label.setObjectName("label")
self.verticalLayout_6.addWidget(self.label)
self.widget_5 = QtWidgets.QWidget(self.frame_6)
self.widget_5.setMinimumSize(QtCore.QSize(0, 35))
self.widget_5.setMaximumSize(QtCore.QSize(16777215, 35))
self.widget_5.setObjectName("widget_5")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.widget_5)
self.horizontalLayout.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout.setSpacing(5)
self.horizontalLayout.setObjectName("horizontalLayout")
self.show_load = QtWidgets.QLineEdit(self.widget_5)
self.show_load.setLayoutDirection(QtCore.Qt.RightToLeft)
self.show_load.setAlignment(QtCore.Qt.AlignCenter)
self.show_load.setObjectName("show_load")
self.horizontalLayout.addWidget(self.show_load)
self.browse_file = QtWidgets.QPushButton(self.widget_5)
self.browse_file.setObjectName("browse_file")
self.horizontalLayout.addWidget(self.browse_file)
self.verticalLayout_6.addWidget(self.widget_5)
self.verticalLayout_7.addWidget(self.frame_6)
self.frame = QtWidgets.QFrame(self.widget_14)
self.frame.setMinimumSize(QtCore.QSize(250, 0))
self.frame.setMaximumSize(QtCore.QSize(270, 16777215))
self.frame.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame.setLineWidth(1)
self.frame.setObjectName("frame")
self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.frame)
self.verticalLayout_4.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_4.setSpacing(5)
self.verticalLayout_4.setObjectName("verticalLayout_4")
self.label_5 = QtWidgets.QLabel(self.frame)
self.label_5.setMinimumSize(QtCore.QSize(0, 20))
self.label_5.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_5.setTextFormat(QtCore.Qt.RichText)
self.label_5.setAlignment(QtCore.Qt.AlignCenter)
self.label_5.setObjectName("label_5")
self.verticalLayout_4.addWidget(self.label_5)
self.widget_6 = QtWidgets.QWidget(self.frame)
self.widget_6.setMinimumSize(QtCore.QSize(0, 25))
self.widget_6.setMaximumSize(QtCore.QSize(16777215, 25))
self.widget_6.setObjectName("widget_6")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.widget_6)
self.horizontalLayout_2.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.Bragg_h = QtWidgets.QLineEdit(self.widget_6)
self.Bragg_h.setLayoutDirection(QtCore.Qt.LeftToRight)
self.Bragg_h.setAlignment(QtCore.Qt.AlignCenter)
self.Bragg_h.setObjectName("Bragg_h")
self.horizontalLayout_2.addWidget(self.Bragg_h)
self.Bragg_k = QtWidgets.QLineEdit(self.widget_6)
self.Bragg_k.setAlignment(QtCore.Qt.AlignCenter)
self.Bragg_k.setObjectName("Bragg_k")
self.horizontalLayout_2.addWidget(self.Bragg_k)
self.Bragg_l = QtWidgets.QLineEdit(self.widget_6)
self.Bragg_l.setLayoutDirection(QtCore.Qt.RightToLeft)
self.Bragg_l.setAlignment(QtCore.Qt.AlignCenter)
self.Bragg_l.setObjectName("Bragg_l")
self.horizontalLayout_2.addWidget(self.Bragg_l)
self.Bragg_k.raise_()
self.Bragg_l.raise_()
self.Bragg_h.raise_()
self.verticalLayout_4.addWidget(self.widget_6)
self.widget_9 = QtWidgets.QWidget(self.frame)
self.widget_9.setMinimumSize(QtCore.QSize(0, 17))
self.widget_9.setMaximumSize(QtCore.QSize(16777215, 20))
self.widget_9.setObjectName("widget_9")
self.horizontalLayout_9 = QtWidgets.QHBoxLayout(self.widget_9)
self.horizontalLayout_9.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_9.setSpacing(0)
self.horizontalLayout_9.setObjectName("horizontalLayout_9")
self.label_7 = QtWidgets.QLabel(self.widget_9)
self.label_7.setMinimumSize(QtCore.QSize(0, 20))
self.label_7.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_7.setAlignment(QtCore.Qt.AlignCenter)
self.label_7.setObjectName("label_7")
self.horizontalLayout_9.addWidget(self.label_7)
self.label_9 = QtWidgets.QLabel(self.widget_9)
self.label_9.setMinimumSize(QtCore.QSize(0, 20))
self.label_9.setAlignment(QtCore.Qt.AlignCenter)
self.label_9.setIndent(0)
self.label_9.setObjectName("label_9")
self.horizontalLayout_9.addWidget(self.label_9)
self.verticalLayout_4.addWidget(self.widget_9)
self.widget_10 = QtWidgets.QWidget(self.frame)
self.widget_10.setMinimumSize(QtCore.QSize(0, 23))
self.widget_10.setMaximumSize(QtCore.QSize(16777215, 22))
self.widget_10.setObjectName("widget_10")
self.horizontalLayout_8 = QtWidgets.QHBoxLayout(self.widget_10)
self.horizontalLayout_8.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_8.setObjectName("horizontalLayout_8")
self.photon_energy = QtWidgets.QLineEdit(self.widget_10)
self.photon_energy.setMinimumSize(QtCore.QSize(105, 21))
self.photon_energy.setAlignment(QtCore.Qt.AlignCenter)
self.photon_energy.setPlaceholderText("eV")
self.photon_energy.setObjectName("photon_energy")
self.horizontalLayout_8.addWidget(self.photon_energy)
self.choose_mode = QtWidgets.QComboBox(self.widget_10)
self.choose_mode.setMinimumSize(QtCore.QSize(110, 27))
self.choose_mode.setStatusTip("")
self.choose_mode.setLayoutDirection(QtCore.Qt.LeftToRight)
self.choose_mode.setObjectName("choose_mode")
self.choose_mode.addItem("")
self.choose_mode.addItem("")
self.horizontalLayout_8.addWidget(self.choose_mode)
self.verticalLayout_4.addWidget(self.widget_10)
self.label_6 = QtWidgets.QLabel(self.frame)
self.label_6.setMinimumSize(QtCore.QSize(0, 20))
self.label_6.setMaximumSize(QtCore.QSize(16777215, 20))
self.label_6.setAlignment(QtCore.Qt.AlignCenter)
self.label_6.setObjectName("label_6")
self.verticalLayout_4.addWidget(self.label_6)
self.widget_11 = QtWidgets.QWidget(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Preferred)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.widget_11.sizePolicy().hasHeightForWidth())
self.widget_11.setSizePolicy(sizePolicy)
self.widget_11.setMinimumSize(QtCore.QSize(0, 25))
self.widget_11.setMaximumSize(QtCore.QSize(16777215, 25))
self.widget_11.setObjectName("widget_11")
self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.widget_11)
self.horizontalLayout_5.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_5.setObjectName("horizontalLayout_5")
self.range_start = QtWidgets.QLineEdit(self.widget_11)
self.range_start.setLayoutDirection(QtCore.Qt.LeftToRight)
self.range_start.setAlignment(QtCore.Qt.AlignCenter)
self.range_start.setObjectName("range_start")
self.horizontalLayout_5.addWidget(self.range_start)
self.range_end = QtWidgets.QLineEdit(self.widget_11)
self.range_end.setAlignment(QtCore.Qt.AlignCenter)
self.range_end.setObjectName("range_end")
self.horizontalLayout_5.addWidget(self.range_end)
self.step_size = QtWidgets.QLineEdit(self.widget_11)
self.step_size.setLayoutDirection(QtCore.Qt.RightToLeft)
self.step_size.setAlignment(QtCore.Qt.AlignCenter)
self.step_size.setObjectName("step_size")
self.horizontalLayout_5.addWidget(self.step_size)
self.verticalLayout_4.addWidget(self.widget_11)
self.widget_13 = QtWidgets.QWidget(self.frame)
self.widget_13.setMaximumSize(QtCore.QSize(16777215, 20))
self.widget_13.setObjectName("widget_13")
self.horizontalLayout_7 = QtWidgets.QHBoxLayout(self.widget_13)
self.horizontalLayout_7.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_7.setObjectName("horizontalLayout_7")
self.label_8 = QtWidgets.QLabel(self.widget_13)
self.label_8.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_8.setAlignment(QtCore.Qt.AlignCenter)
self.label_8.setObjectName("label_8")
self.horizontalLayout_7.addWidget(self.label_8)
self.label_2 = QtWidgets.QLabel(self.widget_13)
self.label_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_2.setObjectName("label_2")
self.horizontalLayout_7.addWidget(self.label_2)
self.verticalLayout_4.addWidget(self.widget_13)
self.widget_12 = QtWidgets.QWidget(self.frame)
self.widget_12.setMinimumSize(QtCore.QSize(100, 25))
self.widget_12.setMaximumSize(QtCore.QSize(16777215, 25))
self.widget_12.setObjectName("widget_12")
self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.widget_12)
self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_6.setObjectName("horizontalLayout_6")
self.polar_angle = QtWidgets.QLineEdit(self.widget_12)
self.polar_angle.setMinimumSize(QtCore.QSize(105, 21))
self.polar_angle.setAlignment(QtCore.Qt.AlignCenter)
self.polar_angle.setObjectName("polar_angle")
self.horizontalLayout_6.addWidget(self.polar_angle)
self.take_off_angle = QtWidgets.QLineEdit(self.widget_12)
self.take_off_angle.setMinimumSize(QtCore.QSize(105, 21))
self.take_off_angle.setLayoutDirection(QtCore.Qt.RightToLeft)
self.take_off_angle.setAlignment(QtCore.Qt.AlignCenter)
self.take_off_angle.setObjectName("take_off_angle")
self.horizontalLayout_6.addWidget(self.take_off_angle)
self.verticalLayout_4.addWidget(self.widget_12)
self.verticalLayout_7.addWidget(self.frame)
self.gridLayout.addWidget(self.widget_14, 0, 0, 1, 1)
self.widget_2 = QtWidgets.QWidget(self.KLDD_widget)
self.widget_2.setMinimumSize(QtCore.QSize(250, 300))
self.widget_2.setMaximumSize(QtCore.QSize(270, 300))
self.widget_2.setObjectName("widget_2")
self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.widget_2)
self.verticalLayout_5.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_5.setSpacing(5)
self.verticalLayout_5.setObjectName("verticalLayout_5")
self.frame_7 = QtWidgets.QFrame(self.widget_2)
self.frame_7.setMinimumSize(QtCore.QSize(250, 60))
self.frame_7.setMaximumSize(QtCore.QSize(270, 60))
self.frame_7.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_7.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_7.setLineWidth(1)
self.frame_7.setMidLineWidth(0)
self.frame_7.setObjectName("frame_7")
self.verticalLayout_8 = QtWidgets.QVBoxLayout(self.frame_7)
self.verticalLayout_8.setContentsMargins(5, 0, 5, 5)
self.verticalLayout_8.setSpacing(4)
self.verticalLayout_8.setObjectName("verticalLayout_8")
self.widget_15 = QtWidgets.QWidget(self.frame_7)
self.widget_15.setMinimumSize(QtCore.QSize(0, 35))
self.widget_15.setMaximumSize(QtCore.QSize(16777215, 35))
self.widget_15.setObjectName("widget_15")
self.horizontalLayout_11 = QtWidgets.QHBoxLayout(self.widget_15)
self.horizontalLayout_11.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout_11.setSpacing(5)
self.horizontalLayout_11.setObjectName("horizontalLayout_11")
self.label_10 = QtWidgets.QLabel(self.widget_15)
self.label_10.setMaximumSize(QtCore.QSize(16777215, 23))
self.label_10.setSizeIncrement(QtCore.QSize(0, 27))
self.label_10.setAlignment(QtCore.Qt.AlignCenter)
self.label_10.setObjectName("label_10")
self.horizontalLayout_11.addWidget(self.label_10)
self.verticalLayout_8.addWidget(self.widget_15)
self.widget_16 = QtWidgets.QWidget(self.frame_7)
self.widget_16.setMinimumSize(QtCore.QSize(100, 25))
self.widget_16.setMaximumSize(QtCore.QSize(16777215, 25))
self.widget_16.setObjectName("widget_16")
self.horizontalLayout_10 = QtWidgets.QHBoxLayout(self.widget_16)
self.horizontalLayout_10.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_10.setObjectName("horizontalLayout_10")
self.DWF = QtWidgets.QLineEdit(self.widget_16)
self.DWF.setMinimumSize(QtCore.QSize(105, 21))
self.DWF.setAlignment(QtCore.Qt.AlignCenter)
self.DWF.setObjectName("DWF")
self.horizontalLayout_10.addWidget(self.DWF)
self.sigma = QtWidgets.QLineEdit(self.widget_16)
self.sigma.setMinimumSize(QtCore.QSize(105, 21))
self.sigma.setLayoutDirection(QtCore.Qt.RightToLeft)
self.sigma.setAlignment(QtCore.Qt.AlignCenter)
self.sigma.setObjectName("sigma")
self.horizontalLayout_10.addWidget(self.sigma)
self.verticalLayout_8.addWidget(self.widget_16)
self.verticalLayout_5.addWidget(self.frame_7)
self.frame_2 = QtWidgets.QFrame(self.widget_2)
self.frame_2.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_2.setObjectName("frame_2")
self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_2)
self.verticalLayout_3.setContentsMargins(5, 5, 5, 5)
self.verticalLayout_3.setSpacing(5)
self.verticalLayout_3.setObjectName("verticalLayout_3")
self.widget = QtWidgets.QWidget(self.frame_2)
self.widget.setObjectName("widget")
self.formLayout = QtWidgets.QFormLayout(self.widget)
self.formLayout.setFieldGrowthPolicy(QtWidgets.QFormLayout.FieldsStayAtSizeHint)
self.formLayout.setLabelAlignment(QtCore.Qt.AlignCenter)
self.formLayout.setFormAlignment(QtCore.Qt.AlignCenter)
self.formLayout.setContentsMargins(0, 0, 0, 0)
self.formLayout.setHorizontalSpacing(5)
self.formLayout.setVerticalSpacing(1)
self.formLayout.setObjectName("formLayout")
self.label_13 = QtWidgets.QLabel(self.widget)
self.label_13.setMinimumSize(QtCore.QSize(123, 21))
self.label_13.setMaximumSize(QtCore.QSize(123, 21))
self.label_13.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_13.setObjectName("label_13")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.label_13)
self.label_11 = QtWidgets.QLabel(self.widget)
self.label_11.setMinimumSize(QtCore.QSize(123, 21))
self.label_11.setScaledContents(False)
self.label_11.setAlignment(QtCore.Qt.AlignCenter)
self.label_11.setObjectName("label_11")
self.formLayout.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.label_11)
self.unit_cell_spinBox = QtWidgets.QSpinBox(self.widget)
self.unit_cell_spinBox.setMinimumSize(QtCore.QSize(115, 24))
self.unit_cell_spinBox.setMaximumSize(QtCore.QSize(130, 24))
self.unit_cell_spinBox.setWrapping(True)
self.unit_cell_spinBox.setMaximum(99)
self.unit_cell_spinBox.setProperty("value", 1)
self.unit_cell_spinBox.setObjectName("unit_cell_spinBox")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.unit_cell_spinBox)
self.IMFP_lineEdit = QtWidgets.QLineEdit(self.widget)
self.IMFP_lineEdit.setMinimumSize(QtCore.QSize(123, 21))
self.IMFP_lineEdit.setMaximumSize(QtCore.QSize(123, 21))
self.IMFP_lineEdit.setAlignment(QtCore.Qt.AlignCenter)
self.IMFP_lineEdit.setObjectName("IMFP_lineEdit")
self.formLayout.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.IMFP_lineEdit)
self.verticalLayout_3.addWidget(self.widget)
self.line = QtWidgets.QFrame(self.frame_2)
self.line.setFrameShape(QtWidgets.QFrame.HLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.verticalLayout_3.addWidget(self.line)
self.widget_21 = QtWidgets.QWidget(self.frame_2)
self.widget_21.setMinimumSize(QtCore.QSize(0, 100))
self.widget_21.setMaximumSize(QtCore.QSize(16777215, 200))
self.widget_21.setObjectName("widget_21")
self.gridLayout_3 = QtWidgets.QGridLayout(self.widget_21)
self.gridLayout_3.setContentsMargins(0, 0, 0, 0)
self.gridLayout_3.setVerticalSpacing(4)
self.gridLayout_3.setObjectName("gridLayout_3")
self.label_16 = QtWidgets.QLabel(self.widget_21)
self.label_16.setMaximumSize(QtCore.QSize(16777215, 18))
self.label_16.setLayoutDirection(QtCore.Qt.LeftToRight)
self.label_16.setAlignment(QtCore.Qt.AlignCenter)
self.label_16.setObjectName("label_16")
self.gridLayout_3.addWidget(self.label_16, 0, 1, 1, 1)
self.input_atom_2 = QtWidgets.QLineEdit(self.widget_21)
self.input_atom_2.setMinimumSize(QtCore.QSize(65, 0))
self.input_atom_2.setMaximumSize(QtCore.QSize(90, 16777215))
self.input_atom_2.setMaxLength(8)
self.input_atom_2.setAlignment(QtCore.Qt.AlignCenter)
self.input_atom_2.setObjectName("input_atom_2")
self.gridLayout_3.addWidget(self.input_atom_2, 2, 0, 1, 1)
self.label_15 = QtWidgets.QLabel(self.widget_21)
self.label_15.setMaximumSize(QtCore.QSize(16777215, 18))
self.label_15.setAlignment(QtCore.Qt.AlignCenter)
self.label_15.setObjectName("label_15")
self.gridLayout_3.addWidget(self.label_15, 0, 0, 1, 1)
self.input_atom_1 = QtWidgets.QLineEdit(self.widget_21)
self.input_atom_1.setMinimumSize(QtCore.QSize(65, 0))
self.input_atom_1.setMaximumSize(QtCore.QSize(90, 16777215))
self.input_atom_1.setMaxLength(8)
self.input_atom_1.setAlignment(QtCore.Qt.AlignCenter)
self.input_atom_1.setObjectName("input_atom_1")
self.gridLayout_3.addWidget(self.input_atom_1, 1, 0, 1, 1)
self.input_atom_3 = QtWidgets.QLineEdit(self.widget_21)
self.input_atom_3.setMinimumSize(QtCore.QSize(65, 0))
self.input_atom_3.setMaximumSize(QtCore.QSize(90, 16777215))
self.input_atom_3.setMaxLength(8)
self.input_atom_3.setAlignment(QtCore.Qt.AlignCenter)
self.input_atom_3.setObjectName("input_atom_3")
self.gridLayout_3.addWidget(self.input_atom_3, 3, 0, 1, 1)
self.verticalLayout_3.addWidget(self.widget_21)
self.verticalLayout_5.addWidget(self.frame_2)
self.frame_8 = QtWidgets.QFrame(self.widget_2)
self.frame_8.setMinimumSize(QtCore.QSize(250, 65))
self.frame_8.setMaximumSize(QtCore.QSize(270, 65))
self.frame_8.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_8.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_8.setLineWidth(1)
self.frame_8.setMidLineWidth(0)
self.frame_8.setObjectName("frame_8")
self.verticalLayout_9 = QtWidgets.QVBoxLayout(self.frame_8)
self.verticalLayout_9.setContentsMargins(0, 0, 0, 0)
self.verticalLayout_9.setSpacing(0)
self.verticalLayout_9.setObjectName("verticalLayout_9")
self.widget_18 = QtWidgets.QWidget(self.frame_8)
self.widget_18.setMinimumSize(QtCore.QSize(0, 35))
self.widget_18.setMaximumSize(QtCore.QSize(16777215, 35))
self.widget_18.setObjectName("widget_18")
self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.widget_18)
self.horizontalLayout_13.setContentsMargins(5, 5, 5, 5)
self.horizontalLayout_13.setSpacing(5)
self.horizontalLayout_13.setObjectName("horizontalLayout_13")
self.label_12 = QtWidgets.QLabel(self.widget_18)
self.label_12.setMinimumSize(QtCore.QSize(0, 27))
self.label_12.setMaximumSize(QtCore.QSize(16777215, 27))
self.label_12.setSizeIncrement(QtCore.QSize(0, 30))
self.label_12.setAlignment(QtCore.Qt.AlignCenter)
self.label_12.setObjectName("label_12")
self.horizontalLayout_13.addWidget(self.label_12)
self.verticalLayout_9.addWidget(self.widget_18)
self.widget_19 = QtWidgets.QWidget(self.frame_8)
self.widget_19.setMinimumSize(QtCore.QSize(100, 33))
self.widget_19.setMaximumSize(QtCore.QSize(16777215, 33))
self.widget_19.setObjectName("widget_19")
self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.widget_19)
self.horizontalLayout_14.setContentsMargins(0, 0, 0, 0)
self.horizontalLayout_14.setObjectName("horizontalLayout_14")
self.save_REF_txt = QtWidgets.QPushButton(self.widget_19)
self.save_REF_txt.setMinimumSize(QtCore.QSize(32, 32))
self.save_REF_txt.setObjectName("save_REF_txt")
self.horizontalLayout_14.addWidget(self.save_REF_txt)
self.save_RC_txt = QtWidgets.QPushButton(self.widget_19)
self.save_RC_txt.setMinimumSize(QtCore.QSize(32, 32))
self.save_RC_txt.setObjectName("save_RC_txt")
self.horizontalLayout_14.addWidget(self.save_RC_txt)
self.verticalLayout_9.addWidget(self.widget_19)
self.verticalLayout_5.addWidget(self.frame_8)
self.gridLayout.addWidget(self.widget_2, 1, 0, 1, 1)
self.frame_4 = QtWidgets.QFrame(self.KLDD_widget)
self.frame_4.setMinimumSize(QtCore.QSize(490, 300))
self.frame_4.setMaximumSize(QtCore.QSize(490, 300))
self.frame_4.setFrameShape(QtWidgets.QFrame.WinPanel)
self.frame_4.setFrameShadow(QtWidgets.QFrame.Sunken)
self.frame_4.setLineWidth(1)
self.frame_4.setObjectName("frame_4")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.frame_4)
self.verticalLayout_2.setContentsMargins(3, 0, 3, 0)
self.verticalLayout_2.setSpacing(0)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.widget_8 = QtWidgets.QWidget(self.frame_4)
self.widget_8.setMaximumSize(QtCore.QSize(16777215, 50))
self.widget_8.setObjectName("widget_8")
self.horizontalLayout_4 = QtWidgets.QHBoxLayout(self.widget_8)
self.horizontalLayout_4.setContentsMargins(-1, 5, -1, 5)
self.horizontalLayout_4.setSpacing(5)
self.horizontalLayout_4.setObjectName("horizontalLayout_4")
spacerItem2 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem2)
self.label_4 = QtWidgets.QLabel(self.widget_8)
self.label_4.setMaximumSize(QtCore.QSize(16777215, 30))
self.label_4.setTextFormat(QtCore.Qt.RichText)
self.label_4.setScaledContents(True)
self.label_4.setAlignment(QtCore.Qt.AlignCenter)
self.label_4.setObjectName("label_4")
self.horizontalLayout_4.addWidget(self.label_4)
self.push_simu_RC = QtWidgets.QPushButton(self.widget_8)
self.push_simu_RC.setMinimumSize(QtCore.QSize(100, 30))
self.push_simu_RC.setMaximumSize(QtCore.QSize(100, 30))
self.push_simu_RC.setObjectName("push_simu_RC")
self.horizontalLayout_4.addWidget(self.push_simu_RC)
self.bt_load_rc = QtWidgets.QPushButton(self.widget_8)
self.bt_load_rc.setMinimumSize(QtCore.QSize(120, 30))
self.bt_load_rc.setMaximumSize(QtCore.QSize(120, 30))
self.bt_load_rc.setObjectName("bt_load_rc")
self.horizontalLayout_4.addWidget(self.bt_load_rc)
spacerItem3 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.horizontalLayout_4.addItem(spacerItem3)
self.verticalLayout_2.addWidget(self.widget_8)
self.gridLayout.addWidget(self.frame_4, 1, 1, 1, 1)
MainWindow.setCentralWidget(self.KLDD_widget)
self.menubar = QtWidgets.QMenuBar(MainWindow)
self.menubar.setGeometry(QtCore.QRect(0, 0, 805, 22))
self.menubar.setObjectName("menubar")
MainWindow.setMenuBar(self.menubar)
self.actionLOAD = QtWidgets.QAction(MainWindow)
self.actionLOAD.setObjectName("actionLOAD")
self.retranslateUi(MainWindow)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
_translate = QtCore.QCoreApplication.translate
MainWindow.setWindowTitle(_translate("MainWindow", "KLDD"))
self.label_3.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#000080;\">Reflectivities</span></p></body></html>"))
self.push_simu_REF.setText(_translate("MainWindow", "SIMULATE"))
self.bt_load_ref.setText(_translate("MainWindow", "LOAD EXP REF"))
self.label.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#000000;\">Load Material Information</span></p></body></html>"))
self.show_load.setPlaceholderText(_translate("MainWindow", "Loaded File"))
self.browse_file.setText(_translate("MainWindow", "Browse"))
self.label_5.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Bragg Reflection (</span><span style=\" font-size:16pt; font-style:italic;\">h</span><span style=\" font-size:16pt;\">, </span><span style=\" font-size:16pt; font-style:italic;\">k</span><span style=\" font-size:16pt;\">, </span><span style=\" font-size:16pt; font-style:italic;\">l </span><span style=\" font-size:16pt;\">)</span></p></body></html>"))
self.Bragg_h.setPlaceholderText(_translate("MainWindow", "h"))
self.Bragg_k.setPlaceholderText(_translate("MainWindow", "k"))
self.Bragg_l.setPlaceholderText(_translate("MainWindow", "l"))
self.label_7.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Photon Energy</span></p></body></html>"))
self.label_9.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Scan Mode</span></p></body></html>"))
self.choose_mode.setItemText(0, _translate("MainWindow", "Angular"))
self.choose_mode.setItemText(1, _translate("MainWindow", "Photonic"))
self.label_6.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Simulation Range (deg/eV)</span></p></body></html>"))
self.range_start.setPlaceholderText(_translate("MainWindow", "Start"))
self.range_end.setPlaceholderText(_translate("MainWindow", "End"))
self.step_size.setPlaceholderText(_translate("MainWindow", "Step Size"))
self.label_8.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Polar Angle</span></p></body></html>"))
self.label_2.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Take-off Angle</span></p></body></html>"))
self.polar_angle.setPlaceholderText(_translate("MainWindow", "deg"))
self.take_off_angle.setPlaceholderText(_translate("MainWindow", "deg"))
self.label_10.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; color:#000000;\">Gaussian broaden</span></p></body></html>"))
self.DWF.setPlaceholderText(_translate("MainWindow", "Debye-Waller Fac."))
self.sigma.setPlaceholderText(_translate("MainWindow", "Sigma"))
self.label_13.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">   # Unit Cells</span></p></body></html>"))
self.label_11.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">IMFP (<span>Å</span>)</span></p></body></html>"))
self.label_16.setToolTip(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:14pt; color:#0000ff;\">Select One or Multiple Atomic Species to Group</span></p></body></html>"))
self.label_16.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">RCs Group</span></p></body></html>"))
self.input_atom_2.setPlaceholderText(_translate("MainWindow", "species 2"))
self.label_15.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:16pt;\">Name</span></p></body></html>"))
self.input_atom_1.setPlaceholderText(_translate("MainWindow", "species 1"))
self.input_atom_3.setPlaceholderText(_translate("MainWindow", "species 3"))
self.label_12.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#000000;\">Save Simulations (.txt)</span></p></body></html>"))
self.save_REF_txt.setToolTip(_translate("MainWindow", "<html><head/><body><p><br/></p></body></html>"))
self.save_REF_txt.setText(_translate("MainWindow", "SAVE REFs"))
self.save_RC_txt.setText(_translate("MainWindow", "SAVE RCs"))
self.label_4.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:18pt; font-weight:600; color:#000080;\">Rocking Curves</span></p></body></html>"))
self.push_simu_RC.setText(_translate("MainWindow", "SIMULATE"))
self.bt_load_rc.setText(_translate("MainWindow", "LOAD EXP RCs"))
self.actionLOAD.setText(_translate("MainWindow", "LOAD"))
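# Standalone entry point: lets you preview the generated UI directly.
# In normal use this module is imported and Ui_MainWindow.setupUi() is
# called on an application-owned QMainWindow instead.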
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
MainWindow = QtWidgets.QMainWindow()
ui = Ui_MainWindow()
ui.setupUi(MainWindow)
MainWindow.show()
sys.exit(app.exec_())
| 60.947276
| 462
| 0.715592
|
4a12ebaf33afaa389a0ca7c124325c9687bc0570
| 37,501
|
py
|
Python
|
venv/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.8/site-packages/pip/_vendor/msgpack/fallback.py
|
realxwx/leetcode-solve
|
3a7d7d8e92a5fd5fecc347d141a1c532b92e763e
|
[
"Apache-2.0"
] | null | null | null |
"""Fallback pure Python implementation of msgpack"""
# Copyright (c) 2020
# Author: xiaoweixiang
import sys
import struct
import warnings
if sys.version_info[0] == 2:
PY2 = True
int_types = (int, long)
def dict_iteritems(d):
return d.iteritems()
else:
PY2 = False
int_types = int
unicode = str
xrange = range
def dict_iteritems(d):
return d.items()
if sys.version_info < (3, 5):
# Ugly hack...
RecursionError = RuntimeError
def _is_recursionerror(e):
return len(e.args) == 1 and isinstance(e.args[0], str) and \
e.args[0].startswith('maximum recursion depth exceeded')
else:
def _is_recursionerror(e):
return True
if hasattr(sys, 'pypy_version_info'):
# cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own
# StringBuilder is fastest.
from __pypy__ import newlist_hint
try:
from __pypy__.builders import BytesBuilder as StringBuilder
except ImportError:
from __pypy__.builders import StringBuilder
USING_STRINGBUILDER = True
class StringIO(object):
def __init__(self, s=b''):
if s:
self.builder = StringBuilder(len(s))
self.builder.append(s)
else:
self.builder = StringBuilder()
def write(self, s):
if isinstance(s, memoryview):
s = s.tobytes()
elif isinstance(s, bytearray):
s = bytes(s)
self.builder.append(s)
def getvalue(self):
return self.builder.build()
else:
USING_STRINGBUILDER = False
from io import BytesIO as StringIO
newlist_hint = lambda size: []
from .exceptions import (
BufferFull,
OutOfData,
ExtraData,
FormatError,
StackError,
)
from . import ExtType
EX_SKIP = 0
EX_CONSTRUCT = 1
EX_READ_ARRAY_HEADER = 2
EX_READ_MAP_HEADER = 3
TYPE_IMMEDIATE = 0
TYPE_ARRAY = 1
TYPE_MAP = 2
TYPE_RAW = 3
TYPE_BIN = 4
TYPE_EXT = 5
DEFAULT_RECURSE_LIMIT = 511
def _check_type_strict(obj, t, type=type, tuple=tuple):
if type(t) is tuple:
return type(obj) in t
else:
return type(obj) is t
def _get_data_from_buffer(obj):
try:
view = memoryview(obj)
except TypeError:
# try to use legacy buffer protocol if 2.7, otherwise re-raise
if PY2:
view = memoryview(buffer(obj))
warnings.warn("using old buffer interface to unpack %s; "
"this leads to unpacking errors if slicing is used and "
"will be removed in a future version" % type(obj),
RuntimeWarning, stacklevel=3)
else:
raise
if view.itemsize != 1:
raise ValueError("cannot unpack from multi-byte object")
return view
def unpack(stream, **kwargs):
warnings.warn(
"Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.",
DeprecationWarning, stacklevel=2)
data = stream.read()
return unpackb(data, **kwargs)
def unpackb(packed, **kwargs):
"""
Unpack an object from `packed`.
Raises ``ExtraData`` when *packed* contains extra bytes.
Raises ``ValueError`` when *packed* is incomplete.
Raises ``FormatError`` when *packed* is not valid msgpack.
Raises ``StackError`` when *packed* is too deeply nested.
Other exceptions can be raised during unpacking.
See :class:`Unpacker` for options.
"""
unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
unpacker.feed(packed)
try:
ret = unpacker._unpack()
except OutOfData:
raise ValueError("Unpack failed: incomplete input")
except RecursionError as e:
if _is_recursionerror(e):
raise StackError
raise
if unpacker._got_extradata():
raise ExtraData(ret, unpacker._get_extradata())
return ret
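# Round-trip sketch (illustrative; relies on the Packer defined later in
# this module):
#   packed = Packer(use_bin_type=True).pack({'compact': True})
#   unpackb(packed, raw=False)  # -> {'compact': True}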
if sys.version_info < (2, 7, 6):
def _unpack_from(f, b, o=0):
"""Explicit typcast for legacy struct.unpack_from"""
return struct.unpack_from(f, bytes(b), o)
else:
_unpack_from = struct.unpack_from
class Unpacker(object):
"""Streaming unpacker.
arguments:
:param file_like:
File-like object having `.read(n)` method.
If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
:param int read_size:
Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
:param bool use_list:
If true, unpack msgpack array to Python list.
Otherwise, unpack to Python tuple. (default: True)
:param bool raw:
If true, unpack msgpack raw to Python bytes (default).
Otherwise, unpack to Python str (or unicode on Python 2) by decoding
with UTF-8 encoding (recommended).
Currently, the default is true, but it will be changed to false in
the near future, so you must specify it explicitly to keep backward
compatibility.
The deprecated *encoding* option overrides this option.
:param bool strict_map_key:
If true, only str or bytes are accepted for map (dict) keys.
It's False by default for backward compatibility,
but it will be True from msgpack 1.0.
:param callable object_hook:
When specified, it should be callable.
Unpacker calls it with a dict argument after unpacking msgpack map.
(See also simplejson)
:param callable object_pairs_hook:
When specified, it should be callable.
Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
(See also simplejson)
:param str encoding:
Encoding used for decoding msgpack raw.
If it is None (default), msgpack raw is deserialized to Python bytes.
:param str unicode_errors:
(deprecated) Used for decoding msgpack raw with *encoding*.
(default: `'strict'`)
:param int max_buffer_size:
Limits the size of data waiting to be unpacked. 0 means system's INT_MAX (default).
Raises `BufferFull` exception when it is insufficient.
You should set this parameter when unpacking data from untrusted source.
:param int max_str_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of str. (default: max_buffer_size or 1024*1024)
:param int max_bin_len:
Deprecated, use *max_buffer_size* instead.
Limits max length of bin. (default: max_buffer_size or 1024*1024)
:param int max_array_len:
Limits max length of array.
(default: max_buffer_size or 128*1024)
:param int max_map_len:
Limits max length of map.
(default: max_buffer_size//2 or 32*1024)
:param int max_ext_len:
Deprecated, use *max_buffer_size* instead.
Limits max size of ext type. (default: max_buffer_size or 1024*1024)
Example of streaming deserialize from file-like object::
unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024)
for o in unpacker:
process(o)
Example of streaming deserialize from socket::
unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024)
while True:
buf = sock.recv(1024**2)
if not buf:
break
unpacker.feed(buf)
for o in unpacker:
process(o)
Raises ``ExtraData`` when *packed* contains extra bytes.
Raises ``OutOfData`` when *packed* is incomplete.
Raises ``FormatError`` when *packed* is not valid msgpack.
Raises ``StackError`` when *packed* is too deeply nested.
Other exceptions can be raised during unpacking.
"""
def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, strict_map_key=False,
object_hook=None, object_pairs_hook=None, list_hook=None,
encoding=None, unicode_errors=None, max_buffer_size=0,
ext_hook=ExtType,
max_str_len=-1,
max_bin_len=-1,
max_array_len=-1,
max_map_len=-1,
max_ext_len=-1):
if encoding is not None:
warnings.warn(
"encoding is deprecated, Use raw=False instead.",
DeprecationWarning, stacklevel=2)
if unicode_errors is None:
unicode_errors = 'strict'
if file_like is None:
self._feeding = True
else:
if not callable(file_like.read):
raise TypeError("`file_like.read` must be callable")
self.file_like = file_like
self._feeding = False
#: array of bytes fed.
self._buffer = bytearray()
#: Current read position within the buffer.
self._buff_i = 0
# When Unpacker is used as an iterable, between the calls to next(),
# the buffer is not "consumed" completely, for efficiency sake.
# Instead, it is done sloppily. To make sure we raise BufferFull at
# the correct moments, we have to keep track of how sloppy we were.
# Furthermore, when the buffer is incomplete (that is: in the case
# we raise an OutOfData) we need to rollback the buffer to the correct
# state, which _buf_checkpoint records.
self._buf_checkpoint = 0
if max_str_len == -1:
max_str_len = max_buffer_size or 1024*1024
if max_bin_len == -1:
max_bin_len = max_buffer_size or 1024*1024
if max_array_len == -1:
max_array_len = max_buffer_size or 128*1024
if max_map_len == -1:
max_map_len = max_buffer_size//2 or 32*1024
if max_ext_len == -1:
max_ext_len = max_buffer_size or 1024*1024
self._max_buffer_size = max_buffer_size or 2**31-1
if read_size > self._max_buffer_size:
raise ValueError("read_size must be smaller than max_buffer_size")
self._read_size = read_size or min(self._max_buffer_size, 16*1024)
self._raw = bool(raw)
self._strict_map_key = bool(strict_map_key)
self._encoding = encoding
self._unicode_errors = unicode_errors
self._use_list = use_list
self._list_hook = list_hook
self._object_hook = object_hook
self._object_pairs_hook = object_pairs_hook
self._ext_hook = ext_hook
self._max_str_len = max_str_len
self._max_bin_len = max_bin_len
self._max_array_len = max_array_len
self._max_map_len = max_map_len
self._max_ext_len = max_ext_len
self._stream_offset = 0
if list_hook is not None and not callable(list_hook):
raise TypeError('`list_hook` is not callable')
if object_hook is not None and not callable(object_hook):
raise TypeError('`object_hook` is not callable')
if object_pairs_hook is not None and not callable(object_pairs_hook):
raise TypeError('`object_pairs_hook` is not callable')
if object_hook is not None and object_pairs_hook is not None:
raise TypeError("object_pairs_hook and object_hook are mutually "
"exclusive")
if not callable(ext_hook):
raise TypeError("`ext_hook` is not callable")
def feed(self, next_bytes):
assert self._feeding
view = _get_data_from_buffer(next_bytes)
if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size):
raise BufferFull
# Discard the consumed part of the buffer (before the checkpoint) before appending.
if self._buf_checkpoint > 0:
del self._buffer[:self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
self._buffer.extend(view)
def _consume(self):
""" Gets rid of the used parts of the buffer. """
self._stream_offset += self._buff_i - self._buf_checkpoint
self._buf_checkpoint = self._buff_i
def _got_extradata(self):
return self._buff_i < len(self._buffer)
def _get_extradata(self):
return self._buffer[self._buff_i:]
def read_bytes(self, n):
return self._read(n)
def _read(self, n):
# (int) -> bytearray
self._reserve(n)
i = self._buff_i
self._buff_i = i+n
return self._buffer[i:i+n]
def _reserve(self, n):
remain_bytes = len(self._buffer) - self._buff_i - n
# Fast path: buffer has n bytes already
if remain_bytes >= 0:
return
if self._feeding:
self._buff_i = self._buf_checkpoint
raise OutOfData
# Discard the consumed part of the buffer (before the checkpoint) before reading from the file.
if self._buf_checkpoint > 0:
del self._buffer[:self._buf_checkpoint]
self._buff_i -= self._buf_checkpoint
self._buf_checkpoint = 0
# Read from file
remain_bytes = -remain_bytes
while remain_bytes > 0:
to_read_bytes = max(self._read_size, remain_bytes)
read_data = self.file_like.read(to_read_bytes)
if not read_data:
break
assert isinstance(read_data, bytes)
self._buffer += read_data
remain_bytes -= len(read_data)
if len(self._buffer) < n + self._buff_i:
self._buff_i = 0 # rollback
raise OutOfData
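# The first byte of every msgpack value encodes its type:
#   0x00-0x7f positive fixint, 0x80-0x8f fixmap, 0x90-0x9f fixarray,
#   0xa0-0xbf fixstr, 0xc0 nil, 0xc2/0xc3 bool, 0xc4-0xc6 bin,
#   0xc7-0xc9 ext, 0xca/0xcb float32/float64, 0xcc-0xd3 sized ints,
#   0xd4-0xd8 fixext, 0xd9-0xdb str, 0xdc/0xdd array, 0xde/0xdf map,
#   0xe0-0xff negative fixint. _read_header dispatches on this byte.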
def _read_header(self, execute=EX_CONSTRUCT):
typ = TYPE_IMMEDIATE
n = 0
obj = None
self._reserve(1)
b = self._buffer[self._buff_i]
self._buff_i += 1
if b & 0b10000000 == 0:
obj = b
elif b & 0b11100000 == 0b11100000:
obj = -1 - (b ^ 0xff)
elif b & 0b11100000 == 0b10100000:
n = b & 0b00011111
typ = TYPE_RAW
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b & 0b11110000 == 0b10010000:
n = b & 0b00001111
typ = TYPE_ARRAY
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b & 0b11110000 == 0b10000000:
n = b & 0b00001111
typ = TYPE_MAP
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
elif b == 0xc0:
obj = None
elif b == 0xc2:
obj = False
elif b == 0xc3:
obj = True
elif b == 0xc4:
typ = TYPE_BIN
self._reserve(1)
n = self._buffer[self._buff_i]
self._buff_i += 1
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xc5:
typ = TYPE_BIN
self._reserve(2)
n = _unpack_from(">H", self._buffer, self._buff_i)[0]
self._buff_i += 2
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xc6:
typ = TYPE_BIN
self._reserve(4)
n = _unpack_from(">I", self._buffer, self._buff_i)[0]
self._buff_i += 4
if n > self._max_bin_len:
raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
obj = self._read(n)
elif b == 0xc7: # ext 8
typ = TYPE_EXT
self._reserve(2)
L, n = _unpack_from('Bb', self._buffer, self._buff_i)
self._buff_i += 2
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xc8: # ext 16
typ = TYPE_EXT
self._reserve(3)
L, n = _unpack_from('>Hb', self._buffer, self._buff_i)
self._buff_i += 3
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xc9: # ext 32
typ = TYPE_EXT
self._reserve(5)
L, n = _unpack_from('>Ib', self._buffer, self._buff_i)
self._buff_i += 5
if L > self._max_ext_len:
raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
obj = self._read(L)
elif b == 0xca:
self._reserve(4)
obj = _unpack_from(">f", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xcb:
self._reserve(8)
obj = _unpack_from(">d", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xcc:
self._reserve(1)
obj = self._buffer[self._buff_i]
self._buff_i += 1
elif b == 0xcd:
self._reserve(2)
obj = _unpack_from(">H", self._buffer, self._buff_i)[0]
self._buff_i += 2
elif b == 0xce:
self._reserve(4)
obj = _unpack_from(">I", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xcf:
self._reserve(8)
obj = _unpack_from(">Q", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xd0:
self._reserve(1)
obj = _unpack_from("b", self._buffer, self._buff_i)[0]
self._buff_i += 1
elif b == 0xd1:
self._reserve(2)
obj = _unpack_from(">h", self._buffer, self._buff_i)[0]
self._buff_i += 2
elif b == 0xd2:
self._reserve(4)
obj = _unpack_from(">i", self._buffer, self._buff_i)[0]
self._buff_i += 4
elif b == 0xd3:
self._reserve(8)
obj = _unpack_from(">q", self._buffer, self._buff_i)[0]
self._buff_i += 8
elif b == 0xd4: # fixext 1
typ = TYPE_EXT
if self._max_ext_len < 1:
raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len))
self._reserve(2)
n, obj = _unpack_from("b1s", self._buffer, self._buff_i)
self._buff_i += 2
elif b == 0xd5: # fixext 2
typ = TYPE_EXT
if self._max_ext_len < 2:
raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len))
self._reserve(3)
n, obj = _unpack_from("b2s", self._buffer, self._buff_i)
self._buff_i += 3
elif b == 0xd6: # fixext 4
typ = TYPE_EXT
if self._max_ext_len < 4:
raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len))
self._reserve(5)
n, obj = _unpack_from("b4s", self._buffer, self._buff_i)
self._buff_i += 5
elif b == 0xd7: # fixext 8
typ = TYPE_EXT
if self._max_ext_len < 8:
raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len))
self._reserve(9)
n, obj = _unpack_from("b8s", self._buffer, self._buff_i)
self._buff_i += 9
elif b == 0xd8: # fixext 16
typ = TYPE_EXT
if self._max_ext_len < 16:
raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len))
self._reserve(17)
n, obj = _unpack_from("b16s", self._buffer, self._buff_i)
self._buff_i += 17
elif b == 0xd9:
typ = TYPE_RAW
self._reserve(1)
n = self._buffer[self._buff_i]
self._buff_i += 1
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xda:
typ = TYPE_RAW
self._reserve(2)
n, = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xdb:
typ = TYPE_RAW
self._reserve(4)
n, = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_str_len:
raise ValueError("%s exceeds max_str_len(%s)", n, self._max_str_len)
obj = self._read(n)
elif b == 0xdc:
typ = TYPE_ARRAY
self._reserve(2)
n, = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b == 0xdd:
typ = TYPE_ARRAY
self._reserve(4)
n, = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_array_len:
raise ValueError("%s exceeds max_array_len(%s)", n, self._max_array_len)
elif b == 0xde:
self._reserve(2)
n, = _unpack_from(">H", self._buffer, self._buff_i)
self._buff_i += 2
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
typ = TYPE_MAP
elif b == 0xdf:
self._reserve(4)
n, = _unpack_from(">I", self._buffer, self._buff_i)
self._buff_i += 4
if n > self._max_map_len:
raise ValueError("%s exceeds max_map_len(%s)", n, self._max_map_len)
typ = TYPE_MAP
else:
raise FormatError("Unknown header: 0x%x" % b)
return typ, n, obj
def _unpack(self, execute=EX_CONSTRUCT):
typ, n, obj = self._read_header(execute)
if execute == EX_READ_ARRAY_HEADER:
if typ != TYPE_ARRAY:
raise ValueError("Expected array")
return n
if execute == EX_READ_MAP_HEADER:
if typ != TYPE_MAP:
raise ValueError("Expected map")
return n
# TODO should we eliminate the recursion?
if typ == TYPE_ARRAY:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call `list_hook`
self._unpack(EX_SKIP)
return
ret = newlist_hint(n)
for i in xrange(n):
ret.append(self._unpack(EX_CONSTRUCT))
if self._list_hook is not None:
ret = self._list_hook(ret)
# TODO is the interaction between `list_hook` and `use_list` ok?
return ret if self._use_list else tuple(ret)
if typ == TYPE_MAP:
if execute == EX_SKIP:
for i in xrange(n):
# TODO check whether we need to call hooks
self._unpack(EX_SKIP)
self._unpack(EX_SKIP)
return
if self._object_pairs_hook is not None:
ret = self._object_pairs_hook(
(self._unpack(EX_CONSTRUCT),
self._unpack(EX_CONSTRUCT))
for _ in xrange(n))
else:
ret = {}
for _ in xrange(n):
key = self._unpack(EX_CONSTRUCT)
if self._strict_map_key and type(key) not in (unicode, bytes):
raise ValueError("%s is not allowed for map key" % str(type(key)))
ret[key] = self._unpack(EX_CONSTRUCT)
if self._object_hook is not None:
ret = self._object_hook(ret)
return ret
if execute == EX_SKIP:
return
if typ == TYPE_RAW:
if self._encoding is not None:
obj = obj.decode(self._encoding, self._unicode_errors)
elif self._raw:
obj = bytes(obj)
else:
obj = obj.decode('utf_8')
return obj
if typ == TYPE_EXT:
return self._ext_hook(n, bytes(obj))
if typ == TYPE_BIN:
return bytes(obj)
assert typ == TYPE_IMMEDIATE
return obj
def __iter__(self):
return self
def __next__(self):
try:
ret = self._unpack(EX_CONSTRUCT)
self._consume()
return ret
except OutOfData:
self._consume()
raise StopIteration
except RecursionError:
raise StackError
next = __next__
def skip(self):
self._unpack(EX_SKIP)
self._consume()
def unpack(self):
try:
ret = self._unpack(EX_CONSTRUCT)
except RecursionError:
raise StackError
self._consume()
return ret
def read_array_header(self):
ret = self._unpack(EX_READ_ARRAY_HEADER)
self._consume()
return ret
def read_map_header(self):
ret = self._unpack(EX_READ_MAP_HEADER)
self._consume()
return ret
def tell(self):
return self._stream_offset
class Packer(object):
"""
MessagePack Packer
usage:
packer = Packer()
astream.write(packer.pack(a))
astream.write(packer.pack(b))
Packer's constructor has some keyword arguments:
:param callable default:
Convert user type to builtin type that Packer supports.
See also simplejson's document.
:param bool use_single_float:
Use single precision float type for float. (default: False)
:param bool autoreset:
Reset buffer after each pack and return its content as `bytes`. (default: True).
If set to false, use `bytes()` to get the content and `.reset()` to clear the buffer.
:param bool use_bin_type:
Use bin type introduced in msgpack spec 2.0 for bytes.
It also enables str8 type for unicode.
:param bool strict_types:
If set to true, types will be checked to be exact. Derived classes
from serializable types will not be serialized and will be
treated as an unsupported type and forwarded to default.
Additionally tuples will not be serialized as lists.
This is useful when trying to implement accurate serialization
for python types.
:param str encoding:
(deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')
:param str unicode_errors:
Error handler for encoding unicode. (default: 'strict')
"""
def __init__(self, default=None, encoding=None, unicode_errors=None,
use_single_float=False, autoreset=True, use_bin_type=False,
strict_types=False):
if encoding is None:
encoding = 'utf_8'
else:
warnings.warn(
"encoding is deprecated, Use raw=False instead.",
DeprecationWarning, stacklevel=2)
if unicode_errors is None:
unicode_errors = 'strict'
self._strict_types = strict_types
self._use_float = use_single_float
self._autoreset = autoreset
self._use_bin_type = use_bin_type
self._encoding = encoding
self._unicode_errors = unicode_errors
self._buffer = StringIO()
if default is not None:
if not callable(default):
raise TypeError("default must be callable")
self._default = default
def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT,
check=isinstance, check_type_strict=_check_type_strict):
default_used = False
if self._strict_types:
check = check_type_strict
list_types = list
else:
list_types = (list, tuple)
while True:
if nest_limit < 0:
raise ValueError("recursion limit exceeded")
if obj is None:
return self._buffer.write(b"\xc0")
if check(obj, bool):
if obj:
return self._buffer.write(b"\xc3")
return self._buffer.write(b"\xc2")
if check(obj, int_types):
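# Pick the smallest encoding that fits: positive/negative fixint
# first, then uint8/16/32/64 (0xcc-0xcf) or int8/16/32/64 (0xd0-0xd3).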
if 0 <= obj < 0x80:
return self._buffer.write(struct.pack("B", obj))
if -0x20 <= obj < 0:
return self._buffer.write(struct.pack("b", obj))
if 0x80 <= obj <= 0xff:
return self._buffer.write(struct.pack("BB", 0xcc, obj))
if -0x80 <= obj < 0:
return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
if 0xff < obj <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xcd, obj))
if -0x8000 <= obj < -0x80:
return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
if 0xffff < obj <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xce, obj))
if -0x80000000 <= obj < -0x8000:
return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
if 0xffffffff < obj <= 0xffffffffffffffff:
return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
if -0x8000000000000000 <= obj < -0x80000000:
return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = True
continue
raise OverflowError("Integer value out of range")
if check(obj, (bytes, bytearray)):
n = len(obj)
if n >= 2**32:
raise ValueError("%s is too large" % type(obj).__name__)
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, unicode):
if self._encoding is None:
raise TypeError(
"Can't encode unicode string: "
"no encoding is specified")
obj = obj.encode(self._encoding, self._unicode_errors)
n = len(obj)
if n >= 2**32:
raise ValueError("String is too large")
self._pack_raw_header(n)
return self._buffer.write(obj)
if check(obj, memoryview):
n = len(obj) * obj.itemsize
if n >= 2**32:
raise ValueError("Memoryview is too large")
self._pack_bin_header(n)
return self._buffer.write(obj)
if check(obj, float):
if self._use_float:
return self._buffer.write(struct.pack(">Bf", 0xca, obj))
return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
if check(obj, ExtType):
code = obj.code
data = obj.data
assert isinstance(code, int)
assert isinstance(data, bytes)
L = len(data)
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(struct.pack(">BB", 0xc7, L))
elif L <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xc8, L))
else:
self._buffer.write(struct.pack(">BI", 0xc9, L))
self._buffer.write(struct.pack("b", code))
self._buffer.write(data)
return
if check(obj, list_types):
n = len(obj)
self._pack_array_header(n)
for i in xrange(n):
self._pack(obj[i], nest_limit - 1)
return
if check(obj, dict):
return self._pack_map_pairs(len(obj), dict_iteritems(obj),
nest_limit - 1)
if not default_used and self._default is not None:
obj = self._default(obj)
default_used = True
continue
raise TypeError("Cannot serialize %r" % (obj, ))
def pack(self, obj):
try:
self._pack(obj)
except:
self._buffer = StringIO() # force reset
raise
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_map_pairs(self, pairs):
self._pack_map_pairs(len(pairs), pairs)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_array_header(self, n):
if n >= 2**32:
raise ValueError
self._pack_array_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_map_header(self, n):
if n >= 2**32:
raise ValueError
self._pack_map_header(n)
if self._autoreset:
ret = self._buffer.getvalue()
self._buffer = StringIO()
return ret
def pack_ext_type(self, typecode, data):
if not isinstance(typecode, int):
raise TypeError("typecode must have int type.")
if not 0 <= typecode <= 127:
raise ValueError("typecode should be 0-127")
if not isinstance(data, bytes):
raise TypeError("data must have bytes type")
L = len(data)
if L > 0xffffffff:
raise ValueError("Too large data")
if L == 1:
self._buffer.write(b'\xd4')
elif L == 2:
self._buffer.write(b'\xd5')
elif L == 4:
self._buffer.write(b'\xd6')
elif L == 8:
self._buffer.write(b'\xd7')
elif L == 16:
self._buffer.write(b'\xd8')
elif L <= 0xff:
self._buffer.write(b'\xc7' + struct.pack('B', L))
elif L <= 0xffff:
self._buffer.write(b'\xc8' + struct.pack('>H', L))
else:
self._buffer.write(b'\xc9' + struct.pack('>I', L))
self._buffer.write(struct.pack('B', typecode))
self._buffer.write(data)
def _pack_array_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x90 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xdc, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdd, n))
raise ValueError("Array is too large")
def _pack_map_header(self, n):
if n <= 0x0f:
return self._buffer.write(struct.pack('B', 0x80 + n))
if n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xde, n))
if n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xdf, n))
raise ValueError("Dict is too large")
def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT):
self._pack_map_header(n)
for (k, v) in pairs:
self._pack(k, nest_limit - 1)
self._pack(v, nest_limit - 1)
def _pack_raw_header(self, n):
if n <= 0x1f:
self._buffer.write(struct.pack('B', 0xa0 + n))
elif self._use_bin_type and n <= 0xff:
self._buffer.write(struct.pack('>BB', 0xd9, n))
elif n <= 0xffff:
self._buffer.write(struct.pack(">BH", 0xda, n))
elif n <= 0xffffffff:
self._buffer.write(struct.pack(">BI", 0xdb, n))
else:
raise ValueError('Raw is too large')
def _pack_bin_header(self, n):
if not self._use_bin_type:
return self._pack_raw_header(n)
elif n <= 0xff:
return self._buffer.write(struct.pack('>BB', 0xc4, n))
elif n <= 0xffff:
return self._buffer.write(struct.pack(">BH", 0xc5, n))
elif n <= 0xffffffff:
return self._buffer.write(struct.pack(">BI", 0xc6, n))
else:
raise ValueError('Bin is too large')
def bytes(self):
"""Return internal buffer contents as bytes object"""
return self._buffer.getvalue()
def reset(self):
"""Reset internal buffer.
This method is useful only when autoreset=False.
"""
self._buffer = StringIO()
def getbuffer(self):
"""Return view of internal buffer."""
if USING_STRINGBUILDER or PY2:
return memoryview(self.bytes())
else:
return self._buffer.getbuffer()
| 36.373424
| 109
| 0.556945
|
4a12ec8adb9be275c8125142b0816a0265ea749b
| 6,868
|
py
|
Python
|
mmseg/datasets/builder.py
|
hanqiu-hq/mmsegmentation
|
29cfcd145c4878480f83ba89249eaca6f8856d92
|
[
"Apache-2.0"
] | 11
|
2022-02-04T01:09:45.000Z
|
2022-03-08T05:49:16.000Z
|
mmseg/datasets/builder.py
|
hanqiu-hq/mmsegmentation
|
29cfcd145c4878480f83ba89249eaca6f8856d92
|
[
"Apache-2.0"
] | 1
|
2022-01-07T15:03:23.000Z
|
2022-01-12T14:39:09.000Z
|
mmseg/datasets/builder.py
|
hanqiu-hq/mmsegmentation
|
29cfcd145c4878480f83ba89249eaca6f8856d92
|
[
"Apache-2.0"
] | 1
|
2022-01-25T05:13:37.000Z
|
2022-01-25T05:13:37.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import platform
import random
from functools import partial
import numpy as np
import torch
from mmcv.parallel import collate
from mmcv.runner import get_dist_info
from mmcv.utils import Registry, build_from_cfg, digit_version
from torch.utils.data import DataLoader, DistributedSampler
if platform.system() != 'Windows':
# https://github.com/pytorch/pytorch/issues/973
import resource
rlimit = resource.getrlimit(resource.RLIMIT_NOFILE)
base_soft_limit = rlimit[0]
hard_limit = rlimit[1]
soft_limit = min(max(4096, base_soft_limit), hard_limit)
resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit))
DATASETS = Registry('dataset')
PIPELINES = Registry('pipeline')
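# Dataset and pipeline classes register themselves with these registries
# (e.g. via the `@DATASETS.register_module()` decorator elsewhere in the
# package); `build_from_cfg` then instantiates them from a config dict's
# 'type' key.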
def _concat_dataset(cfg, default_args=None):
"""Build :obj:`ConcatDataset by."""
from .dataset_wrappers import ConcatDataset
img_dir = cfg['img_dir']
ann_dir = cfg.get('ann_dir', None)
split = cfg.get('split', None)
# pop 'separate_eval' since it is not a valid key for common datasets.
separate_eval = cfg.pop('separate_eval', True)
num_img_dir = len(img_dir) if isinstance(img_dir, (list, tuple)) else 1
if ann_dir is not None:
num_ann_dir = len(ann_dir) if isinstance(ann_dir, (list, tuple)) else 1
else:
num_ann_dir = 0
if split is not None:
num_split = len(split) if isinstance(split, (list, tuple)) else 1
else:
num_split = 0
if num_img_dir > 1:
assert num_img_dir == num_ann_dir or num_ann_dir == 0
assert num_img_dir == num_split or num_split == 0
else:
assert num_split == num_ann_dir or num_ann_dir <= 1
num_dset = max(num_split, num_img_dir)
datasets = []
for i in range(num_dset):
data_cfg = copy.deepcopy(cfg)
if isinstance(img_dir, (list, tuple)):
data_cfg['img_dir'] = img_dir[i]
if isinstance(ann_dir, (list, tuple)):
data_cfg['ann_dir'] = ann_dir[i]
if isinstance(split, (list, tuple)):
data_cfg['split'] = split[i]
datasets.append(build_dataset(data_cfg, default_args))
return ConcatDataset(datasets, separate_eval)
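# Example config (illustrative paths) that would be concatenated here:
#   dict(type='CustomDataset',
#        img_dir=['train/imgs_a', 'train/imgs_b'],
#        ann_dir=['train/anns_a', 'train/anns_b'],
#        pipeline=[...])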
def build_dataset(cfg, default_args=None):
"""Build datasets."""
from .dataset_wrappers import (ConcatDataset, RepeatDataset,
MultiImageMixDataset)
if isinstance(cfg, (list, tuple)):
dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg])
elif cfg['type'] == 'RepeatDataset':
dataset = RepeatDataset(
build_dataset(cfg['dataset'], default_args), cfg['times'])
elif cfg['type'] == 'MultiImageMixDataset':
cp_cfg = copy.deepcopy(cfg)
cp_cfg['dataset'] = build_dataset(cp_cfg['dataset'])
cp_cfg.pop('type')
dataset = MultiImageMixDataset(**cp_cfg)
elif isinstance(cfg.get('img_dir'), (list, tuple)) or isinstance(
cfg.get('split', None), (list, tuple)):
dataset = _concat_dataset(cfg, default_args)
else:
dataset = build_from_cfg(cfg, DATASETS, default_args)
return dataset
def build_dataloader(dataset,
samples_per_gpu,
workers_per_gpu,
num_gpus=1,
dist=True,
shuffle=True,
seed=None,
drop_last=False,
pin_memory=True,
persistent_workers=True,
**kwargs):
"""Build PyTorch DataLoader.
In distributed training, each GPU/process has a dataloader.
In non-distributed training, there is only one dataloader for all GPUs.
Args:
dataset (Dataset): A PyTorch dataset.
samples_per_gpu (int): Number of training samples on each GPU, i.e.,
batch size of each GPU.
workers_per_gpu (int): How many subprocesses to use for data loading
for each GPU.
num_gpus (int): Number of GPUs. Only used in non-distributed training.
dist (bool): Distributed training/test or not. Default: True.
shuffle (bool): Whether to shuffle the data at every epoch.
Default: True.
seed (int | None): Seed to be used. Default: None.
drop_last (bool): Whether to drop the last incomplete batch in epoch.
Default: False
pin_memory (bool): Whether to use pin_memory in DataLoader.
Default: True
persistent_workers (bool): If True, the data loader will not shut down
the worker processes after a dataset has been consumed once.
This keeps the worker Dataset instances alive.
The argument only takes effect when PyTorch>=1.7.0.
Default: True
kwargs: any keyword argument to be used to initialize DataLoader
Returns:
DataLoader: A PyTorch dataloader.
"""
rank, world_size = get_dist_info()
if dist:
sampler = DistributedSampler(
dataset, world_size, rank, shuffle=shuffle)
shuffle = False
batch_size = samples_per_gpu
num_workers = workers_per_gpu
else:
sampler = None
batch_size = num_gpus * samples_per_gpu
num_workers = num_gpus * workers_per_gpu
init_fn = partial(
worker_init_fn, num_workers=num_workers, rank=rank,
seed=seed) if seed is not None else None
if digit_version(torch.__version__) >= digit_version('1.8.0'):
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
persistent_workers=persistent_workers,
**kwargs)
else:
data_loader = DataLoader(
dataset,
batch_size=batch_size,
sampler=sampler,
num_workers=num_workers,
collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
pin_memory=pin_memory,
shuffle=shuffle,
worker_init_fn=init_fn,
drop_last=drop_last,
**kwargs)
return data_loader
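# Usage sketch (assumed config values, non-distributed):
#   dataset = build_dataset(cfg.data.train)
#   loader = build_dataloader(dataset, samples_per_gpu=2,
#                             workers_per_gpu=2, dist=False, seed=42)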
def worker_init_fn(worker_id, num_workers, rank, seed):
"""Worker init func for dataloader.
The seed of each worker equals num_workers * rank + worker_id + user_seed.
Args:
worker_id (int): Worker id.
num_workers (int): Number of workers.
rank (int): The rank of current process.
seed (int): The random seed to use.
"""
worker_seed = num_workers * rank + worker_id + seed
np.random.seed(worker_seed)
random.seed(worker_seed)
| 36.338624
| 79
| 0.634974
|
4a12ee1dd3b5a3fb803e200124183bbd491703fc
| 90,204
|
py
|
Python
|
ThirdParty/Twisted/twisted/internet/interfaces.py
|
jasper-yeh/VtkDotNet
|
84b56f781cb511694e4380cebfb245bbefe2560b
|
[
"BSD-3-Clause"
] | 3
|
2020-06-20T23:31:06.000Z
|
2021-01-11T02:17:16.000Z
|
ThirdParty/Twisted/twisted/internet/interfaces.py
|
jasper-yeh/VtkDotNet
|
84b56f781cb511694e4380cebfb245bbefe2560b
|
[
"BSD-3-Clause"
] | null | null | null |
ThirdParty/Twisted/twisted/internet/interfaces.py
|
jasper-yeh/VtkDotNet
|
84b56f781cb511694e4380cebfb245bbefe2560b
|
[
"BSD-3-Clause"
] | 1
|
2019-08-30T08:41:21.000Z
|
2019-08-30T08:41:21.000Z
|
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Interface documentation.
Maintainer: Itamar Shtull-Trauring
"""
from __future__ import division, absolute_import
from zope.interface import Interface, Attribute
from twisted.python import deprecate
from twisted.python.versions import Version
class IAddress(Interface):
"""
An address, e.g. a TCP C{(host, port)}.
Default implementations are in L{twisted.internet.address}.
"""
### Reactor Interfaces
class IConnector(Interface):
"""
Object used to interface between connections and protocols.
Each L{IConnector} manages one connection.
"""
def stopConnecting():
"""
Stop attempting to connect.
"""
def disconnect():
"""
Disconnect regardless of the connection state.
If we are connected, disconnect, if we are trying to connect,
stop trying.
"""
def connect():
"""
Try to connect to remote address.
"""
def getDestination():
"""
Return destination this will try to connect to.
@return: An object which provides L{IAddress}.
"""
class IResolverSimple(Interface):
def getHostByName(name, timeout = (1, 3, 11, 45)):
"""
Resolve the domain name C{name} into an IP address.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{twisted.internet.defer.Deferred}
@return: The callback of the Deferred that is returned will be
passed a string that represents the IP address of the
specified name, or the errback will be called if the
lookup times out. If multiple types of address records
are associated with the name, A6 records will be returned
in preference to AAAA records, which will be returned in
preference to A records. If there are multiple records of
the type to be returned, one will be selected at random.
@raise twisted.internet.defer.TimeoutError: Raised
(asynchronously) if the name cannot be resolved within the
specified timeout period.
"""
class IResolver(IResolverSimple):
def query(query, timeout=None):
"""
Dispatch C{query} to the method which can handle its type.
@type query: L{twisted.names.dns.Query}
@param query: The DNS query being issued, to which a response is to be
generated.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress(name, timeout=None):
"""
Perform an A record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAddress6(name, timeout=None):
"""
Perform an A6 record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupIPV6Address(name, timeout=None):
"""
Perform an AAAA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailExchange(name, timeout=None):
"""
Perform an MX record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNameservers(name, timeout=None):
"""
Perform an NS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupCanonicalName(name, timeout=None):
"""
Perform a CNAME record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailBox(name, timeout=None):
"""
Perform an MB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailGroup(name, timeout=None):
"""
Perform an MG record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailRename(name, timeout=None):
"""
Perform an MR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupPointer(name, timeout=None):
"""
Perform a PTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAuthority(name, timeout=None):
"""
Perform an SOA record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNull(name, timeout=None):
"""
Perform a NULL record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupWellKnownServices(name, timeout=None):
"""
Perform a WKS record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupHostInfo(name, timeout=None):
"""
Perform a HINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupMailboxInfo(name, timeout=None):
"""
Perform an MINFO record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupText(name, timeout=None):
"""
Perform a TXT record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
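        For example, a minimal sketch (assuming, as in current releases,
        that L{twisted.names.client} exposes a module-level C{lookupText}
        backed by a provider of this interface)::
            from twisted.names import client
            d = client.lookupText('example.com')
            # r is the (answers, authority, additional) three-tuple:
            d.addCallback(lambda r: r[0])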
"""
def lookupResponsibility(name, timeout=None):
"""
Perform an RP record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAFSDatabase(name, timeout=None):
"""
Perform an AFSDB record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupService(name, timeout=None):
"""
Perform an SRV record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupAllRecords(name, timeout=None):
"""
Perform an ALL_RECORD lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
    def lookupSenderPolicy(name, timeout=None):
        """
        Perform an SPF record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupNamingAuthorityPointer(name, timeout=None):
"""
Perform a NAPTR record lookup.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: Sequence of C{int}
@param timeout: Number of seconds after which to reissue the query.
When the last timeout expires, the query is considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances. The first element of the
tuple gives answers. The second element of the tuple gives
authorities. The third element of the tuple gives additional
information. The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
def lookupZone(name, timeout=None):
"""
Perform an AXFR record lookup.
NB This is quite different from other DNS requests. See
U{http://cr.yp.to/djbdns/axfr-notes.html} for more
information.
NB Unlike other C{lookup*} methods, the timeout here is not a
list of ints, it is a single int.
@type name: C{str}
@param name: DNS name to resolve.
@type timeout: C{int}
@param timeout: When this timeout expires, the query is
considered failed.
@rtype: L{Deferred}
@return: A L{Deferred} which fires with a three-tuple of lists of
L{twisted.names.dns.RRHeader} instances.
The first element of the tuple gives answers.
The second and third elements are always empty.
The L{Deferred} may instead fail with one of the
exceptions defined in L{twisted.names.error} or with
C{NotImplementedError}.
"""
class IReactorTCP(Interface):
def listenTCP(port, factory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param backlog: size of the listen queue
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses. To bind to all IPv4 and IPv6
addresses, you must call this method twice.
@return: an object that provides L{IListeningPort}.
        @raise CannotListenError: as defined in
L{twisted.internet.error.CannotListenError},
if it cannot listen on this port (e.g., it
cannot bind to the required port number)
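        For example, a minimal sketch of an echo server using the global
        reactor::
            from twisted.internet import reactor
            from twisted.internet.protocol import Protocol, ServerFactory
            class Echo(Protocol):
                def dataReceived(self, data):
                    self.transport.write(data)
            factory = ServerFactory()
            factory.protocol = Echo
            reactor.listenTCP(8000, factory)
            reactor.run()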
"""
def connectTCP(host, port, factory, timeout=30, bindAddress=None):
"""
Connect a TCP client.
@param host: A hostname or an IPv4 or IPv6 address literal.
@type host: L{bytes}
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind
to, or None.
@return: An object which provides L{IConnector}. This connector will
call various callbacks on the factory when a connection is
made, failed, or lost - see
L{ClientFactory<twisted.internet.protocol.ClientFactory>}
docs for details.
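        For example, a minimal sketch (C{EchoClientFactory} stands in for a
        L{twisted.internet.protocol.ClientFactory} subclass you would define
        yourself)::
            from twisted.internet import reactor
            connector = reactor.connectTCP('example.com', 80,
                                           EchoClientFactory())
            reactor.run()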
"""
class IReactorSSL(Interface):
def connectSSL(host, port, factory, contextFactory, timeout=30, bindAddress=None):
"""
Connect a client Protocol to a remote SSL socket.
@param host: a host name
@param port: a port number
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ClientContextFactory} object.
@param timeout: number of seconds to wait before assuming the
connection has failed.
@param bindAddress: a (host, port) tuple of local address to bind to,
or C{None}.
@return: An object which provides L{IConnector}.
"""
def listenSSL(port, factory, contextFactory, backlog=50, interface=''):
"""
Connects a given protocol factory to the given numeric TCP/IP port.
The connection is a SSL one, using contexts created by the context
factory.
@param port: a port number on which to listen
@param factory: a L{twisted.internet.protocol.ServerFactory} instance
@param contextFactory: a L{twisted.internet.ssl.ContextFactory} instance
@param backlog: size of the listen queue
@param interface: the hostname to bind to, defaults to '' (all)
"""
class IReactorUNIX(Interface):
"""
UNIX socket methods.
"""
def connectUNIX(address, factory, timeout=30, checkPID=0):
"""
Connect a client protocol to a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.ClientFactory} instance
@param timeout: number of seconds to wait before assuming the connection
has failed.
@param checkPID: if True, check for a pid file to verify that a server
is listening. If C{address} is a Linux abstract namespace path,
this must be C{False}.
@return: An object which provides L{IConnector}.
"""
def listenUNIX(address, factory, backlog=50, mode=0o666, wantPID=0):
"""
Listen on a UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param factory: a L{twisted.internet.protocol.Factory} instance.
@param backlog: number of connections to allow in backlog.
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param wantPID: if True, create a pidfile for the socket. If C{address}
is a Linux abstract namespace path, this must be C{False}.
@return: An object which provides L{IListeningPort}.
"""
class IReactorUNIXDatagram(Interface):
"""
Datagram UNIX socket methods.
"""
def connectUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666, bindAddress=None):
"""
Connect a client protocol to a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.ConnectedDatagramProtocol} instance
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@param bindAddress: address to bind to
@return: An object which provides L{IConnector}.
"""
def listenUNIXDatagram(address, protocol, maxPacketSize=8192, mode=0o666):
"""
Listen on a datagram UNIX socket.
@param address: a path to a unix socket on the filesystem.
@param protocol: a L{twisted.internet.protocol.DatagramProtocol} instance.
@param maxPacketSize: maximum packet size to accept
@param mode: The mode (B{not} umask) to set on the unix socket. See
platform specific documentation for information about how this
might affect connection attempts.
@type mode: C{int}
@return: An object which provides L{IListeningPort}.
"""
class IReactorWin32Events(Interface):
"""
Win32 Event API methods
@since: 10.2
"""
def addEvent(event, fd, action):
"""
Add a new win32 event to the event loop.
@param event: a Win32 event object created using win32event.CreateEvent()
@param fd: an instance of L{twisted.internet.abstract.FileDescriptor}
@param action: a string that is a method name of the fd instance.
This method is called in response to the event.
@return: None
"""
def removeEvent(event):
"""
Remove an event.
@param event: a Win32 event object added using L{IReactorWin32Events.addEvent}
@return: None
"""
class IReactorUDP(Interface):
"""
UDP socket methods.
"""
def listenUDP(port, protocol, interface='', maxPacketSize=8192):
"""
Connects a given L{DatagramProtocol} to the given numeric UDP port.
@param port: A port number on which to listen.
@type port: C{int}
@param protocol: A L{DatagramProtocol} instance which will be
connected to the given C{port}.
@type protocol: L{DatagramProtocol}
@param interface: The local IPv4 or IPv6 address to which to bind;
defaults to '', ie all IPv4 addresses.
@type interface: C{str}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: object which provides L{IListeningPort}.
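        For example, a minimal sketch of a UDP echo service::
            from twisted.internet import reactor
            from twisted.internet.protocol import DatagramProtocol
            class EchoUDP(DatagramProtocol):
                def datagramReceived(self, datagram, address):
                    self.transport.write(datagram, address)
            reactor.listenUDP(9999, EchoUDP())
            reactor.run()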
"""
class IReactorMulticast(Interface):
"""
UDP socket methods that support multicast.
IMPORTANT: This is an experimental new interface. It may change
    without backwards compatibility. Suggestions are welcome.
"""
def listenMulticast(port, protocol, interface='', maxPacketSize=8192,
listenMultiple=False):
"""
Connects a given
L{DatagramProtocol<twisted.internet.protocol.DatagramProtocol>} to the
given numeric UDP port.
@param listenMultiple: If set to True, allows multiple sockets to
bind to the same address and port number at the same time.
@type listenMultiple: C{bool}
@returns: An object which provides L{IListeningPort}.
@see: L{twisted.internet.interfaces.IMulticastTransport}
@see: U{http://twistedmatrix.com/documents/current/core/howto/udp.html}
"""
class IReactorSocket(Interface):
"""
Methods which allow a reactor to use externally created sockets.
For example, to use C{adoptStreamPort} to implement behavior equivalent
to that of L{IReactorTCP.listenTCP}, you might write code like this::
from socket import SOMAXCONN, AF_INET, SOCK_STREAM, socket
portSocket = socket(AF_INET, SOCK_STREAM)
# Set FD_CLOEXEC on port, left as an exercise. Then make it into a
# non-blocking listening port:
portSocket.setblocking(False)
portSocket.bind(('192.168.1.2', 12345))
portSocket.listen(SOMAXCONN)
# Now have the reactor use it as a TCP port
port = reactor.adoptStreamPort(
portSocket.fileno(), AF_INET, YourFactory())
# portSocket itself is no longer necessary, and needs to be cleaned
# up by us.
portSocket.close()
# Whenever the server is no longer needed, stop it as usual.
stoppedDeferred = port.stopListening()
Another potential use is to inherit a listening descriptor from a parent
process (for example, systemd or launchd), or to receive one over a UNIX
domain socket.
Some plans for extending this interface exist. See:
- U{http://twistedmatrix.com/trac/ticket/5573}: AF_UNIX SOCK_STREAM ports
- U{http://twistedmatrix.com/trac/ticket/6594}: AF_UNIX SOCK_DGRAM ports
"""
def adoptStreamPort(fileDescriptor, addressFamily, factory):
"""
Add an existing listening I{SOCK_STREAM} socket to the reactor to
monitor for new connections to accept and handle.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptStreamPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create new
protocols to handle connections accepted via this socket.
@return: An object providing L{IListeningPort}.
@raise twisted.internet.error.UnsupportedAddressFamily: If the
given address family is not supported by this reactor, or
not supported with the given socket type.
@raise twisted.internet.error.UnsupportedSocketType: If the
given socket type is not supported by this reactor, or not
supported with the given socket type.
"""
def adoptStreamConnection(fileDescriptor, addressFamily, factory):
"""
Add an existing connected I{SOCK_STREAM} socket to the reactor to
monitor for data.
Note that the given factory won't have its C{startFactory} and
C{stopFactory} methods called, as there is no sensible time to call
them in this situation.
@param fileDescriptor: A file descriptor associated with a socket which
is already connected. The socket must be set non-blocking. Any
additional flags (for example, close-on-exec) must also be set by
application code. Application code is responsible for closing the
file descriptor, which may be done as soon as
C{adoptStreamConnection} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@param factory: A L{ServerFactory} instance to use to create a new
protocol to handle the connection via this socket.
@raise UnsupportedAddressFamily: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
def adoptDatagramPort(fileDescriptor, addressFamily, protocol,
maxPacketSize=8192):
"""
Add an existing listening I{SOCK_DGRAM} socket to the reactor to
monitor for read and write readiness.
@param fileDescriptor: A file descriptor associated with a socket which
is already bound to an address and marked as listening. The socket
must be set non-blocking. Any additional flags (for example,
close-on-exec) must also be set by application code. Application
code is responsible for closing the file descriptor, which may be
done as soon as C{adoptDatagramPort} returns.
@type fileDescriptor: C{int}
@param addressFamily: The address family (or I{domain}) of the socket.
For example, L{socket.AF_INET6}.
@type addressFamily: C{int}
@param protocol: A L{DatagramProtocol} instance to connect to
a UDP transport.
@type protocol: L{DatagramProtocol}
@param maxPacketSize: The maximum packet size to accept.
@type maxPacketSize: C{int}
@return: An object providing L{IListeningPort}.
@raise L{UnsupportedAddressFamily}: If the given address family is not
supported by this reactor, or not supported with the given socket
type.
@raise UnsupportedSocketType: If the given socket type is not supported
by this reactor, or not supported with the given socket type.
"""
class IReactorProcess(Interface):
def spawnProcess(processProtocol, executable, args=(), env={}, path=None,
uid=None, gid=None, usePTY=0, childFDs=None):
"""
Spawn a process, with a process protocol.
@type processProtocol: L{IProcessProtocol} provider
@param processProtocol: An object which will be notified of all
events related to the created process.
@param executable: the file name to spawn - the full path should be
used.
@param args: the command line arguments to pass to the process; a
sequence of strings. The first string should be the
executable's name.
@type env: a C{dict} mapping C{str} to C{str}, or C{None}.
@param env: the environment variables to pass to the child process. The
resulting behavior varies between platforms. If
- C{env} is not set:
- On POSIX: pass an empty environment.
- On Windows: pass C{os.environ}.
- C{env} is C{None}:
- On POSIX: pass C{os.environ}.
- On Windows: pass C{os.environ}.
- C{env} is a C{dict}:
- On POSIX: pass the key/value pairs in C{env} as the
complete environment.
- On Windows: update C{os.environ} with the key/value
pairs in the C{dict} before passing it. As a
consequence of U{bug #1640
<http://twistedmatrix.com/trac/ticket/1640>}, passing
keys with empty values in an effort to unset
environment variables I{won't} unset them.
@param path: the path to run the subprocess in - defaults to the
current directory.
@param uid: user ID to run the subprocess as. (Only available on
POSIX systems.)
@param gid: group ID to run the subprocess as. (Only available on
POSIX systems.)
@param usePTY: if true, run this process in a pseudo-terminal.
optionally a tuple of C{(masterfd, slavefd, ttyname)},
in which case use those file descriptors.
(Not available on all systems.)
@param childFDs: A dictionary mapping file descriptors in the new child
process to an integer or to the string 'r' or 'w'.
If the value is an integer, it specifies a file
descriptor in the parent process which will be mapped
to a file descriptor (specified by the key) in the
child process. This is useful for things like inetd
and shell-like file redirection.
If it is the string 'r', a pipe will be created and
attached to the child at that file descriptor: the
child will be able to write to that file descriptor
and the parent will receive read notification via the
L{IProcessProtocol.childDataReceived} callback. This
is useful for the child's stdout and stderr.
If it is the string 'w', similar setup to the previous
case will occur, with the pipe being readable by the
child instead of writeable. The parent process can
write to that file descriptor using
L{IProcessTransport.writeToChild}. This is useful for
the child's stdin.
If childFDs is not passed, the default behaviour is to
use a mapping that opens the usual stdin/stdout/stderr
pipes.
@see: L{twisted.internet.protocol.ProcessProtocol}
@return: An object which provides L{IProcessTransport}.
@raise OSError: Raised with errno C{EAGAIN} or C{ENOMEM} if there are
insufficient system resources to create a new process.
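        For example, a minimal POSIX-only sketch which prints whatever
        C{/bin/date} writes to its stdout::
            from twisted.internet import protocol, reactor
            class Printer(protocol.ProcessProtocol):
                def childDataReceived(self, childFD, data):
                    print(data)
                def processEnded(self, reason):
                    reactor.stop()
            reactor.spawnProcess(Printer(), '/bin/date', ['date'])
            reactor.run()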
"""
class IReactorTime(Interface):
"""
Time methods that a Reactor should implement.
"""
def seconds():
"""
Get the current time in seconds.
@return: A number-like object of some sort.
"""
def callLater(delay, callable, *args, **kw):
"""
Call a function later.
@type delay: C{float}
@param delay: the number of seconds to wait.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: An object which provides L{IDelayedCall} and can be used to
cancel the scheduled call, by calling its C{cancel()} method.
It also may be rescheduled by calling its C{delay()} or
C{reset()} methods.
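        For example, a minimal sketch::
            from twisted.internet import reactor
            def report():
                print('five seconds passed')
            call = reactor.callLater(5, report)
            call.delay(2)   # push the call back a further two seconds
            # call.cancel() would prevent it from running at all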
"""
def getDelayedCalls():
"""
Retrieve all currently scheduled delayed calls.
@return: A tuple of all L{IDelayedCall} providers representing all
currently scheduled calls. This is everything that has been
returned by C{callLater} but not yet called or canceled.
"""
class IDelayedCall(Interface):
"""
A scheduled call.
There are probably other useful methods we can add to this interface;
suggestions are welcome.
"""
def getTime():
"""
Get time when delayed call will happen.
@return: time in seconds since epoch (a float).
"""
def cancel():
"""
Cancel the scheduled call.
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def delay(secondsLater):
"""
Delay the scheduled call.
@param secondsLater: how many seconds from its current firing time to delay
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def reset(secondsFromNow):
"""
Reset the scheduled call's timer.
@param secondsFromNow: how many seconds from now it should fire,
equivalent to C{.cancel()} and then doing another
                       C{reactor.callLater(secondsFromNow, ...)}
@raises twisted.internet.error.AlreadyCalled: if the call has already
happened.
@raises twisted.internet.error.AlreadyCancelled: if the call has already
been cancelled.
"""
def active():
"""
@return: True if this call is still active, False if it has been
called or cancelled.
"""
class IReactorThreads(Interface):
"""
Dispatch methods to be run in threads.
Internally, this should use a thread pool and dispatch methods to them.
"""
def getThreadPool():
"""
Return the threadpool used by L{callInThread}. Create it first if
necessary.
@rtype: L{twisted.python.threadpool.ThreadPool}
"""
def callInThread(callable, *args, **kwargs):
"""
Run the callable object in a separate thread.
"""
def callFromThread(callable, *args, **kw):
"""
Cause a function to be executed by the reactor thread.
Use this method when you want to run a function in the reactor's thread
from another thread. Calling L{callFromThread} should wake up the main
thread (where L{reactor.run()<reactor.run>} is executing) and run the
given callable in that thread.
If you're writing a multi-threaded application the C{callable} may need
to be thread safe, but this method doesn't require it as such. If you
want to call a function in the next mainloop iteration, but you're in
the same thread, use L{callLater} with a delay of 0.
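        For example, a minimal sketch which stops the reactor from a worker
        thread once some blocking work is done::
            from twisted.internet import reactor
            def blockingWork():
                # ... long-running, blocking code here ...
                reactor.callFromThread(reactor.stop)
            reactor.callInThread(blockingWork)
            reactor.run()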
"""
def suggestThreadPoolSize(size):
"""
Suggest the size of the internal threadpool used to dispatch functions
passed to L{callInThread}.
"""
class IReactorCore(Interface):
"""
Core methods that a Reactor must implement.
"""
running = Attribute(
"A C{bool} which is C{True} from I{during startup} to "
"I{during shutdown} and C{False} the rest of the time.")
def resolve(name, timeout=10):
"""
Return a L{twisted.internet.defer.Deferred} that will resolve a hostname.
"""
def run():
"""
Fire 'startup' System Events, move the reactor to the 'running'
state, then run the main loop until it is stopped with C{stop()} or
C{crash()}.
"""
def stop():
"""
Fire 'shutdown' System Events, which will move the reactor to the
'stopped' state and cause C{reactor.run()} to exit.
"""
def crash():
"""
Stop the main loop *immediately*, without firing any system events.
This is named as it is because this is an extremely "rude" thing to do;
it is possible to lose data and put your system in an inconsistent
state by calling this. However, it is necessary, as sometimes a system
can become wedged in a pre-shutdown call.
"""
def iterate(delay=0):
"""
Run the main loop's I/O polling function for a period of time.
This is most useful in applications where the UI is being drawn "as
fast as possible", such as games. All pending L{IDelayedCall}s will
be called.
The reactor must have been started (via the C{run()} method) prior to
any invocations of this method. It must also be stopped manually
after the last call to this method (via the C{stop()} method). This
method is not re-entrant: you must not call it recursively; in
particular, you must not call it while the reactor is running.
"""
def fireSystemEvent(eventType):
"""
Fire a system-wide event.
System-wide events are things like 'startup', 'shutdown', and
'persist'.
"""
def addSystemEventTrigger(phase, eventType, callable, *args, **kw):
"""
Add a function to be called when a system event occurs.
Each "system event" in Twisted, such as 'startup', 'shutdown', and
'persist', has 3 phases: 'before', 'during', and 'after' (in that
order, of course). These events will be fired internally by the
Reactor.
An implementor of this interface must only implement those events
described here.
Callbacks registered for the "before" phase may return either None or a
Deferred. The "during" phase will not execute until all of the
Deferreds from the "before" phase have fired.
Once the "during" phase is running, all of the remaining triggers must
execute; their return values must be ignored.
@param phase: a time to call the event -- either the string 'before',
'after', or 'during', describing when to call it
relative to the event's execution.
@param eventType: this is a string describing the type of event.
@param callable: the object to call before shutdown.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: an ID that can be used to remove this call with
removeSystemEventTrigger.
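        For example, a minimal sketch which runs cleanup code while the
        reactor shuts down::
            from twisted.internet import reactor
            def cleanup():
                print('shutting down')
            # The returned ID can be passed to removeSystemEventTrigger:
            triggerID = reactor.addSystemEventTrigger(
                'before', 'shutdown', cleanup)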
"""
def removeSystemEventTrigger(triggerID):
"""
Removes a trigger added with addSystemEventTrigger.
@param triggerID: a value returned from addSystemEventTrigger.
@raise KeyError: If there is no system event trigger for the given
C{triggerID}.
@raise ValueError: If there is no system event trigger for the given
C{triggerID}.
@raise TypeError: If there is no system event trigger for the given
C{triggerID}.
"""
def callWhenRunning(callable, *args, **kw):
"""
Call a function when the reactor is running.
If the reactor has not started, the callable will be scheduled
to run when it does start. Otherwise, the callable will be invoked
immediately.
@param callable: the callable object to call later.
@param args: the arguments to call it with.
@param kw: the keyword arguments to call it with.
@return: None if the callable was invoked, otherwise a system
event id for the scheduled call.
"""
class IReactorPluggableResolver(Interface):
"""
A reactor with a pluggable name resolver interface.
"""
def installResolver(resolver):
"""
        Set the internal resolver to use for name lookups.
@type resolver: An object implementing the L{IResolverSimple} interface
@param resolver: The new resolver to use.
@return: The previously installed resolver.
"""
class IReactorDaemonize(Interface):
"""
A reactor which provides hooks that need to be called before and after
daemonization.
Notes:
- This interface SHOULD NOT be called by applications.
- This interface should only be implemented by reactors as a workaround
(in particular, it's implemented currently only by kqueue()).
For details please see the comments on ticket #1918.
"""
def beforeDaemonize():
"""
Hook to be called immediately before daemonization. No reactor methods
may be called until L{afterDaemonize} is called.
@return: C{None}.
"""
def afterDaemonize():
"""
Hook to be called immediately after daemonization. This may only be
called after L{beforeDaemonize} had been called previously.
@return: C{None}.
"""
class IReactorFDSet(Interface):
"""
Implement me to be able to use L{IFileDescriptor} type resources.
This assumes that your main-loop uses UNIX-style numeric file descriptors
(or at least similarly opaque IDs returned from a .fileno() method)
"""
def addReader(reader):
"""
I add reader to the set of file descriptors to get read events for.
@param reader: An L{IReadDescriptor} provider that will be checked for
read events until it is removed from the reactor with
L{removeReader}.
@return: C{None}.
"""
def addWriter(writer):
"""
I add writer to the set of file descriptors to get write events for.
@param writer: An L{IWriteDescriptor} provider that will be checked for
write events until it is removed from the reactor with
L{removeWriter}.
@return: C{None}.
"""
def removeReader(reader):
"""
Removes an object previously added with L{addReader}.
@return: C{None}.
"""
def removeWriter(writer):
"""
Removes an object previously added with L{addWriter}.
@return: C{None}.
"""
def removeAll():
"""
Remove all readers and writers.
        Should not remove internal reactor connections (like a waker).
@return: A list of L{IReadDescriptor} and L{IWriteDescriptor} providers
which were removed.
"""
def getReaders():
"""
Return the list of file descriptors currently monitored for input
events by the reactor.
@return: the list of file descriptors monitored for input events.
@rtype: C{list} of C{IReadDescriptor}
"""
def getWriters():
"""
        Return the list of file descriptors currently monitored for output events
by the reactor.
@return: the list of file descriptors monitored for output events.
@rtype: C{list} of C{IWriteDescriptor}
"""
class IListeningPort(Interface):
"""
A listening port.
"""
def startListening():
"""
Start listening on this port.
@raise CannotListenError: If it cannot listen on this port (e.g., it is
a TCP port and it cannot bind to the required
port number).
"""
def stopListening():
"""
Stop listening on this port.
        If it does not complete immediately, a Deferred will be returned that
        fires upon completion.
"""
def getHost():
"""
Get the host that this port is listening for.
@return: An L{IAddress} provider.
"""
class ILoggingContext(Interface):
"""
Give context information that will be used to log events generated by
this item.
"""
def logPrefix():
"""
@return: Prefix used during log formatting to indicate context.
@rtype: C{str}
"""
class IFileDescriptor(ILoggingContext):
"""
An interface representing a UNIX-style numeric file descriptor.
"""
def fileno():
"""
@raise: If the descriptor no longer has a valid file descriptor
number associated with it.
@return: The platform-specified representation of a file descriptor
number. Or C{-1} if the descriptor no longer has a valid file
descriptor number associated with it. As long as the descriptor
is valid, calls to this method on a particular instance must
return the same value.
"""
def connectionLost(reason):
"""
Called when the connection was lost.
This is called when the connection on a selectable object has been
lost. It will be called whether the connection was closed explicitly,
an exception occurred in an event handler, or the other end of the
connection closed it first.
See also L{IHalfCloseableDescriptor} if your descriptor wants to be
notified separately of the two halves of the connection being closed.
@param reason: A failure instance indicating the reason why the
connection was lost. L{error.ConnectionLost} and
L{error.ConnectionDone} are of special note, but the
failure may be of other classes as well.
"""
class IReadDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can read.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doRead():
"""
Some data is available for reading on your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IWriteDescriptor(IFileDescriptor):
"""
An L{IFileDescriptor} that can write.
This interface is generally used in conjunction with L{IReactorFDSet}.
"""
def doWrite():
"""
Some data can be written to your descriptor.
@return: If an error is encountered which causes the descriptor to
no longer be valid, a L{Failure} should be returned. Otherwise,
C{None}.
"""
class IReadWriteDescriptor(IReadDescriptor, IWriteDescriptor):
"""
An L{IFileDescriptor} that can both read and write.
"""
class IHalfCloseableDescriptor(Interface):
"""
A descriptor that can be half-closed.
"""
def writeConnectionLost(reason):
"""
Indicates write connection was lost.
"""
def readConnectionLost(reason):
"""
Indicates read connection was lost.
"""
class ISystemHandle(Interface):
"""
An object that wraps a networking OS-specific handle.
"""
def getHandle():
"""
Return a system- and reactor-specific handle.
This might be a socket.socket() object, or some other type of
object, depending on which reactor is being used. Use and
manipulate at your own risk.
This might be used in cases where you want to set specific
options not exposed by the Twisted APIs.
"""
class IConsumer(Interface):
"""
A consumer consumes data from a producer.
"""
def registerProducer(producer, streaming):
"""
Register to receive data from a producer.
This sets self to be a consumer for a producer. When this object runs
out of data (as when a send(2) call on a socket succeeds in moving the
last data from a userspace buffer into a kernelspace buffer), it will
ask the producer to resumeProducing().
For L{IPullProducer} providers, C{resumeProducing} will be called once
each time data is required.
For L{IPushProducer} providers, C{pauseProducing} will be called
whenever the write buffer fills up and C{resumeProducing} will only be
called when it empties.
@type producer: L{IProducer} provider
@type streaming: C{bool}
@param streaming: C{True} if C{producer} provides L{IPushProducer},
C{False} if C{producer} provides L{IPullProducer}.
@raise RuntimeError: If a producer is already registered.
@return: C{None}
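        For example, a minimal sketch from inside a protocol method, where
        C{self.transport} provides L{IConsumer} and C{producer} is an object
        of yours providing L{IPushProducer}::
            self.transport.registerProducer(producer, True)
            # ... and once the producer has no more data to offer:
            self.transport.unregisterProducer()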
"""
def unregisterProducer():
"""
Stop consuming data from a producer, without disconnecting.
"""
def write(data):
"""
The producer will write data by calling this method.
The implementation must be non-blocking and perform whatever
buffering is necessary. If the producer has provided enough data
for now and it is a L{IPushProducer}, the consumer may call its
C{pauseProducing} method.
"""
class IProducer(Interface):
"""
A producer produces data for a consumer.
    Typically, producing is done by calling the C{write} method of a class
    implementing L{IConsumer}.
"""
def stopProducing():
"""
Stop producing data.
This tells a producer that its consumer has died, so it must stop
producing data for good.
"""
class IPushProducer(IProducer):
"""
    A push producer, also known as a streaming producer, is expected to
    produce (write to this consumer) data on a continuous basis, unless
    it has been paused. A paused push producer will resume producing
    after its resumeProducing() method is called. For a push producer
    which is not pauseable, these functions may be no-ops.
"""
def pauseProducing():
"""
Pause producing data.
Tells a producer that it has produced too much data to process for
the time being, and to stop until resumeProducing() is called.
"""
def resumeProducing():
"""
Resume producing data.
This tells a producer to re-add itself to the main loop and produce
more data for its consumer.
"""
class IPullProducer(IProducer):
"""
A pull producer, also known as a non-streaming producer, is
expected to produce data each time resumeProducing() is called.
"""
def resumeProducing():
"""
Produce data for the consumer a single time.
This tells a producer to produce data for the consumer once
(not repeatedly, once only). Typically this will be done
by calling the consumer's write() method a single time with
produced data.
"""
class IProtocol(Interface):
def dataReceived(data):
"""
Called whenever data is received.
Use this method to translate to a higher-level message. Usually, some
callback will be made upon the receipt of each complete protocol
message.
@param data: a string of indeterminate length. Please keep in mind
that you will probably need to buffer some data, as partial
(or multiple) protocol messages may be received! I recommend
that unit tests for protocols call through to this method with
differing chunk sizes, down to one byte at a time.
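        For example, a minimal buffering sketch for a line-based protocol
        (in real code, prefer L{twisted.protocols.basic.LineReceiver};
        C{lineReceived} below is a handler you would define yourself)::
            def dataReceived(self, data):
                self._buffer += data
                while '\r\n' in self._buffer:
                    line, self._buffer = self._buffer.split('\r\n', 1)
                    self.lineReceived(line)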
"""
def connectionLost(reason):
"""
Called when the connection is shut down.
Clear any circular references here, and any external references
to this Protocol. The connection has been closed. The C{reason}
Failure wraps a L{twisted.internet.error.ConnectionDone} or
L{twisted.internet.error.ConnectionLost} instance (or a subclass
of one of those).
@type reason: L{twisted.python.failure.Failure}
"""
def makeConnection(transport):
"""
Make a connection to a transport and a server.
"""
def connectionMade():
"""
Called when a connection is made.
This may be considered the initializer of the protocol, because
it is called when the connection is completed. For clients,
this is called once the connection to the server has been
established; for servers, this is called after an accept() call
stops blocking and a socket has been received. If you need to
send any greeting or initial message, do it here.
"""
class IProcessProtocol(Interface):
"""
Interface for process-related event handlers.
"""
def makeConnection(process):
"""
Called when the process has been created.
@type process: L{IProcessTransport} provider
@param process: An object representing the process which has been
created and associated with this protocol.
"""
def childDataReceived(childFD, data):
"""
Called when data arrives from the child process.
@type childFD: C{int}
@param childFD: The file descriptor from which the data was
received.
@type data: C{str}
@param data: The data read from the child's file descriptor.
"""
def childConnectionLost(childFD):
"""
Called when a file descriptor associated with the child process is
closed.
@type childFD: C{int}
@param childFD: The file descriptor which was closed.
"""
def processExited(reason):
"""
Called when the child process exits.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
@since: 8.2
"""
def processEnded(reason):
"""
Called when the child process exits and all file descriptors associated
with it have been closed.
@type reason: L{twisted.python.failure.Failure}
@param reason: A failure giving the reason the child process
terminated. The type of exception for this failure is either
L{twisted.internet.error.ProcessDone} or
L{twisted.internet.error.ProcessTerminated}.
"""
class IHalfCloseableProtocol(Interface):
"""
Implemented to indicate they want notification of half-closes.
TCP supports the notion of half-closing the connection, e.g.
closing the write side but still not stopping reading. A protocol
that implements this interface will be notified of such events,
instead of having connectionLost called.
"""
def readConnectionLost():
"""
Notification of the read connection being closed.
        This indicates the peer has half-closed the write side of its
        connection. It is now the responsibility of this protocol to call
loseConnection(). In addition, the protocol MUST make sure a
reference to it still exists (i.e. by doing a callLater with
one of its methods, etc.) as the reactor will only have a
reference to it if it is writing.
If the protocol does not do so, it might get garbage collected
without the connectionLost method ever being called.
"""
def writeConnectionLost():
"""
Notification of the write connection being closed.
This will never be called for TCP connections as TCP does not
support notification of this type of half-close.
"""
class IFileDescriptorReceiver(Interface):
"""
Protocols may implement L{IFileDescriptorReceiver} to receive file
descriptors sent to them. This is useful in conjunction with
L{IUNIXTransport}, which allows file descriptors to be sent between
processes on a single host.
"""
def fileDescriptorReceived(descriptor):
"""
Called when a file descriptor is received over the connection.
@param descriptor: The descriptor which was received.
@type descriptor: C{int}
@return: C{None}
"""
class IProtocolFactory(Interface):
"""
Interface for protocol factories.
"""
def buildProtocol(addr):
"""
Called when a connection has been established to addr.
If None is returned, the connection is assumed to have been refused,
and the Port will close the connection.
@type addr: (host, port)
@param addr: The address of the newly-established connection
@return: None if the connection was refused, otherwise an object
providing L{IProtocol}.
"""
def doStart():
"""
Called every time this is connected to a Port or Connector.
"""
def doStop():
"""
        Called every time this is disconnected from a Port or Connector.
"""
class ITransport(Interface):
"""
I am a transport for bytes.
I represent (and wrap) the physical connection and synchronicity
of the framework which is talking to the network. I make no
representations about whether calls to me will happen immediately
or require returning to a control loop, or whether they will happen
in the same or another thread. Consider methods of this class
(aside from getPeer) to be 'thrown over the wall', to happen at some
indeterminate time.
"""
def write(data):
"""
Write some data to the physical connection, in sequence, in a
non-blocking fashion.
If possible, make sure that it is all written. No data will
ever be lost, although (obviously) the connection may be closed
before it all gets through.
"""
def writeSequence(data):
"""
Write a list of strings to the physical connection.
If possible, make sure that all of the data is written to
the socket at once, without first copying it all into a
single string.
"""
def loseConnection():
"""
Close my connection, after writing all pending data.
Note that if there is a registered producer on a transport it
will not be closed until the producer has been unregistered.
"""
def getPeer():
"""
Get the remote address of this connection.
Treat this method with caution. It is the unfortunate result of the
CGI and Jabber standards, but should not be considered reliable for
        the usual host of reasons: port forwarding, proxying, firewalls, IP
masquerading, etc.
@return: An L{IAddress} provider.
"""
def getHost():
"""
Similar to getPeer, but returns an address describing this side of the
connection.
@return: An L{IAddress} provider.
"""
class ITCPTransport(ITransport):
"""
A TCP based transport.
"""
def loseWriteConnection():
"""
Half-close the write side of a TCP connection.
If the protocol instance this is attached to provides
IHalfCloseableProtocol, it will get notified when the operation is
done. When closing write connection, as with loseConnection this will
only happen when buffer has emptied and there is no registered
producer.
"""
def abortConnection():
"""
Close the connection abruptly.
Discards any buffered data, stops any registered producer,
and, if possible, notifies the other end of the unclean
closure.
@since: 11.1
"""
def getTcpNoDelay():
"""
        Return whether C{TCP_NODELAY} is enabled.
"""
def setTcpNoDelay(enabled):
"""
Enable/disable C{TCP_NODELAY}.
Enabling C{TCP_NODELAY} turns off Nagle's algorithm. Small packets are
sent sooner, possibly at the expense of overall throughput.
"""
def getTcpKeepAlive():
"""
        Return whether C{SO_KEEPALIVE} is enabled.
"""
def setTcpKeepAlive(enabled):
"""
Enable/disable C{SO_KEEPALIVE}.
Enabling C{SO_KEEPALIVE} sends packets periodically when the connection
is otherwise idle, usually once every two hours. They are intended
to allow detection of lost peers in a non-infinite amount of time.
"""
def getHost():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
def getPeer():
"""
Returns L{IPv4Address} or L{IPv6Address}.
"""
class IUNIXTransport(ITransport):
"""
Transport for stream-oriented unix domain connections.
"""
def sendFileDescriptor(descriptor):
"""
Send a duplicate of this (file, socket, pipe, etc) descriptor to the
other end of this connection.
The send is non-blocking and will be queued if it cannot be performed
immediately. The send will be processed in order with respect to other
C{sendFileDescriptor} calls on this transport, but not necessarily with
respect to C{write} calls on this transport. The send can only be
processed if there are also bytes in the normal connection-oriented send
buffer (ie, you must call C{write} at least as many times as you call
C{sendFileDescriptor}).
@param descriptor: An C{int} giving a valid file descriptor in this
process. Note that a I{file descriptor} may actually refer to a
socket, a pipe, or anything else POSIX tries to treat in the same
way as a file.
@return: C{None}
"""
class IOpenSSLServerConnectionCreator(Interface):
"""
A provider of L{IOpenSSLServerConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS servers.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.CertificateOptions} as your
C{contextFactory}. (For historical reasons, that class does not
actually I{implement} this interface; nevertheless it is usable in all
Twisted APIs which require a provider of this interface.)
"""
def serverConnectionForTLS(tlsProtocol):
"""
Create a connection for the given server protocol.
        @param tlsProtocol: the server protocol making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class IOpenSSLClientConnectionCreator(Interface):
"""
A provider of L{IOpenSSLClientConnectionCreator} can create
L{OpenSSL.SSL.Connection} objects for TLS clients.
@see: L{twisted.internet.ssl}
@note: Creating OpenSSL connection objects is subtle, error-prone, and
security-critical. Before implementing this interface yourself,
consider using L{twisted.internet.ssl.optionsForClientTLS} as your
C{contextFactory}.
"""
def clientConnectionForTLS(tlsProtocol):
"""
Create a connection for the given client protocol.
@param tlsProtocol: the client protocol making the request.
@type tlsProtocol: L{twisted.protocols.tls.TLSMemoryBIOProtocol}.
@return: an OpenSSL connection object configured appropriately for the
given Twisted protocol.
@rtype: L{OpenSSL.SSL.Connection}
"""
class ITLSTransport(ITCPTransport):
"""
A TCP transport that supports switching to TLS midstream.
Once TLS mode is started the transport will implement L{ISSLTransport}.
"""
def startTLS(contextFactory):
"""
Initiate TLS negotiation.
@param contextFactory: An object which creates appropriately configured
TLS connections.
For clients, use L{twisted.internet.ssl.optionsForClientTLS}; for
servers, use L{twisted.internet.ssl.CertificateOptions}.
@type contextFactory: L{IOpenSSLClientConnectionCreator} or
L{IOpenSSLServerConnectionCreator}, depending on whether this
L{ITLSTransport} is a server or not. If the appropriate interface
is not provided by the value given for C{contextFactory}, it must
be an old-style L{twisted.internet.ssl.ContextFactory} or similar.
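        For example, a minimal client-side sketch from inside a protocol
        method, assuming the transport provides this interface::
            from twisted.internet.ssl import optionsForClientTLS
            self.transport.startTLS(optionsForClientTLS(u'example.com'))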
"""
class ISSLTransport(ITCPTransport):
"""
A SSL/TLS based transport.
"""
def getPeerCertificate():
"""
Return an object with the peer's certificate info.
"""
class ICipher(Interface):
"""
A TLS cipher.
"""
fullName = Attribute(
"The fully qualified name of the cipher in L{unicode}."
)
class IAcceptableCiphers(Interface):
"""
A list of acceptable ciphers for a TLS context.
"""
def selectCiphers(availableCiphers):
"""
Choose which ciphers to allow to be negotiated on a TLS connection.
@param availableCiphers: A L{list} of L{ICipher} which gives the names
of all ciphers supported by the TLS implementation in use.
@return: A L{list} of L{ICipher} which represents the ciphers
which may be negotiated on the TLS connection. The result is
ordered by preference with more preferred ciphers appearing
earlier.
"""
class IProcessTransport(ITransport):
"""
A process transport.
"""
pid = Attribute(
"From before L{IProcessProtocol.makeConnection} is called to before "
"L{IProcessProtocol.processEnded} is called, C{pid} is an L{int} "
"giving the platform process ID of this process. C{pid} is L{None} "
"at all other times.")
def closeStdin():
"""
Close stdin after all data has been written out.
"""
def closeStdout():
"""
Close stdout.
"""
def closeStderr():
"""
Close stderr.
"""
def closeChildFD(descriptor):
"""
Close a file descriptor which is connected to the child process, identified
by its FD in the child process.
"""
def writeToChild(childFD, data):
"""
Similar to L{ITransport.write} but also allows the file descriptor in
the child process which will receive the bytes to be specified.
@type childFD: C{int}
@param childFD: The file descriptor to which to write.
@type data: C{str}
@param data: The bytes to write.
@return: C{None}
@raise KeyError: If C{childFD} is not a file descriptor that was mapped
in the child when L{IReactorProcess.spawnProcess} was used to create
it.
"""
def loseConnection():
"""
Close stdin, stderr and stdout.
"""
def signalProcess(signalID):
"""
Send a signal to the process.
@param signalID: can be
- one of C{"KILL"}, C{"TERM"}, or C{"INT"}.
These will be implemented in a
cross-platform manner, and so should be used
if possible.
- an integer, where it represents a POSIX
signal ID.
@raise twisted.internet.error.ProcessExitedAlready: If the process has
already exited.
@raise OSError: If the C{os.kill} call fails with an errno different
from C{ESRCH}.
"""
class IServiceCollection(Interface):
"""
An object which provides access to a collection of services.
"""
def getServiceNamed(serviceName):
"""
Retrieve the named service from this application.
Raise a C{KeyError} if there is no such service name.
"""
def addService(service):
"""
Add a service to this collection.
"""
def removeService(service):
"""
Remove a service from this collection.
"""
class IUDPTransport(Interface):
"""
Transport for UDP DatagramProtocols.
"""
def write(packet, addr=None):
"""
Write packet to given address.
        @param addr: a tuple of (ip, port). For connected transports this
            must be the address the transport is connected to, or C{None}.
            In non-connected mode this is mandatory.
@raise twisted.internet.error.MessageLengthError: C{packet} was too
long.
"""
def connect(host, port):
"""
Connect the transport to an address.
This changes it to connected mode. Datagrams can only be sent to
this address, and will only be received from this address. In addition
the protocol's connectionRefused method might get called if destination
is not receiving datagrams.
@param host: an IP address, not a domain name ('127.0.0.1', not 'localhost')
@param port: port to connect to.
"""
def getHost():
"""
Get this port's host address.
@return: an address describing the listening port.
@rtype: L{IPv4Address} or L{IPv6Address}.
"""
def stopListening():
"""
Stop listening on this port.
        If it does not complete immediately, a L{Deferred} will be returned
        that fires upon completion.
"""
def setBroadcastAllowed(enabled):
"""
Set whether this port may broadcast.
@param enabled: Whether the port may broadcast.
@type enabled: L{bool}
"""
def getBroadcastAllowed():
"""
Checks if broadcast is currently allowed on this port.
@return: Whether this port may broadcast.
@rtype: L{bool}
"""
class IUNIXDatagramTransport(Interface):
"""
Transport for UDP PacketProtocols.
"""
def write(packet, address):
"""
Write packet to given address.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
class IUNIXDatagramConnectedTransport(Interface):
"""
Transport for UDP ConnectedPacketProtocols.
"""
def write(packet):
"""
Write packet to address we are connected to.
"""
def getHost():
"""
Returns L{UNIXAddress}.
"""
def getPeer():
"""
Returns L{UNIXAddress}.
"""
class IMulticastTransport(Interface):
"""
Additional functionality for multicast UDP.
"""
def getOutgoingInterface():
"""
Return interface of outgoing multicast packets.
"""
def setOutgoingInterface(addr):
"""
Set interface for outgoing multicast packets.
Returns Deferred of success.
"""
def getLoopbackMode():
"""
        Return whether loopback mode is enabled.
"""
def setLoopbackMode(mode):
"""
        Set whether loopback mode is enabled.
"""
def getTTL():
"""
Get time to live for multicast packets.
"""
def setTTL(ttl):
"""
Set time to live on multicast packets.
"""
def joinGroup(addr, interface=""):
"""
Join a multicast group. Returns L{Deferred} of success or failure.
If an error occurs, the returned L{Deferred} will fail with
L{error.MulticastJoinError}.
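        For example, a minimal sketch from within
        L{DatagramProtocol.startProtocol}, where the transport provides this
        interface::
            self.transport.joinGroup('228.0.0.5')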
"""
def leaveGroup(addr, interface=""):
"""
Leave multicast group, return L{Deferred} of success.
"""
class IStreamClientEndpoint(Interface):
"""
A stream client endpoint is a place that L{ClientFactory} can connect to.
For example, a remote TCP host/port pair would be a TCP client endpoint.
@since: 10.1
"""
def connect(protocolFactory):
"""
Connect the C{protocolFactory} to the location specified by this
L{IStreamClientEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
        @return: A L{Deferred} that results in an L{IProtocol} upon successful
            connection, and which otherwise fails with a L{ConnectError}
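        For example, a minimal sketch with a TCP endpoint (C{EchoFactory}
        stands in for an L{IProtocolFactory} provider you would define
        yourself)::
            from twisted.internet import reactor
            from twisted.internet.endpoints import TCP4ClientEndpoint
            endpoint = TCP4ClientEndpoint(reactor, 'example.com', 80)
            d = endpoint.connect(EchoFactory())
            d.addCallback(lambda proto: proto.transport.loseConnection())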
"""
class IStreamServerEndpoint(Interface):
"""
A stream server endpoint is a place that a L{Factory} can listen for
incoming connections.
@since: 10.1
"""
def listen(protocolFactory):
"""
Listen with C{protocolFactory} at the location specified by this
L{IStreamServerEndpoint} provider.
@param protocolFactory: A provider of L{IProtocolFactory}
        @return: A L{Deferred} that results in an L{IListeningPort}, or fails with a
L{CannotListenError}
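        For example, a minimal sketch with a TCP endpoint (C{factory} is an
        L{IProtocolFactory} provider you supply)::
            from twisted.internet import reactor
            from twisted.internet.endpoints import TCP4ServerEndpoint
            endpoint = TCP4ServerEndpoint(reactor, 8000)
            d = endpoint.listen(factory)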
"""
class IStreamServerEndpointStringParser(Interface):
"""
An L{IStreamServerEndpointStringParser} is like an
L{IStreamClientEndpointStringParser}, except for L{IStreamServerEndpoint}s
instead of clients. It integrates with L{endpoints.serverFromString} in
much the same way.
"""
prefix = Attribute(
"""
@see: L{IStreamClientEndpointStringParser.prefix}
"""
)
def parseStreamServer(reactor, *args, **kwargs):
"""
Parse a stream server endpoint from a reactor and string-only arguments
and keyword arguments.
@see: L{IStreamClientEndpointStringParser.parseStreamClient}
@return: a stream server endpoint
@rtype: L{IStreamServerEndpoint}
"""
class IStreamClientEndpointStringParser(Interface):
"""
This interface is deprecated since Twisted 14.0; please use the
L{IStreamClientEndpointStringParserWithReactor} interface instead.
An L{IStreamClientEndpointStringParser} is a parser which can convert
a set of string C{*args} and C{**kwargs} into an L{IStreamClientEndpoint}
provider.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParser} plugin in the
C{twisted.plugins} package, that plugin's C{parseStreamClient} method will
be used to produce endpoints for any description string that begins with
the result of that L{IStreamClientEndpointStringParser}'s prefix attribute.
If a L{IStreamClientEndpointStringParserWithReactor} plugin and
L{IStreamClientEndpointStringParser} plugin share the same prefix, the
L{IStreamClientEndpointStringParserWithReactor} plugin will be preferred.
"""
prefix = Attribute(
"""
A C{str}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParser} plugin which had C{"foo"} for its
C{prefix} attribute would be called for endpoint descriptions like
C{"foo:bar:baz"} or C{"foo:"}.
"""
)
def parseStreamClient(*args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
endpoint matches the return value from this
L{IStreamClientEndpointStringParser}'s C{prefix} method.
@param args: The string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{"my-type:foo:bar:baz=qux"}, C{args} would be
C{('foo','bar')}
@param kwargs: The string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz='qux')}.
@return: a client endpoint
@rtype: L{IStreamClientEndpoint}
"""
deprecate.deprecatedModuleAttribute(
Version("Twisted", 14, 0, 0),
"This interface has been superseded by "
"IStreamClientEndpointStringParserWithReactor.",
__name__,
"IStreamClientEndpointStringParser")
class IStreamClientEndpointStringParserWithReactor(Interface):
"""
An L{IStreamClientEndpointStringParserWithReactor} is a parser which can
convert a set of string C{*args} and C{**kwargs} into an
L{IStreamClientEndpoint} provider. It's much like
L{IStreamClientEndpointStringParser}, except that the reactor is passed
along to L{parseStreamClient} too.
This interface is really only useful in the context of the plugin system
for L{endpoints.clientFromString}. See the document entitled "I{The
Twisted Plugin System}" for more details on how to write a plugin.
If you place an L{IStreamClientEndpointStringParserWithReactor} plugin in
the C{twisted.plugins} package, that plugin's C{parseStreamClient} method
will be used to produce endpoints for any description string that begins
with the result of that L{IStreamClientEndpointStringParserWithReactor}'s
prefix attribute.
If a L{IStreamClientEndpointStringParserWithReactor} plugin and
L{IStreamClientEndpointStringParser} plugin share the same prefix, the
L{IStreamClientEndpointStringParserWithReactor} plugin will be preferred.
"""
prefix = Attribute(
"""
L{bytes}, the description prefix to respond to. For example, an
L{IStreamClientEndpointStringParserWithReactor} plugin which had
C{b"foo"} for its C{prefix} attribute would be called for endpoint
descriptions like C{b"foo:bar:baz"} or C{b"foo:"}.
"""
)
def parseStreamClient(reactor, *args, **kwargs):
"""
This method is invoked by L{endpoints.clientFromString}, if the type of
endpoint matches the return value from this
L{IStreamClientEndpointStringParserWithReactor}'s C{prefix} method.
@param reactor: The reactor passed to L{endpoints.clientFromString}.
@param args: The byte string arguments, minus the endpoint type, in the
endpoint description string, parsed according to the rules
described in L{endpoints.quoteStringArgument}. For example, if the
description were C{b"my-type:foo:bar:baz=qux"}, C{args} would be
C{(b'foo', b'bar')}
@param kwargs: The byte string arguments from the endpoint description
passed as keyword arguments. For example, if the description were
C{b"my-type:foo:bar:baz=qux"}, C{kwargs} would be
C{dict(baz=b'qux')}.
@return: a client endpoint
@rtype: a provider of L{IStreamClientEndpoint}
"""
| 33.483296
| 97
| 0.630116
|
4a12eea9db939e3ddcee6e8980de25d79a2d0459
| 754
|
py
|
Python
|
scripts/airflow/dags/replicator_transfer_flow.py
|
CityofToronto/bdit_move_etl
|
ca3aa7c4d543ddd304a19a3620a570d123e5817b
|
[
"MIT"
] | null | null | null |
scripts/airflow/dags/replicator_transfer_flow.py
|
CityofToronto/bdit_move_etl
|
ca3aa7c4d543ddd304a19a3620a570d123e5817b
|
[
"MIT"
] | 2
|
2021-10-01T14:37:47.000Z
|
2022-03-11T17:11:27.000Z
|
scripts/airflow/dags/replicator_transfer_flow.py
|
CityofToronto/bdit_move_etl
|
ca3aa7c4d543ddd304a19a3620a570d123e5817b
|
[
"MIT"
] | null | null | null |
"""
replicator_transfer_flow
Completes replication of FLOW data by loading it from `/data/replicator/flashcrow-FLOW`
into the database.
This is intended to run after `replicator` copies FLOW data from the City intranet to
`/data/replicator/flashcrow-FLOW`.
"""
# pylint: disable=pointless-statement
from datetime import datetime
from airflow.operators.bash_operator import BashOperator
from airflow_utils import create_bash_task_nested, create_dag
START_DATE = datetime(2020, 10, 15)
SCHEDULE_INTERVAL = '30 3 * * 1,3,5,6'
DAG = create_dag(__file__, __doc__, START_DATE, SCHEDULE_INTERVAL)
REPLICATOR_TRANSFER_FLOW = BashOperator(
task_id='replicator_transfer_flow',
bash_command='/replicator_transfer/replicator-transfer-FLOW.sh',
dag=DAG
)
| 29
| 87
| 0.801061
|
4a12eecff400acda2815e2e1df395d7e9ee3d0f8
| 747
|
py
|
Python
|
tests/test_installation.py
|
abhishek-kumar-code/redfishtool_nsfcac
|
928edbf2c9c3ebfd6cb4722a2a77b1e63372211c
|
[
"MIT"
] | 8,477
|
2016-05-19T08:57:19.000Z
|
2020-08-12T11:08:08.000Z
|
tests/test_installation.py
|
abhishek-kumar-code/redfishtool_nsfcac
|
928edbf2c9c3ebfd6cb4722a2a77b1e63372211c
|
[
"MIT"
] | 130
|
2016-05-19T13:38:05.000Z
|
2020-08-12T07:39:26.000Z
|
tests/test_installation.py
|
abhishek-kumar-code/redfishtool_nsfcac
|
928edbf2c9c3ebfd6cb4722a2a77b1e63372211c
|
[
"MIT"
] | 360
|
2016-05-19T14:36:20.000Z
|
2020-07-30T21:55:56.000Z
|
"""Test if http-prompt is installed correctly."""
import subprocess
import pytest
from subprocess import PIPE
from .utils import get_http_prompt_path
from http_prompt import __version__
def run_http_prompt(args):
"""Run http-prompt from terminal."""
bin_path = get_http_prompt_path()
p = subprocess.Popen([bin_path] + args, stdin=PIPE, stdout=PIPE)
return p.communicate()
@pytest.mark.slow
def test_help():
out, err = run_http_prompt(['--help'])
assert out.startswith(b'Usage: http-prompt')
@pytest.mark.slow
def test_version():
out, err = run_http_prompt(['--version'])
version = __version__
if hasattr(version, 'encode'):
version = version.encode('ascii')
assert out.rstrip() == version
| 22.636364
| 68
| 0.702811
|
4a12effb4ca6657563f4a701ddf5158e9ce1ac92
| 2,532
|
py
|
Python
|
sabnzbd/utils/upload.py
|
pl77/sabnzbd
|
7e87a0c759944966ce7318134d8ed89b569ae73f
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
sabnzbd/utils/upload.py
|
pl77/sabnzbd
|
7e87a0c759944966ce7318134d8ed89b569ae73f
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
sabnzbd/utils/upload.py
|
pl77/sabnzbd
|
7e87a0c759944966ce7318134d8ed89b569ae73f
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python3 -OO
# Copyright 2009-2019 The SABnzbd-Team <team@sabnzbd.org>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.utils.upload - File association functions for adding nzb files to sabnzbd
"""
import os
import logging
import urllib.request
import urllib.parse
import urllib.error
import sabnzbd.cfg as cfg
from sabnzbd.filesystem import get_ext, get_filename
from sabnzbd.constants import VALID_ARCHIVES, VALID_NZB_FILES
from sabnzbd.dirscanner import process_nzb_archive_file, process_single_nzb
from sabnzbd.misc import get_from_url
def upload_file(url, fp):
""" Function for uploading nzbs to a running SABnzbd instance """
try:
fp = urllib.parse.quote_plus(fp)
url = "%s&mode=addlocalfile&name=%s" % (url, fp)
# Add local API-key if it wasn't already in the registered URL
apikey = cfg.api_key()
if apikey and "apikey" not in url:
url = "%s&apikey=%s" % (url, apikey)
if "apikey" not in url:
# Use alternative login method
username = cfg.username()
password = cfg.password()
if username and password:
url = "%s&ma_username=%s&ma_password=%s" % (url, username, password)
get_from_url(url)
except:
logging.error("Failed to upload file: %s", fp)
logging.info("Traceback: ", exc_info=True)
def add_local(f):
""" Function for easily adding nzb/zip/rar/nzb.gz to SABnzbd """
if os.path.exists(f):
fn = get_filename(f)
if fn:
if get_ext(fn) in VALID_ARCHIVES:
process_nzb_archive_file(fn, f, keep=True)
elif get_ext(fn) in VALID_NZB_FILES:
process_single_nzb(fn, f, keep=True)
else:
logging.error("Filename not found: %s", f)
else:
logging.error("File not found: %s", f)
| 37.235294
| 84
| 0.67733
|
4a12f004ba24f22ba767fdb7ec3aadadda6e26a1
| 3,276
|
py
|
Python
|
examples/daal4py/decision_forest_regression_hist_batch.py
|
a-vasenin/scikit-learn-intelex
|
b81f81098a7f9302c6a052a5d22ecd372682844d
|
[
"Apache-2.0"
] | null | null | null |
examples/daal4py/decision_forest_regression_hist_batch.py
|
a-vasenin/scikit-learn-intelex
|
b81f81098a7f9302c6a052a5d22ecd372682844d
|
[
"Apache-2.0"
] | null | null | null |
examples/daal4py/decision_forest_regression_hist_batch.py
|
a-vasenin/scikit-learn-intelex
|
b81f81098a7f9302c6a052a5d22ecd372682844d
|
[
"Apache-2.0"
] | null | null | null |
#===============================================================================
# Copyright 2021-2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
# daal4py Decision Forest Regression example of Hist method for shared memory systems
import daal4py as d4p
import numpy as np
# let's try to use pandas' fast csv reader
try:
import pandas
def read_csv(f, c, t=np.float64):
return pandas.read_csv(f, usecols=c, delimiter=',', header=None, dtype=t)
except ImportError:
# fall back to numpy loadtxt
def read_csv(f, c, t=np.float64):
return np.loadtxt(f, usecols=c, delimiter=',', ndmin=2, dtype=t)
def main(readcsv=read_csv, method='hist'):
infile = "./data/batch/df_regression_train.csv"
testfile = "./data/batch/df_regression_test.csv"
# Configure a Decision Forest regression training object
train_algo = d4p.decision_forest_regression_training(
method=method,
maxBins=512,
minBinSize=1,
nTrees=100,
varImportance='MDA_Raw',
bootstrap=True,
engine=d4p.engines_mt2203(seed=777),
resultsToCompute='computeOutOfBagError|computeOutOfBagErrorPerObservation'
)
# Read data. Let's have 13 independent,
# and 1 dependent variables (for each observation)
indep_data = readcsv(infile, range(13), t=np.float32)
dep_data = readcsv(infile, range(13, 14), t=np.float32)
# Now train/compute, the result provides the model for prediction
train_result = train_algo.compute(indep_data, dep_data)
# Training result provides (depending on parameters) model,
# outOfBagError, outOfBagErrorPerObservation and/or variableImportance
# Now let's do some prediction
predict_algo = d4p.decision_forest_regression_prediction()
# read test data (with same #features)
pdata = readcsv(testfile, range(13), t=np.float32)
ptdata = readcsv(testfile, range(13, 14), t=np.float32)
# now predict using the model from the training above
predict_result = predict_algo.compute(pdata, train_result.model)
# The prediction result provides prediction
assert predict_result.prediction.shape == (pdata.shape[0], dep_data.shape[1])
return (train_result, predict_result, ptdata)
if __name__ == "__main__":
(train_result, predict_result, ptdata) = main()
print("\nVariable importance results:\n", train_result.variableImportance)
print("\nOOB error:\n", train_result.outOfBagError)
print(
"\nDecision forest prediction results (first 10 rows):\n",
predict_result.prediction[0:10]
)
print("\nGround truth (first 10 rows):\n", ptdata[0:10])
print('All looks good!')
| 39.46988
| 90
| 0.682845
|
4a12f07122f944eaeb388cf89750e0805cf37043
| 547
|
py
|
Python
|
modelbased-rl/MBPO/ED2-MBPO/setup.py
|
TJU-DRL-LAB/ai-optimizer
|
f558cc524c66460913989519779873b371bf78bc
|
[
"MIT"
] | null | null | null |
modelbased-rl/MBPO/ED2-MBPO/setup.py
|
TJU-DRL-LAB/ai-optimizer
|
f558cc524c66460913989519779873b371bf78bc
|
[
"MIT"
] | null | null | null |
modelbased-rl/MBPO/ED2-MBPO/setup.py
|
TJU-DRL-LAB/ai-optimizer
|
f558cc524c66460913989519779873b371bf78bc
|
[
"MIT"
] | null | null | null |
from distutils.core import setup
from setuptools import find_packages
setup(
name='ed2-mbpo',
packages=find_packages(),
version='0.0.1',
description='ED2-Model-based policy optimization',
long_description=open('README.md').read(),
author='XXX',
author_email='XXX',
url='XXX',
entry_points={
'console_scripts': (
'mbpo=softlearning.scripts.console_scripts:main',
'viskit=mbpo.scripts.console_scripts:main'
)
},
requires=(),
zip_safe=True,
license='MIT'
)
| 23.782609
| 61
| 0.634369
|
4a12f14233500b169b73d29dd7ca8f78ba2e8242
| 4,796
|
py
|
Python
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_hash.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 2,757
|
2018-04-28T21:41:36.000Z
|
2022-03-29T06:33:36.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_hash.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 20
|
2019-07-23T15:29:32.000Z
|
2022-01-21T12:53:04.000Z
|
AppPkg/Applications/Python/Python-2.7.2/Lib/test/test_hash.py
|
CEOALT1/RefindPlusUDK
|
116b957ad735f96fbb6d80a0ba582046960ba164
|
[
"BSD-2-Clause"
] | 449
|
2018-05-09T05:54:05.000Z
|
2022-03-30T14:54:18.000Z
|
# test the invariant that
# iff a==b then hash(a)==hash(b)
#
# Also test that hash implementations are inherited as expected
import unittest
from test import test_support
from collections import Hashable
class HashEqualityTestCase(unittest.TestCase):
def same_hash(self, *objlist):
# Hash each object given and fail if
# the hash values are not all the same.
hashed = map(hash, objlist)
for h in hashed[1:]:
if h != hashed[0]:
self.fail("hashed values differ: %r" % (objlist,))
def test_numeric_literals(self):
self.same_hash(1, 1L, 1.0, 1.0+0.0j)
self.same_hash(0, 0L, 0.0, 0.0+0.0j)
self.same_hash(-1, -1L, -1.0, -1.0+0.0j)
self.same_hash(-2, -2L, -2.0, -2.0+0.0j)
def test_coerced_integers(self):
self.same_hash(int(1), long(1), float(1), complex(1),
int('1'), float('1.0'))
self.same_hash(int(-2**31), long(-2**31), float(-2**31))
self.same_hash(int(1-2**31), long(1-2**31), float(1-2**31))
self.same_hash(int(2**31-1), long(2**31-1), float(2**31-1))
# for 64-bit platforms
self.same_hash(int(2**31), long(2**31), float(2**31))
self.same_hash(int(-2**63), long(-2**63), float(-2**63))
self.same_hash(int(1-2**63), long(1-2**63))
self.same_hash(int(2**63-1), long(2**63-1))
self.same_hash(long(2**63), float(2**63))
def test_coerced_floats(self):
self.same_hash(long(1.23e300), float(1.23e300))
self.same_hash(float(0.5), complex(0.5, 0.0))
_default_hash = object.__hash__
class DefaultHash(object): pass
_FIXED_HASH_VALUE = 42
class FixedHash(object):
def __hash__(self):
return _FIXED_HASH_VALUE
class OnlyEquality(object):
def __eq__(self, other):
return self is other
# Trick to suppress Py3k warning in 2.x
__hash__ = None
del OnlyEquality.__hash__
class OnlyInequality(object):
def __ne__(self, other):
return self is not other
class OnlyCmp(object):
def __cmp__(self, other):
return cmp(id(self), id(other))
# Trick to suppress Py3k warning in 2.x
__hash__ = None
del OnlyCmp.__hash__
class InheritedHashWithEquality(FixedHash, OnlyEquality): pass
class InheritedHashWithInequality(FixedHash, OnlyInequality): pass
class InheritedHashWithCmp(FixedHash, OnlyCmp): pass
class NoHash(object):
__hash__ = None
class HashInheritanceTestCase(unittest.TestCase):
default_expected = [object(),
DefaultHash(),
OnlyEquality(),
OnlyInequality(),
OnlyCmp(),
]
fixed_expected = [FixedHash(),
InheritedHashWithEquality(),
InheritedHashWithInequality(),
InheritedHashWithCmp(),
]
error_expected = [NoHash()]
def test_default_hash(self):
for obj in self.default_expected:
self.assertEqual(hash(obj), _default_hash(obj))
def test_fixed_hash(self):
for obj in self.fixed_expected:
self.assertEqual(hash(obj), _FIXED_HASH_VALUE)
def test_error_hash(self):
for obj in self.error_expected:
self.assertRaises(TypeError, hash, obj)
def test_hashable(self):
objects = (self.default_expected +
self.fixed_expected)
for obj in objects:
self.assertIsInstance(obj, Hashable)
def test_not_hashable(self):
for obj in self.error_expected:
self.assertNotIsInstance(obj, Hashable)
# Issue #4701: Check that some builtin types are correctly hashable
# (This test only used to fail in Python 3.0, but has been included
# in 2.x along with the lazy call to PyType_Ready in PyObject_Hash)
class DefaultIterSeq(object):
seq = range(10)
def __len__(self):
return len(self.seq)
def __getitem__(self, index):
return self.seq[index]
class HashBuiltinsTestCase(unittest.TestCase):
hashes_to_check = [xrange(10),
enumerate(xrange(10)),
iter(DefaultIterSeq()),
iter(lambda: 0, 0),
]
def test_hashes(self):
_default_hash = object.__hash__
for obj in self.hashes_to_check:
self.assertEqual(hash(obj), _default_hash(obj))
def test_main():
test_support.run_unittest(HashEqualityTestCase,
HashInheritanceTestCase,
HashBuiltinsTestCase)
if __name__ == "__main__":
test_main()
| 33.075862
| 70
| 0.590909
|
4a12f17ba3aa8f24c96b74caa5c34a1af5dd0eef
| 742
|
py
|
Python
|
vote/migrations/0002_auto_20180323_0639.py
|
RohanDukare/OnlineVoting
|
e1c355fab0fdd21cc63c4be9e16fc55731479f17
|
[
"MIT"
] | 7
|
2019-05-17T06:12:57.000Z
|
2021-02-07T03:48:57.000Z
|
vote/migrations/0002_auto_20180323_0639.py
|
RohanDukare/OnlineVoting
|
e1c355fab0fdd21cc63c4be9e16fc55731479f17
|
[
"MIT"
] | null | null | null |
vote/migrations/0002_auto_20180323_0639.py
|
RohanDukare/OnlineVoting
|
e1c355fab0fdd21cc63c4be9e16fc55731479f17
|
[
"MIT"
] | 5
|
2019-05-17T06:13:10.000Z
|
2021-02-07T03:49:21.000Z
|
# Generated by Django 2.0.3 on 2018-03-23 01:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('Registration', '0002_auto_20180323_0628'),
('vote', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='votes',
name='vid',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='Registration.Candidate'),
preserve_default=False,
),
migrations.AddField(
model_name='votes',
name='vote',
field=models.IntegerField(default=1),
preserve_default=False,
),
]
| 26.5
| 121
| 0.601078
|
4a12f26f8d02b74fb1d2a059cf1b1315bae5ec73
| 892
|
py
|
Python
|
examples/pressure_calc.py
|
NathanERa/atoMEC
|
309eb632b48c638ff30cc7c82fea2f46463480a9
|
[
"BSD-3-Clause"
] | null | null | null |
examples/pressure_calc.py
|
NathanERa/atoMEC
|
309eb632b48c638ff30cc7c82fea2f46463480a9
|
[
"BSD-3-Clause"
] | null | null | null |
examples/pressure_calc.py
|
NathanERa/atoMEC
|
309eb632b48c638ff30cc7c82fea2f46463480a9
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
"""
Computes the pressure using finite-differences for Beryllium
"""
from atoMEC import Atom, models, config
# use all cores
config.numcores = -1
atom_species = "Be" # helium
r_s = 2.35 # Wigner-Seitz radius of room-temp Be
temperature = 25 # temperature in eV
# initialize the atom object
Be = Atom(atom_species, radius=r_s, temp=temperature, units_temp="eV")
# initialize the model
model = models.ISModel(Be, bc="dirichlet")
# compute the total energy
# define the number of levels to scan for
# note that nmax should be much greater than the actual levels interested in
# and should be tested for convergence
nmax = 40
lmax = 3
scf_output = model.CalcEnergy(nmax, lmax, grid_params={"ngrid": 2000})
# with the energy output compute the pressure
# we print the scf info to see what's happening
pressure = model.CalcPressure(Be, scf_output, write_info=True)
| 27.875
| 76
| 0.751121
|
4a12f3260e322d58c07691508863df971a6a7d5c
| 141
|
py
|
Python
|
dd_1/Part 1/Section 09 - Modules, Packages and Namespaces/08 - structuring_package_imports/common/validators/boolean.py
|
Rebell-Leader/bg
|
616a40286fe1d34db2916762c477676ed8067cdb
|
[
"Apache-2.0"
] | 3,266
|
2017-08-06T16:51:46.000Z
|
2022-03-30T07:34:24.000Z
|
python-tuts/0-beginner/8-Modules_Packages_Namespaces/08 - Pkg Imports/common/validators/boolean.py
|
Kemal321/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
0e7bad7ac30f4ceda3a78cd49f76bd6035982972
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
python-tuts/0-beginner/8-Modules_Packages_Namespaces/08 - Pkg Imports/common/validators/boolean.py
|
Kemal321/Artificial-Intelligence-Deep-Learning-Machine-Learning-Tutorials
|
0e7bad7ac30f4ceda3a78cd49f76bd6035982972
|
[
"Apache-2.0"
] | 1,449
|
2017-08-06T17:40:59.000Z
|
2022-03-31T12:03:24.000Z
|
# boolean.py
__all__ = ['is_boolean']
def is_boolean(arg):
pass
def boolean_helper_1():
pass
def boolean_helper_2():
pass
| 8.8125
| 24
| 0.652482
|
4a12f3ad6a0ae8ea56ed8961aae41e1e7e9bf1f0
| 3,590
|
py
|
Python
|
src/practice-useless/contour.py
|
yogendra-yatnalkar/Diabetic_Retinopathy_Detection
|
61220d9548a49e919ee4e0317eec9f7b874b3802
|
[
"MIT"
] | 9
|
2020-05-23T16:54:07.000Z
|
2022-02-17T17:48:04.000Z
|
src/practice-useless/contour.py
|
yogendra-yatnalkar/Diabetic_Retinopathy_Detection
|
61220d9548a49e919ee4e0317eec9f7b874b3802
|
[
"MIT"
] | 13
|
2020-09-25T22:13:11.000Z
|
2022-03-12T00:31:01.000Z
|
src/practice-useless/contour.py
|
yogendra-yatnalkar/Diabetic_Retinopathy_Detection
|
61220d9548a49e919ee4e0317eec9f7b874b3802
|
[
"MIT"
] | 2
|
2021-05-01T05:57:24.000Z
|
2022-03-16T08:16:04.000Z
|
# Listing all the imports
import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import os
import time
import imutils
import math
# image_name = input("Enter the name of image to be processed : ")
image = cv2.imread('4.png')
image = imutils.resize(image, height = 520, width = 400)
# image = cv2.resize(image, (600, 600))
org = image.copy()
def displayImage(image, display_name):
cv2.namedWindow(display_name,cv2.WINDOW_AUTOSIZE)
cv2.imshow(display_name, image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (5, 5), 0)
thresh = cv2.threshold(blurred, 10, 255, cv2.THRESH_BINARY)[1]
cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = imutils.grab_contours(cnts)
cnts = max(cnts, key=cv2.contourArea)
# print(cnts)
# print(thresh[cX,cY])
leftmost = tuple(cnts[cnts[:,:,0].argmin()][0])
rightmost = tuple(cnts[cnts[:,:,0].argmax()][0])
topmost = tuple(cnts[cnts[:,:,1].argmin()][0])
bottommost = tuple(cnts[cnts[:,:,1].argmax()][0])
# print('leftmost',leftmost)
# print('rightmost',rightmost)
# print('topmost',topmost)
# print('bottommost',bottommost)
x1 = leftmost[0]
y1 = topmost[1]
x2 = rightmost[0]
y2 = bottommost[1]
ht = int(y2 - y1)  # height: bottommost y minus topmost y
wd = int(x2 - x1)
print(x1,y1,'---',x2,y2)
c = cnts
M = cv2.moments(cnts)
if( M["m00"]==0):
cX, cY = 0, 0
else:
cX = int(M["m10"] / M["m00"])
cY = int(M["m01"] / M["m00"])
print(cX,cY)
r = min(cX, cY)  # radius: the smaller of the two centroid coordinates
# draw the contour and center of the shape on the image
cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
cv2.putText(image, "center", (cX - 20, cY - 20),
cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 255, 255), 2)
def Radius_Reduction(img,cX,cY,r):
h,w,c=img.shape
Frame=np.zeros((h,w,c),dtype=np.uint8)
# cv2.circle(Frame,(int(math.floor(w/2)),int(math.floor(h/2))),int(math.floor((h*PARAM)/float(2*100))), (255,255,255), -1)
cv2.circle(Frame,(int(cX),int(cY)),int(r), (255,255,255), -1)
Frame1=cv2.cvtColor(Frame, cv2.COLOR_BGR2GRAY)
img1 =cv2.bitwise_and(img,img,mask=Frame1)
return img1
# crop = org[y1:y2 - y1, x1:x2 - x1]
crop = org[y1:y2, x1:x2]
crop = imutils.resize(crop, height = 520, width = 400)
#-----CLAHE-----------------------------------
lab= cv2.cvtColor(crop, cv2.COLOR_BGR2LAB)
l, a, b = cv2.split(lab)
clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8,8))
cl = clahe.apply(l)
limg = cv2.merge((cl,a,b))
final = cv2.cvtColor(limg, cv2.COLOR_LAB2BGR)
# -------------------------------------------
def subtract_median_bg_image(im):
k = np.max(im.shape)//20*2+1
bg = cv2.medianBlur(im, k)
return cv2.addWeighted (im, 4, bg, -4, 100)
smed = subtract_median_bg_image(final)
img1 = Radius_Reduction(smed,cX,cY,r)
#-----Augmentation-------------------------------
x_flip = cv2.flip( crop, 0 )
y_flip = cv2.flip( crop, 1 )
xy_flip = cv2.flip(x_flip,1)
imgf = cv2.bitwise_and(smed,final)
displayImage(imgf, 'imgf')
# normalizedImg = cv2.normalize(imgf, normalizedImg, alpha=50, beta=255, norm_type=cv2.NORM_MINMAX)
# show the image
# displayImage(image, 'cnts')
# displayImage(thresh, 'thresh')
displayImage(img1, 'img1')
displayImage(org, 'org')
displayImage(crop, 'crop')
displayImage(final, 'final')
# displayImage(normalizedImg, 'normalizedImg')
displayImage(smed, 'smed')
# cv2.imwrite('ans.png',smed)
# cv2.imwrite('anscr.png',crop)
# displayImage(x_flip, 'x')
# displayImage(y_flip, 'y')
# displayImage(xy_flip, 'xy')
cv2.waitKey(0)
cv2.destroyAllWindows()
| 26.992481
| 126
| 0.652368
|
4a12f3fefbc6804255ff56e8a6b115a08af63eae
| 3,113
|
py
|
Python
|
Loan-Defaulters/code.py
|
nicsquality/ga-learner-dsmp-repo
|
eb7bd8fae2525dd00d2caae4b87cd2f6e165c148
|
[
"MIT"
] | null | null | null |
Loan-Defaulters/code.py
|
nicsquality/ga-learner-dsmp-repo
|
eb7bd8fae2525dd00d2caae4b87cd2f6e165c148
|
[
"MIT"
] | null | null | null |
Loan-Defaulters/code.py
|
nicsquality/ga-learner-dsmp-repo
|
eb7bd8fae2525dd00d2caae4b87cd2f6e165c148
|
[
"MIT"
] | null | null | null |
# --------------
#Importing header files
import pandas as pd
from sklearn.model_selection import train_test_split
# Code starts here
data = pd.read_csv(path)
X = data.drop(columns = ['customer.id', 'paid.back.loan'])
y = data.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.3, random_state = 0)
# Code ends here
# --------------
#Importing header files
import matplotlib.pyplot as plt
# Code starts here
fully_paid = y_train.value_counts()
fully_paid.plot(kind = 'bar')
# Code ends here
# --------------
#Importing header files
import numpy as np
from sklearn.preprocessing import LabelEncoder
# Code starts here
X_train['int.rate'] = X_train['int.rate'].str.rstrip('%').astype('float') / 100
X_test['int.rate'] = X_test['int.rate'].str.rstrip('%').astype('float') / 100
num_df = X_train.select_dtypes(include=['number'])
cat_df = X_train.select_dtypes(include=['object'])
# Code ends here
# --------------
#Importing header files
import seaborn as sns
# Code starts here
cols = num_df.columns
fig, axes = plt.subplots(nrows = 9, ncols = 1)
for i in range(0, 9):
sns.boxplot(x= y_train, y= num_df[cols[i]], ax=axes[i])
# Code ends here
# --------------
# Code starts here
cols = cat_df.columns
fig, axes = plt.subplots(nrows = 2 , ncols = 2)
for i in range(0, 2):
for j in range(0, 2):
sns.countplot(x = X_train[cols[i*2+j]], hue= y_train, ax=axes[i,j])
# Code ends here
# --------------
#Importing header files
from sklearn.tree import DecisionTreeClassifier
# Code starts here
le = LabelEncoder()
model = DecisionTreeClassifier(random_state = 0)
X_train.fillna('NA', inplace = True)
X_test.fillna('NA', inplace = True)
y_train = y_train.map({'Yes': 1, 'No': 0})
y_test = y_test.map({'Yes': 1, 'No': 0})
for i in cat_df:
X_train[i] = le.fit_transform(X_train[i])
X_test[i] = le.transform(X_test[i])
model.fit(X_train, y_train)
acc = model.score(X_test, y_test)
# Code ends here
# --------------
#Importing header files
from sklearn.model_selection import GridSearchCV
#Parameter grid
parameter_grid = {'max_depth': np.arange(3,10), 'min_samples_leaf': range(10,50,10)}
# Code starts here
model_2 = DecisionTreeClassifier(random_state = 0)
p_tree = GridSearchCV(estimator = model_2, param_grid = parameter_grid, cv = 5)
p_tree.fit(X_train, y_train)
acc_2 = p_tree.score(X_test, y_test)
# Code ends here
# --------------
#Importing header files
from io import StringIO
from sklearn.tree import export_graphviz
from sklearn import tree
from sklearn import metrics
from IPython.display import Image
import pydotplus
# Code starts here
dot_data = export_graphviz(decision_tree=p_tree.best_estimator_, out_file=None, feature_names=X.columns, filled = True, class_names=['loan_paid_back_yes','loan_paid_back_no'])
graph_big = pydotplus.graph_from_dot_data(dot_data)
# show graph - do not delete/modify the code below this line
img_path = user_data_dir+'/file.png'
graph_big.write_png(img_path)
plt.figure(figsize=(20,15))
plt.imshow(plt.imread(img_path))
plt.axis('off')
plt.show()
# Code ends here
| 21.176871
| 175
| 0.697398
|
4a12f41c2dde811ec5b4e7de095295c9e25463c1
| 1,338
|
py
|
Python
|
dfvfs/volume/gpt_volume_system.py
|
DianaYejiKang/dfVFS_ubi
|
a2d5c0ca2d475336d338a2af5cddcac46e703ca2
|
[
"Apache-2.0"
] | 7
|
2015-02-28T17:46:25.000Z
|
2022-01-18T22:56:53.000Z
|
dfvfs/volume/gpt_volume_system.py
|
DianaYejiKang/dfVFS_ubi
|
a2d5c0ca2d475336d338a2af5cddcac46e703ca2
|
[
"Apache-2.0"
] | null | null | null |
dfvfs/volume/gpt_volume_system.py
|
DianaYejiKang/dfVFS_ubi
|
a2d5c0ca2d475336d338a2af5cddcac46e703ca2
|
[
"Apache-2.0"
] | 2
|
2019-08-28T23:47:08.000Z
|
2021-07-23T07:15:11.000Z
|
# -*- coding: utf-8 -*-
"""The GUID Partition Table (GPT) volume system."""
from dfvfs.lib import definitions
from dfvfs.volume import volume_system
class GPTVolume(volume_system.Volume):
"""Volume that uses pyvsgpt."""
def __init__(self, file_entry):
"""Initializes a GPT volume.
Args:
file_entry (GPTFileEntry): a GPT file entry.
"""
super(GPTVolume, self).__init__(file_entry.name)
self._file_entry = file_entry
def _Parse(self):
"""Extracts attributes and extents from the volume."""
vsgpt_partition = self._file_entry.GetGPTPartition()
volume_attribute = volume_system.VolumeAttribute(
'identifier', vsgpt_partition.identifier)
self._AddAttribute(volume_attribute)
volume_extent = volume_system.VolumeExtent(
vsgpt_partition.volume_offset, vsgpt_partition.size)
self._extents.append(volume_extent)
class GPTVolumeSystem(volume_system.VolumeSystem):
"""Volume system that uses pyvsgpt."""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_GPT
VOLUME_IDENTIFIER_PREFIX = 'p'
def _Parse(self):
"""Extracts sections and volumes from the volume system."""
root_file_entry = self._file_system.GetRootFileEntry()
for sub_file_entry in root_file_entry.sub_file_entries:
volume = GPTVolume(sub_file_entry)
self._AddVolume(volume)
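# A usage sketch, assuming the base VolumeSystem exposes an Open() method
# taking a path specification and a volume_identifiers property; the
# path_spec argument is illustrative.
def _ExampleListPartitions(path_spec):
  """Lists GPT partition identifiers, e.g. ['p1', 'p2']."""
  volume_system = GPTVolumeSystem()
  volume_system.Open(path_spec)
  return volume_system.volume_identifiers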
| 28.468085
| 63
| 0.735426
|
4a12f43a7c6b00e491ed9ab507efeba111376dfb
| 3,914
|
py
|
Python
|
analysis/explore_data.py
|
rhwhite/eventTracking
|
a9498cf56a4d82259c47555b7e5fae2f7c8a0b16
|
[
"MIT"
] | 1
|
2017-11-28T02:35:01.000Z
|
2017-11-28T02:35:01.000Z
|
analysis/explore_data.py
|
rhwhite/eventTracking
|
a9498cf56a4d82259c47555b7e5fae2f7c8a0b16
|
[
"MIT"
] | null | null | null |
analysis/explore_data.py
|
rhwhite/eventTracking
|
a9498cf56a4d82259c47555b7e5fae2f7c8a0b16
|
[
"MIT"
] | 2
|
2019-07-02T02:42:00.000Z
|
2020-07-28T05:56:45.000Z
|
"""
This script opens precipitation event data files with the Python debugger (pdb)
set up so that the data files can be explored interactively.
Author: Rachel White, rachel.white@cantab.net
"""
import os, errno
import numpy as np
import netCDF4
from netCDF4 import Dataset
import datetime as dt
import re
import sys
import Ngl
import xray
import math
import resource
import argparse
import pdb
from rhwhitepackages.readwrite import getunitsdesc
from rhwhitepackages.readwrite import xrayOpen
from rhwhitepackages.readwrite import getdirectory
rsrcV = resource.RLIMIT_AS
soft, hard = resource.getrlimit(rsrcV)
print 'Soft limit starts as :', soft
print 'Hard limit starts as :', hard
resource.setrlimit(rsrcV, (120000000000, hard)) #limit memory usage
# 137438953472
soft, hard = resource.getrlimit(rsrcV)
print 'Soft limit changed to :', soft
def memory_usage_psutil():
# return the memory usage in MB
import psutil
process = psutil.Process(os.getpid())
mem = process.get_memory_info()[0] / float(2 ** 20)
return mem
#print memory_usage_psutil()
parser = argparse.ArgumentParser(description="map event data")
parser.add_argument('--Data',type=str,nargs=1,help='type of Data, TRMM, ERAI,ERA20C, or CESM')
parser.add_argument('--Version',type=str,nargs=1,help='Version of Data, Standard, low, 6th_from6 etc')
parser.add_argument('--filetspan',type=str,nargs='?',default=['3hrly'],help='string for file time resolution, 3hrly etc')
parser.add_argument('--startyr',metavar='startyr',type=int,nargs=1,help='start year for analysis')
parser.add_argument('--endyr',type=int,nargs=1,help='end year for analysis')
args = parser.parse_args()
print "here's what I have as arguments: ", args
if args.Data[0] not in ['TRMM','TRMMERAIgd','ERAI','ERA20C','CESM']:
exit("incorrect Data option " + str(args.Data[0]) + " must be TRMM, TRMMERAIgd, ERAI,ERA20C or CESM")
Data = args.Data[0]
Version = args.Version[0]
filetimespan = args.filetspan[0]
startyr = args.startyr[0]
endyr = args.endyr[0]
R = 6371000 # radius of Earth in m
nyears = endyr - startyr + 1
mints = np.zeros(nyears)
maxts = np.zeros(nyears)
plotdensity = False
minevent = 100000
DirI = '/home/disk/eos4/rachel/EventTracking/FiT_RW_ERA/' + Data + '_output/' + Version + str(startyr) + '/proc/'
if Data == "TRMM":
if Version == '6th_from6' or Version == '5th_from48':
DirI = '/home/disk/eos4/rachel/EventTracking/FiT_RW/TRMM_output/' + Version + '/proc/'
FileInLats = '/home/disk/eos4/rachel/Obs/TRMM/SeasAnn_TRMM_1998-2014_3B42_3hrly_nonan.nc'
elif Data == "TRMMERAIgd":
FileInLats = '/home/disk/eos4/rachel/Obs/TRMM/regrid2ERAI_TRMM_3B42_1998-2014.nc'
elif Data == "ERAI":
FileInLats = '/home/disk/eos4/rachel/Obs/ERAI/3hrly/Precip_3hrly/SeasAnn_ERAI_Totalprecip_' + str(startyr) + '-' + str(endyr) + '_preprocess.nc'
elif Data == "ERA20C":
FileInLats = '/home/disk/eos4/rachel/Obs/ERA_20C/ERA_20C_LatLon.nc'
elif Data == "CESM":
DirI = '/home/disk/eos4/rachel/EventTracking/FiT_RW_ERA/CESM_output/' + Version + str(startyr) + '/proc/'
FileInLats = '/home/disk/eos4/rachel/EventTracking/Inputs/CESM/f.e13.FAMPIC5.ne120_ne120.1979_2012.001/f.e13.FAMIPC5.ne120_ne120_TotalPrecip_1979-2012.nc'
else:
print("unexpected data type")
exit()
FileI1 = 'All_Precip_' + str(startyr) + '-' + str(endyr) + '_' + Data + '_' + Version + '.nc'
#Get lons and lats
iday = 0
print FileInLats
FileIn = xrayOpen(FileInLats)
if Data == "CESM":
lats = FileIn['lat'].values
lons = FileIn['lon'].values
elif Data in ["ERA20C","TRMMERAIgd"]:
lats = FileIn['latitude'].values
lons = FileIn['longitude'].values
else:
lats = FileIn['Latitude'].values
lons = FileIn['Longitude'].values
nlats = len(lats)
nlons = len(lons)
print DirI + FileI1
datain = xrayOpen(DirI + FileI1,decodetimes=False)
pdb.set_trace()
print datain.variables
| 30.818898
| 158
| 0.71487
|
4a12f5b40447be6c58321441688334735856a028
| 15,920
|
py
|
Python
|
nnvm/python/nnvm/compiler/build_module.py
|
CortexFoundation/tvm-cvm
|
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
|
[
"Apache-2.0"
] | 6
|
2019-07-04T09:42:53.000Z
|
2021-12-28T13:19:48.000Z
|
nnvm/python/nnvm/compiler/build_module.py
|
CortexFoundation/tvm-cvm
|
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
|
[
"Apache-2.0"
] | 4
|
2019-06-27T08:05:18.000Z
|
2021-09-09T18:59:11.000Z
|
nnvm/python/nnvm/compiler/build_module.py
|
CortexFoundation/tvm-cvm
|
d8941dc60a51dd27a6d2accc1eff2eced3b3640d
|
[
"Apache-2.0"
] | null | null | null |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=invalid-name
"""Namespace for building operators."""
from __future__ import absolute_import as _abs
import logging
import tvm
from tvm.contrib import graph_runtime
from tvm import autotvm
from . import graph_attr, graph_util
from .. import graph as _graph
from .. import symbol as sym
from .._base import _all_var_init
OPT_PASS_LEVEL = {
"SimplifyInference": 0,
"PrecomputePrune": 2,
"OpFusion": 1,
"FoldScaleAxis": 3,
"AlterOpLayout": 3,
}
# List of optimization passes and the opt_level at which each is switched on
class BuildConfig(object):
"""Configuration scope to set a build config option.
Parameters
----------
kwargs
Keyword arguments of configurations to set.
"""
current = None
defaults = {
"opt_level": 2,
"add_pass": None,
"runtime": "cvm",
}
def __init__(self, **kwargs):
self._old_scope = None
for k, _ in kwargs.items():
if k not in BuildConfig.defaults:
raise ValueError(
"invalid argument %s, candidates are %s" % (k, BuildConfig.defaults.keys()))
self._attr = kwargs
def __getattr__(self, name):
if name not in self._attr:
return BuildConfig.defaults[name]
return self._attr[name]
def __enter__(self):
# pylint: disable=protected-access
self._old_scope = BuildConfig.current
attr = BuildConfig.current._attr.copy()
attr.update(self._attr)
self._attr = attr
BuildConfig.current = self
return self
def __exit__(self, ptype, value, trace):
assert self._old_scope
BuildConfig.current = self._old_scope
def pass_enabled(self, pass_name):
"""Get whether pass is enabled.
Parameters
----------
pass_name : str
The optimization pass name
Returns
-------
enabled : bool
Whether pass is enabled.
"""
if self.add_pass and pass_name in self.add_pass:
return True
return self.opt_level >= OPT_PASS_LEVEL[pass_name]
BuildConfig.current = BuildConfig()
def build_config(**kwargs):
"""Configure the build behavior by setting config variables.
Parameters
----------
opt_level: int, default=2
Optimization level. See OPT_PASS_LEVEL for level of each pass.
add_pass: set of str
Optimization pass to be added regardless of optimization level.
Returns
-------
config: BuildConfig
The build configuration
"""
return BuildConfig(**kwargs)
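# A short sketch of the scoped-configuration behavior defined above: options
# apply only inside the `with` block and nest on top of the enclosing scope.
def _example_build_config_scope():
    with build_config(opt_level=3, add_pass={"AlterOpLayout"}):
        assert BuildConfig.current.opt_level == 3
        # level-3 passes such as FoldScaleAxis are now enabled
        assert BuildConfig.current.pass_enabled("FoldScaleAxis")
    # outside the block the previous (default) config is restored
    assert BuildConfig.current.opt_level == 2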
@tvm.register_func("nnvm.compiler.lower")
def _lower(sch, inputs, func_name, graph):
import traceback
# pylint: disable=broad-except
try:
f = tvm.lower(sch, inputs, name=func_name)
# logging.debug("lower function %s", func_name)
# logging.debug("%s", tvm.lower(sch, inputs, simple_mode=True))
except Exception:
msg = traceback.format_exc()
msg += "Error during compile graph\n"
msg += "--------------------------\n"
msg += graph.ir(join_entry_attrs=["shape"])
raise RuntimeError(msg)
return f if isinstance(
f, (tvm.container.Array, tuple, list)) else [f]
@tvm.register_func("nnvm.compiler.build_target")
def _build(funcs, target, target_host):
if target_host == "":
target_host = None
return tvm.build(funcs, target=target, target_host=target_host)
def _update_shape_dtype(shape, dtype, params):
"""Update shape dtype given params information"""
if not params:
return shape, dtype
shape = shape.copy()
shape.update({k : v.shape for k, v in params.items()})
if isinstance(dtype, str):
for k, v in params.items():
if v.dtype != dtype:
raise ValueError(
"%s: dtype not expected %s vs %s" % (k, dtype, v.dtype))
else:
dtype = dtype.copy()
dtype.update({k : str(v.dtype) for k, v in params.items()})
return shape, dtype
def optimize(graph, shape, dtype="float32", layout=None):
"""Perform target and parameter invariant graph optimization.
This is an advanced function that usually does not need to be called.
Call build instead.
Parameters
----------
graph : Graph
The graph to be used in optimized.
Returns
-------
graph : Graph
The optimized graph.
"""
# pylint: disable=unused-argument
cfg = BuildConfig.current
if cfg.pass_enabled("AlterOpLayout"):
layout = layout if layout else {}
graph = graph_attr.set_layout_inputs(graph, layout)
graph = graph.apply(["CorrectLayout"])
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph_attr.set_dtype_inputs(graph, dtype)
graph = graph.apply(["InferShape", "InferType", "AlterOpLayout"])
graph = graph_attr.set_layout_inputs(graph, layout)
graph = graph.apply(["CorrectLayout"])
if cfg.pass_enabled("SimplifyInference"):
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph.apply(["InferShape", "SimplifyInference"])
if cfg.pass_enabled("FoldScaleAxis"):
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph.apply(["InferShape", "FoldScaleAxis"])
return graph
def build(graph, target=None, shape=None, dtype="float32",
params=None, target_host=None, layout=None):
"""Build graph into runtime library.
The build function will optimize the graph and do the compilation.
When params is provided, the compiler might split the graph to
pre-compute certain values, so the final execution graph can
be different from the original one.
Parameters
----------
graph : Graph
The graph to be used in lowering
target : str or :any:`tvm.target.Target`, optional
The build target
shape : dict of str to tuple, optional
The input shape to the graph
dtype : str or dict of str to str
The input types to the graph
params : dict of str to NDArray
Input parameters to the graph that do not change
during inference time. Used for pre-compute
folding optimization.
target_host : str or :any:`tvm.target.Target` optional
Host compilation target, if target is device.
When TVM compiles device specific program such as CUDA,
we also need host(CPU) side code to interact with the driver
setup the dimensions and parameters correctly.
target_host is used to specify the host side codegen target.
By default, llvm is used if it is enabled,
otherwise a stackvm interpreter is used.
layout : dict of str to str or str optional
The input layout
Returns
-------
graph : Graph
The final execution graph.
libmod : tvm.Module
The module that comes with the execution graph
params : dict of str to NDArray
The updated parameters of graph if params is passed.
This can be different from the params passed in.
"""
target = target if target else tvm.target.current_target()
if target is None:
raise ValueError("Target is not set in env or passed as argument.")
target = tvm.target.create(target)
# If current dispatch context is fallback context (the default root context),
# then load pre-tuned parameters from TopHub
if isinstance(autotvm.DispatchContext.current, autotvm.FallbackContext):
tophub_context = autotvm.tophub.context(target)
else:
tophub_context = autotvm.util.EmptyContext()
with tophub_context:
shape = shape if shape else {}
if not isinstance(shape, dict):
raise TypeError("require shape to be dict")
for value in shape.values():
if not all(isinstance(x, tvm._ffi.base.integer_types) for x in value):
raise TypeError("shape value must be Integer types iterator")
cfg = BuildConfig.current
graph = graph if isinstance(graph, _graph.Graph) else _graph.create(graph)
shape, dtype = _update_shape_dtype(shape, dtype, params)
# correct layout if necessary
layout = layout if layout else {}
graph = graph_attr.set_layout_inputs(graph, layout)
graph = graph.apply("CorrectLayout")
index = graph.index
layouts = graph.json_attr("layout")
layout = {x: layouts[index.entry_id(x)] for x in index.input_names}
# Initial pass do shape type inference
ishape, _ = graph_util.infer_shape(graph, **shape)
shape.update(zip(graph.index.input_names, ishape))
if not isinstance(dtype, str):
idtype, _ = graph_util.infer_dtype(graph, **dtype)
dtype.update(zip(graph.index.input_names, idtype))
# Initialize all variables specified in _all_var_init
init_var = {}
if _all_var_init:
init_var = initialize_variables(shape, dtype)
# Apply optimization
with target:
graph = optimize(graph, shape, dtype, layout)
# Clear extra params without nodes.
_remove_noref_params(params, graph)
# Precompute prune
if params and cfg.pass_enabled("PrecomputePrune"):
graph, params = precompute_prune(graph, params)
shape, dtype = _update_shape_dtype(shape, dtype, params)
# Operator Fusion and generation
graph = graph_attr.set_shape_inputs(graph, shape)
graph = graph.apply("InferShape")
graph = graph_attr.set_dtype_inputs(graph, dtype)
graph._set_json_attr("target", str(target), "str")
if target_host is not None:
graph._set_json_attr("target_host", str(target_host), "str")
if cfg.pass_enabled("OpFusion"):
graph._set_json_attr("opt_level", 1, "int")
else:
graph._set_json_attr("opt_level", 0, "int")
graph = graph.apply("InferShape").apply("InferType")
graph = graph.apply("InferPrecision")
graph = graph.apply("GraphFindFusibleGroups")
graph = graph.apply("GraphFuse")
with target:
if cfg.runtime == "cvm":
graph = graph.apply("GraphCompile")
libmod = None
elif cfg.runtime == "tvm":
graph = graph.apply("TVMGraphCompile")
libmod = graph_attr._move_out_module(graph, "module")
else:
raise TypeError("runtime %s is not supported."%cfg.runtime)
#Write variable initial values into params
if init_var:
if params is None:
params = {}
params.update(init_var)
return graph, libmod, params
def _remove_noref_params(params, graph):
""" Helper to clear non referenced params
Parameters
----------
graph : Graph
The input graph
params: dict of str to ndarray
The parameter dictionary
"""
arg_list = set(graph.symbol.list_input_names())
if params:
param_keys = list(params.keys())
for key in param_keys:
if key not in arg_list:
params.pop(key)
def _run_graph(graph, params):
"""Helper utility to build and run and get outputs, only use cpu mode.
Parameters
----------
graph : Graph
The graph to be executed.
params: dict of str to ndarray
The parameter dictionary.
Returns
-------
out_dict: dict of str to tvm.NDArray
The output dictionaries.
"""
graph = graph if isinstance(graph, _graph.Graph) else _graph.create(graph)
shape = {k : v.shape for k, v in params.items()}
dtype = {k : v.dtype for k, v in params.items()}
target = "llvm"
ctx = tvm.cpu(0)
_, oshape = graph_util.infer_shape(graph, **shape)
_, odtype = graph_util.infer_dtype(graph, **dtype)
graph, libmod, _ = build(graph, target, shape, dtype)
m = graph_runtime.create(graph, libmod, ctx)
set_input, run, get_output = m["set_input"], m["run"], m["get_output"]
kset = set(graph.symbol.list_input_names())
for k, v in params.items():
if k in kset:
set_input(k, tvm.nd.array(v))
run()
out_data = []
for i, kv in enumerate(zip(oshape, odtype)):
shape, dtype = kv
arr = tvm.nd.empty(shape, dtype, ctx)
get_output(i, arr)
out_data.append(arr)
return out_data
def precompute_prune(graph, params):
"""Precompute the part of graph that can be pre-computed.
This will create a new graph that only contains the ops
that need to be computed depending on input as well as
updated version of param dict that pre-computes some of
intermediate results.
Parameters
----------
graph : Graph
The input graph
params : dict of str -> tvm.NDArray
The parameter dictionary of the graph
Returns
-------
pruned_graph : Graph
The pruned graph
new_params : dict of str-> tvm.NDArray
The updated dictionary of parameters.
"""
graph = graph if isinstance(graph, _graph.Graph) else _graph.create(graph)
graph._set_json_attr("param_name_list", list(params.keys()), "list_str")
graph = graph.apply("PrecomputePrune")
pre_graph = graph_attr._move_out_graph(graph, "precompute_graph")
if pre_graph is None:
return graph, params
out_names = pre_graph.json_attr("output_names")
if not pre_graph.symbol.list_output_names():
return graph, params
with tvm.build_config(auto_unroll_max_step=0):
out_arrs = _run_graph(pre_graph, params)
return graph, dict(zip(out_names, out_arrs))
def initialize_variables(ishape, idtype):
""" Initialize variables stored in _all_var_init dictionary.
Parameters
----------
ishape : dict of str to tuple of int
The input shape to the graph
idtype : str or dict of str to str
The input types to the graph
Returns
-------
init_var : dict of str to tvm.ndarray
"""
symbol_init_dict = {}
const_init_dict = {}
init_var = {}
for key, value in _all_var_init.items():
if isinstance(value, sym.Symbol):
symbol_init_dict[key] = value
else:
const_init_dict[key] = tvm.nd.array(value)
# Make sure variables are initialized only once.
_all_var_init.clear()
if symbol_init_dict:
# Create dummy params to run initialization graph
params = {}
for name, shape in ishape.items():
dtype = idtype if isinstance(idtype, str) else idtype[name]
params[name] = tvm.nd.empty(shape, dtype, ctx=tvm.cpu())
init_group_sym = sym.Group(symbol_init_dict.values())
graph = _graph.create(init_group_sym)
with tvm.build_config(auto_unroll_max_step=0):
init_values = _run_graph(graph, params)
init_var.update(dict(zip(symbol_init_dict.keys(), init_values)))
init_var.update(const_init_dict)
for name, data in init_var.items():
ishape[name] = data.shape
return init_var
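# A minimal end-to-end sketch of the build() flow documented above. The tiny
# dense network and input shape are illustrative, and an LLVM-enabled TVM
# build is assumed.
def _example_build():
    data = sym.Variable("data")
    net = sym.dense(data=data, units=16, name="fc")
    # with the default runtime ("cvm") libmod is None; wrap the call in
    # build_config(runtime="tvm") to get a TVM module back instead
    graph, libmod, params = build(
        net, target="llvm", shape={"data": (1, 8)}, dtype="float32")
    return graph, libmod, params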
| 33.445378
| 96
| 0.640829
|
4a12f632772c1cb2a20b821c956fe9f410cc552c
| 2,999
|
py
|
Python
|
.history/classes/Handler_20171106214758.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
.history/classes/Handler_20171106214758.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
.history/classes/Handler_20171106214758.py
|
reecebenson/DADSA-Tennis-PartA
|
d0763f819b300fcd0ce27041f5bc4ef0519c00bf
|
[
"MIT"
] | null | null | null |
# DADSA - Assignment 1
# Reece Benson
import json
from classes import Player as Player
from classes import Season as Season
from classes import Tournament as Tournament
from classes import Round as Round
from classes import Match as Match
class Handler():
# Define the variables we will be using
app = None
prize_money = None
player_count = None
seasons = { }
def __init__(self, _app):
if(_app.debug):
print("[LOAD]: Loaded Handler!")
# Define our Application within this Handler class
self.app = _app
# Used to load all data into memory
def load(self):
# This function will create our seasons and implement the genders & players
self.load_players()
self.load_prize_money()
#TODO: Implement load_seasons()
# Used to load prize money
def load_prize_money(self):
with open('./data/rankingPoints.json') as tData:
data = json.load(tData)
# Make our prize_money a dictionary
if(self.prize_money == None):
self.prize_money = { }
# We want to set the prize money for all indexes possible via the player
self.prize_money = [ pts for pts in data for rank in data[pts] ]
#self.prize_money += [ 0 ] * ( self.player_count - len(self.prize_money))
print(self.prize_money)
# Used to load players from all seasons into memory
def load_players(self):
# Set our player (in gender) count
self.player_count = 0
with open('./data/players.json') as tData:
data = json.load(tData)
# Players are classed within Seasons
for season in data:
# If the season does not yet exist, create it
if(not season in self.seasons):
self.seasons[season] = { "players": { } }
# Players are then stored within Gender classifications
for gender in data[season]:
if(not gender in self.seasons[season]["players"]):
self.seasons[season]["players"][gender] = [ ]
# Append our player in the season, within the gender
for player in data[season][gender]:
#TODO: Change to using Player class
self.seasons[season]["players"][gender].append(player)
# Update our player count
if(len(self.seasons[season]["players"][gender]) > self.player_count):
self.player_count = len(self.seasons[season]["players"][gender])
def get_players(self, season):
# Check our Season exists
if(not season in self.seasons):
return None
else:
# Check we have players within our Season
if("players" in self.seasons[season]):
return self.seasons[season]["players"]
else:
return None
| 35.702381
| 93
| 0.57986
|
4a12f6efcc245c94ed2cfb420684e305beb07bf0
| 871
|
py
|
Python
|
ut_engine/preprocessed/getdata.py
|
justpic/ut_ali
|
5173011c735cdbd4b1cb9becad3b69675993225a
|
[
"MIT"
] | 1
|
2021-07-08T01:30:07.000Z
|
2021-07-08T01:30:07.000Z
|
ut_engine/preprocessed/getdata.py
|
justpic/ut_ali
|
5173011c735cdbd4b1cb9becad3b69675993225a
|
[
"MIT"
] | null | null | null |
ut_engine/preprocessed/getdata.py
|
justpic/ut_ali
|
5173011c735cdbd4b1cb9becad3b69675993225a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import csv
reader=csv.reader(open('t_alibaba_data.csv'))
data=[]
reader.next()
for usrid,brandid,dtype,date in reader:
datestr=date.decode('gb2312')
print datestr
splitdate=datestr.split(u'\u6708')
print int(splitdate[0])
data.append((usrid,brandid,dtype,date.decode('gb2312')))
#break
print data[0] #title info
print data[1]
#print data
#print reader[0]
import numpy as np
def formatdata(file='t_alibaba_data.csv'):
data_list=[]
reader=csv.reader(open(file))
reader.next()
for userid,brandid,type,date in reader:
data_item=[]
data_item.append(int(userid))
data_item.append(int(brandid))
data_item.append(int(type))
data_item.append(int(date.decode('gb2312').split(u'\u6708')[0]))
data_list.append(data_item)
return np.array(data_list)
| 26.393939
| 72
| 0.67853
|
4a12f7d2711fee362e3882a6c43fbadd9648348e
| 3,287
|
py
|
Python
|
sentiment.py
|
GitHubEmploy/StockSentiment
|
2202a23c0efc1bc23164857b1103f2f67173abd3
|
[
"BSD-3-Clause"
] | 3
|
2020-12-26T08:46:12.000Z
|
2021-02-03T01:55:05.000Z
|
sentiment.py
|
GitHubEmploy/StockSentiment
|
2202a23c0efc1bc23164857b1103f2f67173abd3
|
[
"BSD-3-Clause"
] | null | null | null |
sentiment.py
|
GitHubEmploy/StockSentiment
|
2202a23c0efc1bc23164857b1103f2f67173abd3
|
[
"BSD-3-Clause"
] | 1
|
2021-05-26T03:26:11.000Z
|
2021-05-26T03:26:11.000Z
|
#IMPORTING DEPENDENCIES
print('Importing Flair/Torch...')
import flair
import torch
from flair.data import Sentence
print('Importing WSS(s)...')
from newsapi import NewsApiClient
import alpaca_trade_api as tradeapi
#DONE IMPORTING DEPENDENCIES
#SET RUN DEVICE AS CPU
flair.device = torch.device('cpu')
#SETTING UP THE POLYGON.IO API CLIENT
api = tradeapi.REST('YOURPOLYGONAPIKEY','https://api.polygon.io' )
#DEFINING FUNCTION
def sentiment(stock, api):
#LOADING TRADERVIEW
url = 'https://www.tradingview.com/screener/'
#LOADING FLAIR
flair_sentiment = flair.models.TextClassifier.load('en-sentiment')
#NEWSAPI API call
newsapi = NewsApiClient(api_key='YOURNEWSAPIKEY')
#GET THE ARTICLES
response = newsapi.get_everything(qintitle=stock)
#SPECIFY API CALL INSIDE THE FUNCTION
news = api.polygon.news(stock)
#OPEN NEWS.TXT TO WRITE NEWS
file = open('news.txt', 'w')
#VERIFY SENTIMENT VARIABLE IS 0
sentiment = 0
print(response)
#ITERATES THROUGH EVERY NEWS ARTICLE FROM NEWS API
for line in response['articles']:
words = str(line['title'])
file.write(words)
#RUNS FLAIR SENTIMENT ANALYSIS
sentence = Sentence(str(words))
flair_sentiment.predict(sentence)
total_sentiment = sentence.labels
print(str(words))
# Checks to see if the sentiment is negative and subtracts by how negative flair thinks it is
if total_sentiment[0].value == 'NEGATIVE':
print(str(total_sentiment[0].value) + " : " + str(total_sentiment[0].to_dict()['confidence']))
sentiment -= total_sentiment[0].to_dict()['confidence'] / 2 # Flair favors negative outcomes
# Checks to see if the sentiment is positive and adds how positive flair thinks it is
elif total_sentiment[0].value == 'POSITIVE':
print(str(total_sentiment[0].value) + " : " + str(total_sentiment[0].to_dict()['confidence']))
sentiment += total_sentiment[0].to_dict()['confidence']
#ITERATES THROUGH EVERY NEWS ARTICLE FROM POLYGON.IO
for source in news:
words = source.summary
try:
file.write(words)
except:
print('FAILSAFE ACTIVATED')
file.write('\n')
# Runs Flair sentiment analysis
sentence = Sentence(str(words))
try:
flair_sentiment.predict(sentence)
except:
print("\n")
total_sentiment = sentence.labels
print(str(words))
# Checks to see if the sentiment is negative and subtracts by how negative flair thinks it is
if total_sentiment[0].value == 'NEGATIVE':
print(str(total_sentiment[0].value) + " : " + str(total_sentiment[0].to_dict()['confidence']))
sentiment -= total_sentiment[0].to_dict()['confidence'] / 2 # Flair favors negative outcomes
# Checks to see if the sentiment is positive and adds how positive flair thinks it is
        elif total_sentiment[0].value == 'POSITIVE':
print(str(total_sentiment[0].value) + " : " + str(total_sentiment[0].to_dict()['confidence']))
sentiment += total_sentiment[0].to_dict()['confidence']
file.close()
print('Total sentiment', sentiment) #News Sentiment
sentiment('AAPL', api)
| 34.968085
| 106
| 0.658655
|
4a12f8f6eaf8024ccbc158cd8f29d22aaa043b7d
| 462
|
py
|
Python
|
mayan/apps/rest_api/urls.py
|
garrans/mayan-edms
|
e95e90cc47447a1ae72629271652824aa9868572
|
[
"Apache-2.0"
] | 1
|
2020-07-15T02:56:02.000Z
|
2020-07-15T02:56:02.000Z
|
mayan/apps/rest_api/urls.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | null | null | null |
mayan/apps/rest_api/urls.py
|
kyper999/mayan-edms
|
ca7b8301a1f68548e8e718d42a728a500d67286e
|
[
"Apache-2.0"
] | 2
|
2020-02-24T21:02:31.000Z
|
2021-01-05T23:52:01.000Z
|
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from .views import APIBase, APIAppView, BrowseableObtainAuthToken
urlpatterns = patterns(
'',
)
api_urls = patterns(
'',
url(r'^$', APIBase.as_view(), name='api_root'),
url(r'^api/(?P<path>.*)/?$', APIAppView.as_view(), name='api_app'),
url(
r'^auth/token/obtain/$', BrowseableObtainAuthToken.as_view(),
name='auth_token_obtain'
),
)
| 22
| 71
| 0.660173
|
4a12fa539b985c7de1b25f22e30c8e1582a544c3
| 978
|
py
|
Python
|
2020/8/8.py
|
jeremy-quicklearner/advent-of-code
|
a87541fda41326b47498bc7e5134515a8793d896
|
[
"MIT"
] | null | null | null |
2020/8/8.py
|
jeremy-quicklearner/advent-of-code
|
a87541fda41326b47498bc7e5134515a8793d896
|
[
"MIT"
] | null | null | null |
2020/8/8.py
|
jeremy-quicklearner/advent-of-code
|
a87541fda41326b47498bc7e5134515a8793d896
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
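# Advent of Code 2020, day 8 ("Handheld Halting").
# run() executes the boot code until an instruction repeats (part 1: report
# the accumulator at that point) or the program terminates; the loop at the
# bottom flips each single nop/jmp (part 2) to find the fix that terminates.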
with open('cs.txt') as fh:
lines = fh.readlines()
def run(prog):
visited = {}
acc = 0
pc = 0
while pc not in visited:
if pc == len(prog):
print('Terminates with acc ' + str(acc))
return True
visited[pc] = 1
opcode, arg = prog[pc].strip().split(' ')
arg = int(arg)
if opcode == 'nop':
pc += 1
continue
if opcode == 'jmp':
pc += arg
continue
if opcode == 'acc':
acc += arg
pc += 1
continue
print('Loops with acc ' + str(acc))
return False
for idx in range(len(lines)):
opcode, arg = lines[idx].strip().split(' ')
if opcode == 'acc':
print('Skipping acc')
continue
prog = lines.copy()
if opcode == 'nop':
prog[idx] = 'jmp ' + arg + '\n'
elif opcode == 'jmp':
prog[idx] = 'nop ' + arg + '\n'
run(prog)
| 19.176471
| 52
| 0.45501
|
4a12fa9937f780170972260008bc5cd65719cb77
| 777
|
py
|
Python
|
launcher/__main__.py
|
mfahdaz/TVB-NEST
|
c16e18f41ff07b33482a55f4a033b8c698051c9a
|
[
"Apache-2.0"
] | 2
|
2020-10-21T11:45:19.000Z
|
2020-12-01T09:32:53.000Z
|
launcher/__main__.py
|
mfahdaz/TVB-NEST
|
c16e18f41ff07b33482a55f4a033b8c698051c9a
|
[
"Apache-2.0"
] | 10
|
2020-11-17T09:33:19.000Z
|
2022-01-11T17:00:40.000Z
|
launcher/__main__.py
|
mfahdaz/TVB-NEST
|
c16e18f41ff07b33482a55f4a033b8c698051c9a
|
[
"Apache-2.0"
] | 9
|
2020-11-17T08:52:51.000Z
|
2021-12-10T12:25:04.000Z
|
# ------------------------------------------------------------------------------
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor
# license agreements; and to You under the Apache License, Version 2.0. "
#
# Forschungszentrum Jülich
# Institute: Institute for Advanced Simulation (IAS)
# Section: Jülich Supercomputing Centre (JSC)
# Division: High Performance Computing in Neuroscience
# Laboratory: Simulation Laboratory Neuroscience
# Team: Multi-scale Simulation and Design
#
# ------------------------------------------------------------------------------
import sys
from .main import main
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 37
| 81
| 0.604891
|
4a12fbf4a7b596147ea8820cf23da1c8d1c50e88
| 14,002
|
py
|
Python
|
cirq-core/cirq/sim/state_vector.py
|
tw3akslow/Cirq
|
0209c6514ba3334b3c82ec2422c59240d1f3af50
|
[
"Apache-2.0"
] | 1
|
2021-07-20T16:23:45.000Z
|
2021-07-20T16:23:45.000Z
|
cirq-core/cirq/sim/state_vector.py
|
xeedmm/Cirq
|
dc013726c9472d39bc2a3909208e188fb535e081
|
[
"Apache-2.0"
] | null | null | null |
cirq-core/cirq/sim/state_vector.py
|
xeedmm/Cirq
|
dc013726c9472d39bc2a3909208e188fb535e081
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Helpers for handling quantum state vectors."""
from typing import Dict, List, Optional, Tuple, TYPE_CHECKING, Sequence
import abc
import numpy as np
from cirq import linalg, ops, qis, value
from cirq.sim import simulator
if TYPE_CHECKING:
import cirq
# For backwards compatibility and to make mypy happy:
from cirq.qis import STATE_VECTOR_LIKE # pylint: disable=unused-import,wrong-import-position
class StateVectorMixin:
"""A mixin that provide methods for objects that have a state vector."""
# Reason for 'type: ignore': https://github.com/python/mypy/issues/5887
def __init__(self, qubit_map: Optional[Dict[ops.Qid, int]] = None, *args, **kwargs):
"""Inits StateVectorMixin.
Args:
            qubit_map: A map from the Qubits in the Circuit to the index
                of each qubit in a canonical ordering. This canonical ordering
is used to define the state (see the state_vector() method).
"""
super().__init__(*args, **kwargs) # type: ignore
self._qubit_map = qubit_map or {}
qid_shape = simulator._qubit_map_to_shape(self._qubit_map)
self._qid_shape = None if qubit_map is None else qid_shape
@property
def qubit_map(self) -> Dict[ops.Qid, int]:
return self._qubit_map
def _qid_shape_(self) -> Tuple[int, ...]:
if self._qid_shape is None:
return NotImplemented
return self._qid_shape
@abc.abstractmethod
def state_vector(self) -> np.ndarray:
"""Return the state vector (wave function).
The vector is returned in the computational basis with these basis
states defined by the `qubit_map`. In particular the value in the
`qubit_map` is the index of the qubit, and these are translated into
binary vectors where the last qubit is the 1s bit of the index, the
second-to-last is the 2s bit of the index, and so forth (i.e. big
endian ordering).
Example:
qubit_map: {QubitA: 0, QubitB: 1, QubitC: 2}
Then the returned vector will have indices mapped to qubit basis
states like the following table
| | QubitA | QubitB | QubitC |
| :-: | :----: | :----: | :----: |
| 0 | 0 | 0 | 0 |
| 1 | 0 | 0 | 1 |
| 2 | 0 | 1 | 0 |
| 3 | 0 | 1 | 1 |
| 4 | 1 | 0 | 0 |
| 5 | 1 | 0 | 1 |
| 6 | 1 | 1 | 0 |
| 7 | 1 | 1 | 1 |
"""
raise NotImplementedError()
def dirac_notation(self, decimals: int = 2) -> str:
"""Returns the state vector as a string in Dirac notation.
Args:
decimals: How many decimals to include in the pretty print.
Returns:
A pretty string consisting of a sum of computational basis kets
and non-zero floats of the specified accuracy."""
return qis.dirac_notation(self.state_vector(), decimals, qid_shape=self._qid_shape)
def density_matrix_of(self, qubits: List[ops.Qid] = None) -> np.ndarray:
r"""Returns the density matrix of the state.
Calculate the density matrix for the system on the list, qubits.
Any qubits not in the list that are present in self.state_vector() will
be traced out. If qubits is None the full density matrix for
self.state_vector() is returned, given self.state_vector() follows
standard Kronecker convention of numpy.kron.
For example:
self.state_vector() = np.array([1/np.sqrt(2), 1/np.sqrt(2)],
dtype=np.complex64)
qubits = None
gives us
$$
\rho = \begin{bmatrix}
0.5 & 0.5 \\
0.5 & 0.5
\end{bmatrix}
$$
Args:
qubits: list containing qubit IDs that you would like
to include in the density matrix (i.e.) qubits that WON'T
be traced out.
Returns:
A numpy array representing the density matrix.
Raises:
ValueError: if the size of the state represents more than 25 qubits.
IndexError: if the indices are out of range for the number of qubits
corresponding to the state.
"""
return qis.density_matrix_from_state_vector(
self.state_vector(),
[self.qubit_map[q] for q in qubits] if qubits is not None else None,
qid_shape=self._qid_shape,
)
def bloch_vector_of(self, qubit: 'cirq.Qid') -> np.ndarray:
"""Returns the bloch vector of a qubit in the state.
Calculates the bloch vector of the given qubit
in the state given by self.state_vector(), given that
self.state_vector() follows the standard Kronecker convention of
numpy.kron.
Args:
            qubit: the qubit whose Bloch vector we want to find.
Returns:
A length 3 numpy array representing the qubit's bloch vector.
Raises:
ValueError: if the size of the state represents more than 25 qubits.
IndexError: if index is out of range for the number of qubits
corresponding to the state.
"""
return qis.bloch_vector_from_state_vector(
self.state_vector(), self.qubit_map[qubit], qid_shape=self._qid_shape
)
def sample_state_vector(
state_vector: np.ndarray,
indices: List[int],
*, # Force keyword args
qid_shape: Optional[Tuple[int, ...]] = None,
repetitions: int = 1,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> np.ndarray:
"""Samples repeatedly from measurements in the computational basis.
Note that this does not modify the passed in state.
Args:
        state_vector: The multi-qubit state vector to be sampled. This is an
            array of ``2**num_qubits`` complex numbers, so the state must be of
            size ``2**integer``. The `state_vector` can be a vector of size
            ``2**integer`` or a tensor of shape ``(2, 2, ..., 2)``.
indices: Which qubits are measured. The `state_vector` is assumed to be
supplied in big endian order. That is the xth index of v, when
expressed as a bitstring, has its largest values in the 0th index.
qid_shape: The qid shape of the `state_vector`. Specify this argument
when using qudits.
repetitions: The number of times to sample.
seed: A seed for the pseudorandom number generator.
Returns:
Measurement results with True corresponding to the ``|1⟩`` state.
The outer list is for repetitions, and the inner corresponds to
measurements ordered by the supplied qubits. These lists
are wrapped as a numpy ndarray.
Raises:
ValueError: ``repetitions`` is less than one or size of `state_vector`
is not a power of 2.
IndexError: An index from ``indices`` is out of range, given the number
of qubits corresponding to the state.
"""
if repetitions < 0:
raise ValueError(f'Number of repetitions cannot be negative. Was {repetitions}')
shape = qis.validate_qid_shape(state_vector, qid_shape)
num_qubits = len(shape)
qis.validate_indices(num_qubits, indices)
if repetitions == 0 or len(indices) == 0:
return np.zeros(shape=(repetitions, len(indices)), dtype=np.uint8)
prng = value.parse_random_state(seed)
# Calculate the measurement probabilities.
probs = _probs(state_vector, indices, shape)
# We now have the probability vector, correctly ordered, so sample over
    # it. Note that we use ints here, since numpy's choice does not allow for
# choosing from a list of tuples or list of lists.
result = prng.choice(len(probs), size=repetitions, p=probs)
# Convert to individual qudit measurements.
meas_shape = tuple(shape[i] for i in indices)
return np.array(
[value.big_endian_int_to_digits(result[i], base=meas_shape) for i in range(len(result))],
dtype=np.uint8,
)
def measure_state_vector(
state_vector: np.ndarray,
indices: Sequence[int],
*, # Force keyword args
qid_shape: Optional[Tuple[int, ...]] = None,
out: np.ndarray = None,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> Tuple[List[int], np.ndarray]:
"""Performs a measurement of the state in the computational basis.
This does not modify `state` unless the optional `out` is `state`.
Args:
state_vector: The state to be measured. This state vector is assumed to
be normalized. The state vector must be of size 2 ** integer. The
state vector can be of shape (2 ** integer) or (2, 2, ..., 2).
indices: Which qubits are measured. The `state_vector` is assumed to be
supplied in big endian order. That is the xth index of v, when
expressed as a bitstring, has the largest values in the 0th index.
qid_shape: The qid shape of the `state_vector`. Specify this argument
when using qudits.
out: An optional place to store the result. If `out` is the same as
the `state_vector` parameter, then `state_vector` will be modified
inline. If `out` is not None, then the result is put into `out`.
            If `out` is None a new value will be allocated. In all of these
            cases `out` will be the same as the ndarray returned by the method.
The shape and dtype of `out` will match that of `state_vector` if
`out` is None, otherwise it will match the shape and dtype of `out`.
seed: A seed for the pseudorandom number generator.
Returns:
A tuple of a list and a numpy array. The list is an array of booleans
corresponding to the measurement values (ordered by the indices). The
numpy array is the post measurement state vector. This state vector has
the same shape and dtype as the input `state_vector`.
Raises:
        ValueError: if the size of state is not a power of 2.
        IndexError: if the indices are out of range for the number of qubits
            corresponding to the state.
"""
shape = qis.validate_qid_shape(state_vector, qid_shape)
num_qubits = len(shape)
qis.validate_indices(num_qubits, indices)
if len(indices) == 0:
if out is None:
out = np.copy(state_vector)
elif out is not state_vector:
np.copyto(dst=out, src=state_vector)
# Final else: if out is state then state will be modified in place.
return ([], out)
prng = value.parse_random_state(seed)
# Cache initial shape.
initial_shape = state_vector.shape
# Calculate the measurement probabilities and then make the measurement.
probs = _probs(state_vector, indices, shape)
result = prng.choice(len(probs), p=probs)
###measurement_bits = [(1 & (result >> i)) for i in range(len(indices))]
# Convert to individual qudit measurements.
meas_shape = tuple(shape[i] for i in indices)
measurement_bits = value.big_endian_int_to_digits(result, base=meas_shape)
# Calculate the slice for the measurement result.
result_slice = linalg.slice_for_qubits_equal_to(
indices, big_endian_qureg_value=result, qid_shape=shape
)
# Create a mask which is False for only the slice.
mask = np.ones(shape, dtype=bool)
mask[result_slice] = False
if out is None:
out = np.copy(state_vector)
elif out is not state_vector:
np.copyto(dst=out, src=state_vector)
# Final else: if out is state then state will be modified in place.
# Potentially reshape to tensor, and then set masked values to 0.
out.shape = shape
out[mask] = 0
# Restore original shape (if necessary) and renormalize.
out.shape = initial_shape
out /= np.sqrt(probs[result])
return measurement_bits, out
def _probs(state: np.ndarray, indices: Sequence[int], qid_shape: Tuple[int, ...]) -> np.ndarray:
"""Returns the probabilities for a measurement on the given indices."""
tensor = np.reshape(state, qid_shape)
# Calculate the probabilities for measuring the particular results.
if len(indices) == len(qid_shape):
# We're measuring every qudit, so no need for fancy indexing
probs = np.abs(tensor) ** 2
probs = np.transpose(probs, indices)
probs = np.reshape(probs, np.prod(probs.shape))
else:
# Fancy indexing required
meas_shape = tuple(qid_shape[i] for i in indices)
probs = (
np.abs(
[
tensor[
linalg.slice_for_qubits_equal_to(
indices, big_endian_qureg_value=b, qid_shape=qid_shape
)
]
for b in range(np.prod(meas_shape, dtype=int))
]
)
** 2
)
probs = np.sum(probs, axis=tuple(range(1, len(probs.shape))))
# To deal with rounding issues, ensure that the probabilities sum to 1.
probs /= np.sum(probs)
return probs
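
A minimal usage sketch for the sampler above, assuming a working Cirq install and importing the helper exactly as defined in this module. It samples a two-qubit Bell state; big-endian ordering puts qubit 0 in the most significant digit.

import numpy as np
from cirq.sim.state_vector import sample_state_vector

bell = np.array([1, 0, 0, 1], dtype=np.complex64) / np.sqrt(2)
# Eight shots over both qubits; each returned row is [qubit0, qubit1].
samples = sample_state_vector(bell, [0, 1], repetitions=8, seed=0)
print(samples)  # only [0 0] and [1 1] rows appear for a Bell state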
| 40.120344
| 97
| 0.626482
|
4a12fbf7809aadc3c133fa7c51c40de0042c48e3
| 562
|
py
|
Python
|
Hansen API Server 2.0/photos/Hansen Photo Geter.py
|
YuzeLuo/Picture-API-Server
|
d0f1e06673e07646d877c40e0c45d7ba4d2a4536
|
[
"CC0-1.0"
] | 1
|
2021-08-28T07:50:39.000Z
|
2021-08-28T07:50:39.000Z
|
Hansen API Server 2.0/photos/Hansen Photo Geter.py
|
YuzeLuo/Picture-API-Server
|
d0f1e06673e07646d877c40e0c45d7ba4d2a4536
|
[
"CC0-1.0"
] | 3
|
2021-08-28T07:35:48.000Z
|
2021-08-28T07:45:04.000Z
|
Hansen API Server 2.0/photos/Hansen Photo Geter.py
|
YuzeLuo/Picture-API-Server
|
d0f1e06673e07646d877c40e0c45d7ba4d2a4536
|
[
"CC0-1.0"
] | null | null | null |
import requests
from random import randint as rd
a=0
n=int(input("Enter the number:"))
api_list=["http://www.dmoe.cc/random.php","https://api.ixiaowai.cn/api/api.php","https://api.ixiaowai.cn/mcapi/mcapi.php","https://acg.yanwz.cn/wallpaper/api.php","https://acg.yanwz.cn/api.php"]
for i in range(n):
try:
a+=1
code=requests.get(api_list[rd(0,4)],timeout=10).content
j=open("%d.png"%a,"wb+")
j.write(code)
j.close()
print("successfully get"+str(a))
    except Exception:
        # download failed: reuse this index for the next attempt
        a = a - 1
| 28.1
| 195
| 0.587189
|
4a12fcf3d6aa849b67c15c08a2ced1dfaa6ea018
| 9,265
|
py
|
Python
|
parlai/tasks/bot_adversarial_dialogue/agents.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 1
|
2021-04-25T10:02:11.000Z
|
2021-04-25T10:02:11.000Z
|
parlai/tasks/bot_adversarial_dialogue/agents.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 1
|
2020-11-12T02:20:02.000Z
|
2020-11-12T02:20:02.000Z
|
parlai/tasks/bot_adversarial_dialogue/agents.py
|
zl930216/ParlAI
|
abf0ad6d1779af0f8ce0b5aed00d2bab71416684
|
[
"MIT"
] | 1
|
2020-09-05T20:25:13.000Z
|
2020-09-05T20:25:13.000Z
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Optional
from parlai.core.params import ParlaiParser
import os
from parlai.core.opt import Opt
from parlai.core.teachers import ParlAIDialogTeacher
from parlai.tasks.bot_adversarial_dialogue.build import (
build_dialogue_datasets,
build_human_safety_eval_dataset,
get_adversarial_dialogue_folder,
get_human_safety_eval_folder,
)
import parlai.utils.logging as logging
from parlai.utils.io import PathManager
from parlai.utils.misc import str_to_msg
from parlai.tasks.dialogue_safety.agents import OK_CLASS, NOT_OK_CLASS
from parlai.core.message import Message
# Constants
SAFETY_DICT = {'safe': OK_CLASS, 'unsafe': NOT_OK_CLASS}
def _adversarial_dialogue_datapath(opt: Opt) -> str:
"""
Return the filepath for the specified datatype of the specified adversarial dialogue
task.
"""
build_dialogue_datasets(opt)
# Build the data if it doesn't exist.
dt = opt['datatype'].split(':')[0]
data_path = os.path.join(
get_adversarial_dialogue_folder(opt['datapath']),
'bot_adversarial_dialogue_datasets_with_persona',
dt + '.txt',
)
return data_path
class BotAdversarialDialogueTeacher(ParlAIDialogTeacher):
"""
Teacher for bot_adversarial_dialogue.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
agent = parser.add_argument_group('Bot Adversarial Dialogue options')
agent.add_argument(
'--bad-num-turns',
type=int,
default=-1,
help="Number of dialogue turns to be truncated",
)
agent.add_argument(
'--bad-speaker-to-eval',
type=str,
default='all',
choices=['bot', 'human', 'all'],
help="The speaker of the utterances labeled",
)
agent.add_argument(
'--bad-safety-mix',
type=str,
default='all',
choices=['safe', 'unsafe', 'all'],
help="The set of language being included. If 'safe', only display language labeled as __ok__",
)
agent.add_argument(
'--bad-include-persona',
type=bool,
default=False,
help="Whether to include bot persona or not in the message",
)
return parser
def __init__(self, opt, shared=None):
opt['parlaidialogteacher_datafile'] = _adversarial_dialogue_datapath(opt=opt)
super().__init__(opt, shared=shared)
self.id = '{}.numTurns_{}_speakerToEval_{}_safetyMix_{}_persona_{}'.format(
self.id,
self.opt['bad_num_turns'],
self.opt['bad_speaker_to_eval'],
self.opt['bad_safety_mix'],
self.opt['bad_include_persona'],
)
def _setup_data(self, path):
logging.info(f"Loading ParlAI text data: {path}")
self.episodes = []
self.num_exs = 0
eps = []
with PathManager.open(path, newline='\n', encoding='utf-8') as read:
for line_no, line in enumerate(read, 1):
msg = str_to_msg(line.rstrip('\n'))
if msg and 'eval_labels' in msg:
raise ValueError(
f"It looks like you've written eval_labels as a key in your "
f"data file. This is not appropriate; labels will be converted "
f"for you automatically. This is happening on Line {line_no} "
f"in {path}. The line is:\n\t{line}"
)
if msg and 'text' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "text" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if msg and 'labels' not in msg:
raise ValueError(
f'ParlaiDialogTeacher requires a "labels" field in every '
f'entry, but one is missing in Line {line_no} in {path}. '
f'The line is:\n\t{line}'
)
if (
self.opt['bad_speaker_to_eval'] != 'all'
and self.opt['bad_speaker_to_eval'] != msg['speaker_to_eval']
):
continue
if (
self.opt['bad_safety_mix'] != 'all'
and SAFETY_DICT[self.opt['bad_safety_mix']] != msg['labels'][0]
):
continue
msg_text = msg['text']
dialog = msg_text.split('\n')
if self.opt['bad_include_persona'] and msg['speaker_to_eval'] == 'bot':
# only display persona if it's asked to and if the last turn is bot.
if len(msg['bot_persona'].strip()) > 0:
dialog[0] = msg['bot_persona'] + '\n' + dialog[0]
if self.opt['bad_num_turns'] > 0:
msg_text = '\n'.join(dialog[-self.opt['bad_num_turns'] :])
else:
msg_text = '\n'.join(dialog)
if msg:
msg.force_set('text', msg_text)
self.num_exs += 1
eps.append(msg)
if msg.get('episode_done', False):
self.episodes.append(eps)
eps = []
if len(eps) > 0:
# add last episode
eps[-1].force_set('episode_done', True)
self.episodes.append(eps)
if len(self.episodes) == 1 and line_no > 100:
logging.error(
f'The data in {path} looks like one very long episode. If this '
f'is intentional, you may ignore this, but you MAY have a bug in '
f'your data.'
)
def _human_safety_eval_datapath(opt: Opt) -> str:
"""
Return the filepath for the specified datatype of the specified human evaluation
task on bot adversarial dialogue.
"""
build_human_safety_eval_dataset(opt)
# Build the data if it doesn't exist.
logging.info(
f'The data for human safety evaluation is test set only '
f'regardless of your chosen datatype, which is {opt["datatype"]} '
)
data_path = os.path.join(
get_human_safety_eval_folder(opt['datapath']), 'human_safety_eval', 'test.txt'
)
return data_path
class HumanSafetyEvaluationTeacher(ParlAIDialogTeacher):
"""
Teacher for human safety evaluation on bot adversarial dialogues.
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
agent = parser.add_argument_group(
'Bot Adversarial Dialogue Human Evaluation options'
)
agent.add_argument(
'--bad-include-persona',
type=bool,
default=False,
help="Whether to include bot persona or not in the message",
)
agent.add_argument(
'--flatten-dialogue',
type=bool,
default=True,
help="Whether to flatten the dialogue context",
)
return parser
def __init__(self, opt, shared=None):
opt['parlaidialogteacher_datafile'] = _human_safety_eval_datapath(opt=opt)
super().__init__(opt, shared=shared)
self.id = '{}.persona_{}_flatten_{}'.format(
self.id, self.opt['bad_include_persona'], self.opt['flatten_dialogue']
)
def _setup_data(self, path):
super()._setup_data(path)
new_episodes = []
for ep in self.episodes:
if self.opt['bad_include_persona'] is False:
text_without_persona = '\n'.join(
[
str_
for str_ in ep[0]['text'].split('\n')
if not str_.startswith('your persona: ')
]
)
ep[0].force_set('text', text_without_persona)
if self.opt['flatten_dialogue']:
dialog_text = '\n'.join(
[(exs['text'] + '\n' + exs['labels'][0]) for exs in ep[:-1]]
)
dialog_text = dialog_text + '\n' + ep[-1]['text']
msg = {
'text': dialog_text,
'labels': ep[-1]['labels'],
'episode_done': True,
'id': ep[0]['id'],
'human_eval_turn_range': ep[0]['human_eval_turn_range'],
'convo_id': ep[0]['convo_id'],
}
msg = Message(msg)
new_episodes.append([msg])
if self.opt['flatten_dialogue']:
self.episodes = new_episodes
self.num_exs = len(self.episodes)
class DefaultTeacher(BotAdversarialDialogueTeacher):
pass
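
To eyeball what the truncation and filtering flags do, one option is ParlAI's display_data script. This is a sketch only; it assumes this module is registered under the task name 'bot_adversarial_dialogue'.

from parlai.scripts.display_data import DisplayData

DisplayData.main(
    task='bot_adversarial_dialogue',
    datatype='train',
    bad_num_turns=4,            # truncate each dialogue to its last 4 turns
    bad_speaker_to_eval='bot',  # keep only examples where the bot spoke last
)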
| 37.510121
| 106
| 0.549164
|
4a12fd97e2a5df5663f365352e066bc876fd2cc4
| 10,921
|
py
|
Python
|
sdk/python/pulumi_azure_native/search/v20200801/shared_private_link_resource.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/search/v20200801/shared_private_link_resource.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/search/v20200801/shared_private_link_resource.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._enums import *
from ._inputs import *
__all__ = ['SharedPrivateLinkResourceArgs', 'SharedPrivateLinkResource']
@pulumi.input_type
class SharedPrivateLinkResourceArgs:
def __init__(__self__, *,
resource_group_name: pulumi.Input[str],
search_service_name: pulumi.Input[str],
properties: Optional[pulumi.Input['SharedPrivateLinkResourcePropertiesArgs']] = None,
shared_private_link_resource_name: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a SharedPrivateLinkResource resource.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] search_service_name: The name of the Azure Cognitive Search service associated with the specified resource group.
:param pulumi.Input['SharedPrivateLinkResourcePropertiesArgs'] properties: Describes the properties of a Shared Private Link Resource managed by the Azure Cognitive Search service.
:param pulumi.Input[str] shared_private_link_resource_name: The name of the shared private link resource managed by the Azure Cognitive Search service within the specified resource group.
"""
pulumi.set(__self__, "resource_group_name", resource_group_name)
pulumi.set(__self__, "search_service_name", search_service_name)
if properties is not None:
pulumi.set(__self__, "properties", properties)
if shared_private_link_resource_name is not None:
pulumi.set(__self__, "shared_private_link_resource_name", shared_private_link_resource_name)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Input[str]:
"""
The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: pulumi.Input[str]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="searchServiceName")
def search_service_name(self) -> pulumi.Input[str]:
"""
The name of the Azure Cognitive Search service associated with the specified resource group.
"""
return pulumi.get(self, "search_service_name")
@search_service_name.setter
def search_service_name(self, value: pulumi.Input[str]):
pulumi.set(self, "search_service_name", value)
@property
@pulumi.getter
def properties(self) -> Optional[pulumi.Input['SharedPrivateLinkResourcePropertiesArgs']]:
"""
Describes the properties of a Shared Private Link Resource managed by the Azure Cognitive Search service.
"""
return pulumi.get(self, "properties")
@properties.setter
def properties(self, value: Optional[pulumi.Input['SharedPrivateLinkResourcePropertiesArgs']]):
pulumi.set(self, "properties", value)
@property
@pulumi.getter(name="sharedPrivateLinkResourceName")
def shared_private_link_resource_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the shared private link resource managed by the Azure Cognitive Search service within the specified resource group.
"""
return pulumi.get(self, "shared_private_link_resource_name")
@shared_private_link_resource_name.setter
def shared_private_link_resource_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "shared_private_link_resource_name", value)
class SharedPrivateLinkResource(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
properties: Optional[pulumi.Input[pulumi.InputType['SharedPrivateLinkResourcePropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
search_service_name: Optional[pulumi.Input[str]] = None,
shared_private_link_resource_name: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Describes a Shared Private Link Resource managed by the Azure Cognitive Search service.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['SharedPrivateLinkResourcePropertiesArgs']] properties: Describes the properties of a Shared Private Link Resource managed by the Azure Cognitive Search service.
:param pulumi.Input[str] resource_group_name: The name of the resource group within the current subscription. You can obtain this value from the Azure Resource Manager API or the portal.
:param pulumi.Input[str] search_service_name: The name of the Azure Cognitive Search service associated with the specified resource group.
:param pulumi.Input[str] shared_private_link_resource_name: The name of the shared private link resource managed by the Azure Cognitive Search service within the specified resource group.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: SharedPrivateLinkResourceArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Describes a Shared Private Link Resource managed by the Azure Cognitive Search service.
:param str resource_name: The name of the resource.
:param SharedPrivateLinkResourceArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(SharedPrivateLinkResourceArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
properties: Optional[pulumi.Input[pulumi.InputType['SharedPrivateLinkResourcePropertiesArgs']]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
search_service_name: Optional[pulumi.Input[str]] = None,
shared_private_link_resource_name: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = SharedPrivateLinkResourceArgs.__new__(SharedPrivateLinkResourceArgs)
__props__.__dict__["properties"] = properties
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if search_service_name is None and not opts.urn:
raise TypeError("Missing required property 'search_service_name'")
__props__.__dict__["search_service_name"] = search_service_name
__props__.__dict__["shared_private_link_resource_name"] = shared_private_link_resource_name
__props__.__dict__["name"] = None
__props__.__dict__["type"] = None
alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:search/v20200801:SharedPrivateLinkResource"), pulumi.Alias(type_="azure-native:search:SharedPrivateLinkResource"), pulumi.Alias(type_="azure-nextgen:search:SharedPrivateLinkResource"), pulumi.Alias(type_="azure-native:search/v20200801preview:SharedPrivateLinkResource"), pulumi.Alias(type_="azure-nextgen:search/v20200801preview:SharedPrivateLinkResource")])
opts = pulumi.ResourceOptions.merge(opts, alias_opts)
super(SharedPrivateLinkResource, __self__).__init__(
'azure-native:search/v20200801:SharedPrivateLinkResource',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'SharedPrivateLinkResource':
"""
Get an existing SharedPrivateLinkResource resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = SharedPrivateLinkResourceArgs.__new__(SharedPrivateLinkResourceArgs)
__props__.__dict__["name"] = None
__props__.__dict__["properties"] = None
__props__.__dict__["type"] = None
return SharedPrivateLinkResource(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the resource
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def properties(self) -> pulumi.Output['outputs.SharedPrivateLinkResourcePropertiesResponse']:
"""
Describes the properties of a Shared Private Link Resource managed by the Azure Cognitive Search service.
"""
return pulumi.get(self, "properties")
@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
"""
The type of the resource. E.g. "Microsoft.Compute/virtualMachines" or "Microsoft.Storage/storageAccounts"
"""
return pulumi.get(self, "type")
| 52.253589
| 445
| 0.699753
|
4a12fde1c26e34b1249cc0c03b62129b102c170e
| 2,692
|
py
|
Python
|
homeassistant/components/blueprint/errors.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 1
|
2020-02-02T05:56:10.000Z
|
2020-02-02T05:56:10.000Z
|
homeassistant/components/blueprint/errors.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | 56
|
2020-08-03T07:30:54.000Z
|
2022-03-31T06:02:04.000Z
|
homeassistant/components/blueprint/errors.py
|
rchl/core
|
974e099e2a9527d38445531c6d9bc1461ba4c36f
|
[
"Apache-2.0"
] | null | null | null |
"""Blueprint errors."""
from typing import Any, Iterable
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.exceptions import HomeAssistantError
class BlueprintException(HomeAssistantError):
"""Base exception for blueprint errors."""
def __init__(self, domain: str, msg: str) -> None:
"""Initialize a blueprint exception."""
super().__init__(msg)
self.domain = domain
class BlueprintWithNameException(BlueprintException):
"""Base exception for blueprint errors."""
def __init__(self, domain: str, blueprint_name: str, msg: str) -> None:
"""Initialize blueprint exception."""
super().__init__(domain, msg)
self.blueprint_name = blueprint_name
class FailedToLoad(BlueprintWithNameException):
"""When we failed to load the blueprint."""
def __init__(self, domain: str, blueprint_name: str, exc: Exception) -> None:
"""Initialize blueprint exception."""
super().__init__(domain, blueprint_name, f"Failed to load blueprint: {exc}")
class InvalidBlueprint(BlueprintWithNameException):
"""When we encountered an invalid blueprint."""
def __init__(
self,
domain: str,
blueprint_name: str,
blueprint_data: Any,
msg_or_exc: vol.Invalid,
):
"""Initialize an invalid blueprint error."""
if isinstance(msg_or_exc, vol.Invalid):
msg_or_exc = humanize_error(blueprint_data, msg_or_exc)
super().__init__(
domain,
blueprint_name,
f"Invalid blueprint: {msg_or_exc}",
)
self.blueprint_data = blueprint_data
class InvalidBlueprintInputs(BlueprintException):
"""When we encountered invalid blueprint inputs."""
def __init__(self, domain: str, msg: str):
"""Initialize an invalid blueprint inputs error."""
super().__init__(
domain,
f"Invalid blueprint inputs: {msg}",
)
class MissingPlaceholder(BlueprintWithNameException):
"""When we miss a placeholder."""
def __init__(
self, domain: str, blueprint_name: str, placeholder_names: Iterable[str]
) -> None:
"""Initialize blueprint exception."""
super().__init__(
domain,
blueprint_name,
f"Missing placeholder {', '.join(sorted(placeholder_names))}",
)
class FileAlreadyExists(BlueprintWithNameException):
"""Error when file already exists."""
def __init__(self, domain: str, blueprint_name: str) -> None:
"""Initialize blueprint exception."""
super().__init__(domain, blueprint_name, "Blueprint already exists")
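
A short sketch of how these classes compose at a hypothetical call site; the schema and data are made up for illustration.

import voluptuous as vol

schema = vol.Schema({vol.Required("name"): str})
blueprint_data = {"name": 123}
try:
    schema(blueprint_data)
except vol.Invalid as err:
    # InvalidBlueprint humanizes the voluptuous error internally.
    raise InvalidBlueprint("automation", "my_blueprint", blueprint_data, err)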
| 30.247191
| 84
| 0.656761
|
4a12fe10da51e846c7ea8a4a958c6ec66c840fd9
| 228
|
py
|
Python
|
0x05-python-exceptions/100-safe_print_integer_err.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | 1
|
2021-01-27T03:13:32.000Z
|
2021-01-27T03:13:32.000Z
|
0x05-python-exceptions/100-safe_print_integer_err.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | null | null | null |
0x05-python-exceptions/100-safe_print_integer_err.py
|
JRodriguez9510/holbertonschool-higher_level_programming-2
|
50e788cc0b03de308ed9bc90007dd19edb2efed5
|
[
"MIT"
] | 2
|
2021-01-09T04:45:30.000Z
|
2021-07-13T04:23:47.000Z
|
#!/usr/bin/python3
import sys
def safe_print_integer_err(value):
try:
print("{:d}".format(value))
return True
except Exception as e:
print("Exception:", e, file=sys.stderr)
return False
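
Example usage: the second call fails the "{:d}" format, prints the exception to stderr, and returns False.

print(safe_print_integer_err(89))           # prints 89, then True
print(safe_print_integer_err("Holberton"))  # error on stderr, then False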
| 19
| 47
| 0.609649
|
4a12fe42fe8cea9434b1976e4c9a0465e43fe733
| 166
|
py
|
Python
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_Seasonal_MonthOfYear_AR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_Seasonal_MonthOfYear_AR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | 1
|
2019-11-30T23:39:38.000Z
|
2019-12-01T04:34:35.000Z
|
tests/model_control/detailed/transf_Logit/model_control_one_enabled_Logit_MovingMedian_Seasonal_MonthOfYear_AR.py
|
jmabry/pyaf
|
afbc15a851a2445a7824bf255af612dc429265af
|
[
"BSD-3-Clause"
] | null | null | null |
import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Logit'] , ['MovingMedian'] , ['Seasonal_MonthOfYear'] , ['AR'] );
| 41.5
| 88
| 0.759036
|
4a12fe4e27fe9228ac65eb1e6923c1fc96e811ed
| 1,721
|
py
|
Python
|
tests/conftest.py
|
ThomasLoke/pennylane-lightning
|
2eac157abb47413d761a3c89f16b8089833952b9
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
ThomasLoke/pennylane-lightning
|
2eac157abb47413d761a3c89f16b8089833952b9
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
ThomasLoke/pennylane-lightning
|
2eac157abb47413d761a3c89f16b8089833952b9
|
[
"Apache-2.0"
] | 1
|
2021-02-25T19:35:20.000Z
|
2021-02-25T19:35:20.000Z
|
# Copyright 2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pytest configuration file for PennyLane-Lightning test suite.
"""
import os
import pytest
import numpy as np
import pennylane as qml
from pennylane_lightning import LightningQubit
# defaults
TOL = 1e-6
U = np.array(
[
[0.83645892 - 0.40533293j, -0.20215326 + 0.30850569j],
[-0.23889780 - 0.28101519j, -0.88031770 - 0.29832709j],
]
)
U2 = np.array([[0, 1, 1, 1], [1, 0, 1, -1], [1, -1, 0, 1], [1, 1, -1, 0]]) / np.sqrt(3)
A = np.array([[1.02789352, 1.61296440 - 0.3498192j], [1.61296440 + 0.3498192j, 1.23920938 + 0j]])
@pytest.fixture(scope="session")
def tol():
"""Numerical tolerance for equality tests."""
return float(os.environ.get("TOL", TOL))
@pytest.fixture(scope="session", params=[2, 3])
def n_subsystems(request):
"""Number of qubits or qumodes."""
return request.param
@pytest.fixture(scope="function")
def qubit_device_1_wire():
return LightningQubit(wires=1)
@pytest.fixture(scope="function")
def qubit_device_2_wires():
return LightningQubit(wires=2)
@pytest.fixture(scope="function")
def qubit_device_3_wires():
return LightningQubit(wires=3)
| 27.758065
| 97
| 0.706566
|
4a12fe7d25af0e276a31994422d0107caa450ecf
| 1,006
|
py
|
Python
|
lif_template.py
|
achilleas-k/brian-scripts
|
4d2d8c9a53e7202b60c78716e8b1a9d521293c54
|
[
"Apache-2.0"
] | null | null | null |
lif_template.py
|
achilleas-k/brian-scripts
|
4d2d8c9a53e7202b60c78716e8b1a9d521293c54
|
[
"Apache-2.0"
] | null | null | null |
lif_template.py
|
achilleas-k/brian-scripts
|
4d2d8c9a53e7202b60c78716e8b1a9d521293c54
|
[
"Apache-2.0"
] | null | null | null |
from brian import *
duration = 1*second
N_sims = 1
lif_eq = ['dV/dt = (V_rest-V)/tau_mem : volt']
V_rest = 0*mV
V_reset = 0*mV
V_th = 13*mV
t_refr = 2*ms
tau_mem = 10*msecond
N_in = 10
f_in = 50*Hz
DV_s = 2.5*mV
def inputRate(t):
if t < 500*ms:
return 50*Hz
elif 500*ms <= t < 1000*ms:
return 300*Hz
else:
return 50*Hz
inp = PoissonGroup(N_in, rates=inputRate)
nrns = NeuronGroup(N_sims, lif_eq, threshold=V_th, reset=V_reset,\
refractory=t_refr)
con = Connection(inp, nrns, 'V')
con[:,0] = DV_s
nrns.rest()
mem = StateMonitor(nrns, 'V', record=True)
st = SpikeMonitor(nrns)
inp_mon = SpikeMonitor(inp)
run(duration, report='stdout')
for n in range(N_sims):
f_out = len(st.spiketimes[n])/duration
print "Neuron %i firing rate: %s" % (n, f_out)
subplot(2,1,1)
raster_plot(inp_mon)
subplot(2,1,2)
plot(mem.times,mem[0],mem.times,ones(len(mem.times))*V_th)
title('Membrane voltage trace of neuron 0')
xlabel("Time (seconds)")
ylabel("Volts")
show()
| 20.530612
| 66
| 0.66004
|
4a12febd8711353e8f953127360b5c2437beff1e
| 943
|
py
|
Python
|
homeassistant/components/tcp/binary_sensor.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/tcp/binary_sensor.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/tcp/binary_sensor.py
|
pancho-villa/home-assistant
|
ab17b22239452671f14067571f22aadb9688a3de
|
[
"Apache-2.0"
] | null | null | null |
"""
Provides a binary sensor which gets its values from a TCP socket.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.tcp/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.tcp.sensor import (
TcpSensor, CONF_VALUE_ON, PLATFORM_SCHEMA)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the TCP binary sensor."""
add_entities([TcpBinarySensor(hass, config)])
class TcpBinarySensor(BinarySensorDevice, TcpSensor):
"""A binary sensor which is on when its state == CONF_VALUE_ON."""
required = (CONF_VALUE_ON,)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state == self._config[CONF_VALUE_ON]
| 29.46875
| 74
| 0.749735
|
4a12ffdc5d3a46f853b15c9b77dd2791d1a2a49f
| 650
|
py
|
Python
|
NumberGuesser.py
|
WillLio/Minor-Projects
|
c3ed29f33b71b789fae5f3f72851276c28b83157
|
[
"MIT"
] | null | null | null |
NumberGuesser.py
|
WillLio/Minor-Projects
|
c3ed29f33b71b789fae5f3f72851276c28b83157
|
[
"MIT"
] | null | null | null |
NumberGuesser.py
|
WillLio/Minor-Projects
|
c3ed29f33b71b789fae5f3f72851276c28b83157
|
[
"MIT"
] | null | null | null |
import random
def randomg(min, max):
r, g = random.randrange(min, max+1), None
while g != r:
print()
g = int(input("Your guess: "))
print()
if g>max or g<min:
print("That is not even in the range.")
elif g>r:
print("Too high, try a lower number.")
elif g<r:
print("Too low, try a higher number.")
print("You got it.")
print("This is number guesser, choose a range to generate a random integer from.")
print()
min, max = int(input("The minimum: ")), int(input("The maximum: "))
print("Range from {} to {} | ({}, {})" .format(min, max, min, max))
randomg(min, max)
input("Press ENTER to exit the program.")
| 32.5
| 83
| 0.613846
|
4a1301ac76edab4b7ebde140d5188a9965ce8899
| 5,885
|
py
|
Python
|
official/vision/beta/projects/volumetric_models/configs/semantic_segmentation_3d.py
|
NasTul/models
|
4fabd84c5c5e2b34a1b95431788f7801de036421
|
[
"Apache-2.0"
] | 3
|
2022-03-05T10:46:52.000Z
|
2022-03-22T06:00:05.000Z
|
official/vision/beta/projects/volumetric_models/configs/semantic_segmentation_3d.py
|
NasTul/models
|
4fabd84c5c5e2b34a1b95431788f7801de036421
|
[
"Apache-2.0"
] | 4
|
2021-07-17T23:59:03.000Z
|
2021-07-21T10:18:14.000Z
|
official/vision/beta/projects/volumetric_models/configs/semantic_segmentation_3d.py
|
NasTul/models
|
4fabd84c5c5e2b34a1b95431788f7801de036421
|
[
"Apache-2.0"
] | 2
|
2021-08-17T22:07:17.000Z
|
2021-12-25T12:25:47.000Z
|
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Semantic segmentation configuration definition."""
from typing import List, Optional, Union
import dataclasses
from official.core import exp_factory
from official.modeling import hyperparams
from official.modeling import optimization
from official.modeling.hyperparams import config_definitions as cfg
from official.vision.beta.configs import common
from official.vision.beta.projects.volumetric_models.configs import backbones
from official.vision.beta.projects.volumetric_models.configs import decoders
@dataclasses.dataclass
class DataConfig(cfg.DataConfig):
"""Input config for training."""
output_size: List[int] = dataclasses.field(default_factory=list)
input_size: List[int] = dataclasses.field(default_factory=list)
num_classes: int = 0
num_channels: int = 1
input_path: str = ''
global_batch_size: int = 0
is_training: bool = True
dtype: str = 'float32'
label_dtype: str = 'float32'
image_field_key: str = 'image/encoded'
label_field_key: str = 'image/class/label'
shuffle_buffer_size: int = 1000
cycle_length: int = 10
drop_remainder: bool = False
file_type: str = 'tfrecord'
@dataclasses.dataclass
class SegmentationHead3D(hyperparams.Config):
"""Segmentation head config."""
num_classes: int = 0
level: int = 1
num_convs: int = 0
num_filters: int = 256
upsample_factor: int = 1
output_logits: bool = True
use_batch_normalization: bool = True
@dataclasses.dataclass
class SemanticSegmentationModel3D(hyperparams.Config):
"""Semantic segmentation model config."""
num_classes: int = 0
num_channels: int = 1
input_size: List[int] = dataclasses.field(default_factory=list)
min_level: int = 3
max_level: int = 6
head: SegmentationHead3D = SegmentationHead3D()
backbone: backbones.Backbone = backbones.Backbone(
type='unet_3d', unet_3d=backbones.UNet3D())
decoder: decoders.Decoder = decoders.Decoder(
type='unet_3d_decoder', unet_3d_decoder=decoders.UNet3DDecoder())
norm_activation: common.NormActivation = common.NormActivation()
@dataclasses.dataclass
class Losses(hyperparams.Config):
# Supported `loss_type` are `adaptive` and `generalized`.
loss_type: str = 'adaptive'
l2_weight_decay: float = 0.0
@dataclasses.dataclass
class Evaluation(hyperparams.Config):
report_per_class_metric: bool = False # Whether to report per-class metrics.
@dataclasses.dataclass
class SemanticSegmentation3DTask(cfg.TaskConfig):
"""The model config."""
model: SemanticSegmentationModel3D = SemanticSegmentationModel3D()
train_data: DataConfig = DataConfig(is_training=True)
validation_data: DataConfig = DataConfig(is_training=False)
losses: Losses = Losses()
evaluation: Evaluation = Evaluation()
train_input_partition_dims: List[int] = dataclasses.field(
default_factory=list)
eval_input_partition_dims: List[int] = dataclasses.field(default_factory=list)
init_checkpoint: Optional[str] = None
init_checkpoint_modules: Union[
str, List[str]] = 'all' # all, backbone, and/or decoder
@exp_factory.register_config_factory('seg_unet3d_test')
def seg_unet3d_test() -> cfg.ExperimentConfig:
"""Image segmentation on a dummy dataset with 3D UNet for testing purpose."""
train_batch_size = 2
eval_batch_size = 2
steps_per_epoch = 10
config = cfg.ExperimentConfig(
task=SemanticSegmentation3DTask(
model=SemanticSegmentationModel3D(
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
backbone=backbones.Backbone(
type='unet_3d', unet_3d=backbones.UNet3D(model_id=2)),
decoder=decoders.Decoder(
type='unet_3d_decoder',
unet_3d_decoder=decoders.UNet3DDecoder(model_id=2)),
head=SegmentationHead3D(num_convs=0, num_classes=2),
norm_activation=common.NormActivation(
activation='relu', use_sync_bn=False)),
train_data=DataConfig(
input_path='train.tfrecord',
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
is_training=True,
global_batch_size=train_batch_size),
validation_data=DataConfig(
input_path='val.tfrecord',
num_classes=2,
input_size=[32, 32, 32],
num_channels=2,
is_training=False,
global_batch_size=eval_batch_size),
losses=Losses(loss_type='adaptive')),
trainer=cfg.TrainerConfig(
steps_per_loop=steps_per_epoch,
summary_interval=steps_per_epoch,
checkpoint_interval=steps_per_epoch,
train_steps=10,
validation_steps=10,
validation_interval=steps_per_epoch,
optimizer_config=optimization.OptimizationConfig({
'optimizer': {
'type': 'sgd',
},
'learning_rate': {
'type': 'constant',
'constant': {
'learning_rate': 0.000001
}
}
})),
restrictions=[
'task.train_data.is_training != None',
'task.validation_data.is_training != None'
])
return config
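
The registered name can then be resolved through the factory. A small sketch; get_exp_config is the standard lookup in official.core.exp_factory.

from official.core import exp_factory

config = exp_factory.get_exp_config('seg_unet3d_test')
print(config.task.model.input_size)  # [32, 32, 32]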
| 35.666667
| 80
| 0.690229
|
4a13021ae90f3548c2662672f26fefa5ea87b1fa
| 9,162
|
py
|
Python
|
main.py
|
phubbard/entropy
|
bfe619d1bb64e45f3aa30bc1adbbdf4e203723da
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
phubbard/entropy
|
bfe619d1bb64e45f3aa30bc1adbbdf4e203723da
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
phubbard/entropy
|
bfe619d1bb64e45f3aa30bc1adbbdf4e203723da
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
"""
@author Paul Hubbard
@date 5/7/14
@file main.py
@brief Starting new project for home energy monitoring, using Graphite plus MQTT.
"""
import json
import datetime
import calendar
import time
from ConfigParser import SafeConfigParser
from xml.etree import ElementTree as ET
import logging as log
import sys
import serial
import requests
import plotly.tools as tls
import plotly.plotly as py
from plotly.graph_objs import *
from version import VERSION
def get_demand_chunk(serial):
buf = ''
in_element = False
closestring = '</InstantaneousDemand>'
while True:
in_buf = serial.readline()
in_buf_stripped = in_buf.strip()
log.debug('>' + in_buf_stripped)
if not in_element:
if in_buf_stripped == '<InstantaneousDemand>':
in_element = True
buf += in_buf
closestring = '</InstantaneousDemand>'
continue
elif in_buf_stripped == '<CurrentSummationDelivered>':
in_element = True
buf += in_buf
closestring = '</CurrentSummationDelivered>'
continue
else: # Keep waiting for start of element we want
continue
if in_element:
buf += in_buf
if in_buf_stripped == closestring:
log.debug('got end of xml')
return buf
def process_demand(elem):
"""
Process the InstantaneoousDemand element - convert to decimal,
shift timestamp, do proper scaling. Code borrows heavily from the
raven-cosm project.
"""
seconds_since_2000 = int(elem.find('TimeStamp').text, 16)
multiplier = int(elem.find('Multiplier').text, 16)
divisor = int(elem.find('Divisor').text, 16)
epoch_offset = calendar.timegm(time.strptime("2000-01-01", "%Y-%m-%d"))
gmt = datetime.datetime.utcfromtimestamp(seconds_since_2000 + epoch_offset).isoformat()
try:
demand = int(elem.find('Demand').text, 16)
if seconds_since_2000 and demand and multiplier and divisor:
#if you have solar, during the middle of the day, you'll see this number spike up to something which needs to be interpreted as a negative number
if 1000.0*demand * multiplier/divisor > 32768.0:
demand = -(0xffffffff - demand + 1)
return({"at": gmt +'Z', "atinsec": seconds_since_2000, "demand": str(1000.0 * demand * multiplier / divisor), "type": 0})
except:
log.info("not a demand packet")
try:
#again, mainly an issue in case of solar, where both the summation delivered and summation received numbers can change (instead of the meter going up and down, both numbers
#go up, and the difference is your actual meter.
summationdelivered = int(elem.find('SummationDelivered').text,16)
summationreceived = int(elem.find('SummationReceived').text,16)
if seconds_since_2000 and summationdelivered and multiplier and divisor:
return({"at": gmt +'Z', "atinsec": seconds_since_2000, "summationdelivered": str(1000.0*summationdelivered*multiplier/divisor), "type": 1, "summationreceived": str(1000.0*summationreceived*multiplier/divisor)})
except:
log.info("not a meter reading packet either")
def loop(serial, plotly_stream1, plotly_stream2):
"""
Read a chunk, buffer until complete, parse and send it on.
"""
log.info('Loop starting')
havereading = False
havenewreading = False
while True:
log.debug('reading from serial')
data_chunk = get_demand_chunk(serial)
log.debug('Parsing XML')
try:
elem = ET.fromstring(data_chunk)
demand = process_demand(elem)
x = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
#type 1 is a CurrentSummation Packet (a meter reading packet)
if demand['type'] == 1:
if havereading:
proposedreading = (float(demand['summationdelivered']) - float(demand['summationreceived']))/1000.0
if proposedreading != hardmeterreading:
meterreading = proposedreading
hardmeterreading = meterreading
readingtime = demand['atinsec']
havenewreading = True
log.info('Actual Meter reading: ' + str(meterreading) + 'kWh')
y = hardmeterreading
datum = dict(x=x,y=y)
log.debug(datum)
plotly_stream2.write(datum)
else:
log.info('Ignoring repeated Meter Reading')
else:
havereading = True
meterreading = (float(demand['summationdelivered']) - float(demand['summationreceived']))/1000.0
hardmeterreading = meterreading
readingtime = demand['atinsec']
log.info("Meter reading: " + str(meterreading) + "kWh (possibly stale reading)")
#type 0 is a InstantaneousDemand Packet
if demand['type'] == 0:
y = float(demand['demand'])
datum = dict(x=x, y=y)
log.debug(datum)
plotly_stream1.write(datum)
if havenewreading:
                    previousmeterreading = meterreading
                    previousreadingtime = readingtime
readingtime = demand['atinsec']
meterreading = previousmeterreading + 1.0*(int(readingtime) - int(previousreadingtime))*float(demand['demand'])/(60*60*1000)
log.info('Current Usage: ' + demand['demand'] + 'W')
log.info('Approximate Meter Reading: ' + str(meterreading) + 'kWh')
log.info('Last Actual Meter Reading: ' + str(hardmeterreading) + 'kWh')
y = meterreading
datum = dict(x=x,y=y)
log.debug(datum)
plotly_stream2.write(datum)
elif havereading:
previousmeterreading = meterreading
previousreadingtime = readingtime
readingtime = demand['atinsec']
meterreading = previousmeterreading + 1.0*(int(readingtime) - int(previousreadingtime))*float(demand['demand'])/(60*60*1000)
log.info('Current Usage: ' + demand['demand'])
log.info('Approximate Meter Reading: ' + str(meterreading) + 'kWh, but based on possibly stale meter reading.')
log.info('Last Actual Meter Reading: ' + str(hardmeterreading) + 'kWh (possibly stale reading)')
#it is not an oversight that we haven't written to plotly here. The readings are
#based on the actual meter reading that may be up to 4-5 minutes old. Once we receive a *new* meter reading, we'll start writing
#all of the meter readings, actual when the meter reading packets come, and approximate in between.
else:
log.info('Current Usage: ' + demand['demand'] + 'W')
log.info('Meter not yet read')
except:
log.info('Ignoring parse errors')
continue
# TODO read dweet thing name from config.ini
# try:
# requests.post('https://dweet.io/dweet/for/42df176b534c415e9681df5e28e348b1',
# params=demand)
# except ConnectionError, ce:
# log.warn('Unable to dweet')
# Off to plotly too
# TODO return pre-set X and Y from process_demand
def plotly_setup(stream_id, nameoffile):
# Working from https://plot.ly/python/streaming-tutorial/
trace1 = Scatter(x=[], y=[], stream=dict(token=stream_id))
data = Data([trace1])
url = py.plot(data, filename=nameoffile)
log.debug(url)
s = py.Stream(stream_id)
s.open()
return s
def setup():
log.basicConfig(level=log.DEBUG, format='%(asctime)s %(levelname)s [%(funcName)s] %(message)s')
cfg_file = 'config.ini'
if (len(sys.argv) == 2):
cfg_file = sys.argv[1]
log.info('Reading configuration file ' + cfg_file)
cf = SafeConfigParser()
cf.read(cfg_file)
log.info('Opening Raven...')
serial_port = serial.Serial(cf.get('raven', 'port'), cf.getint('raven', 'baud'))
log.info('Opening plot.ly...')
#I don't know if two stream_ids are needed or not, but I used two. Filenames can be changed too.
strm = plotly_setup(cf.get('plotly', 'stream_id1'), 'netusage')
strm2 = plotly_setup(cf.get('plotly', 'stream_id2'), 'electricmeter')
log.info('Starting loop...')
loop(serial_port, strm, strm2)
if __name__ == '__main__':
setup()
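
The script expects a config.ini with a 'raven' and a 'plotly' section. A hedged sketch that writes one with placeholder values (the serial port and baud rate depend on your RAVEn dongle; the stream IDs come from your plot.ly account):

from ConfigParser import SafeConfigParser  # Python 2, matching the script above

cf = SafeConfigParser()
cf.add_section('raven')
cf.set('raven', 'port', '/dev/ttyUSB0')  # placeholder serial port
cf.set('raven', 'baud', '115200')
cf.add_section('plotly')
cf.set('plotly', 'stream_id1', 'YOUR_STREAM_ID_1')
cf.set('plotly', 'stream_id2', 'YOUR_STREAM_ID_2')
with open('config.ini', 'w') as f:
    cf.write(f)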
| 41.457014
| 238
| 0.579568
|
4a1302629932aaf88f7e6e46552bbd387d4517d2
| 437
|
py
|
Python
|
examples/widgets/label_with_markup.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 13,889
|
2015-01-01T06:43:41.000Z
|
2022-03-31T17:37:56.000Z
|
examples/widgets/label_with_markup.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 4,570
|
2015-01-01T17:58:52.000Z
|
2022-03-31T18:42:16.000Z
|
examples/widgets/label_with_markup.py
|
Galland/kivy
|
95a6bf279883d706f645e4629c16d5ee1038f0ec
|
[
"MIT"
] | 3,786
|
2015-01-01T09:20:45.000Z
|
2022-03-30T21:15:05.000Z
|
from kivy.app import App
from kivy.lang import Builder
root = Builder.load_string('''
Label:
text:
('[b]Hello[/b] [color=ff0099]World[/color]\\n'
'[color=ff0099]Hello[/color] [b]World[/b]\\n'
'[b]Hello[/b] [color=ff0099]World[/color]')
markup: True
font_size: '64pt'
''')
class LabelWithMarkup(App):
def build(self):
return root
if __name__ == '__main__':
LabelWithMarkup().run()
| 19.863636
| 54
| 0.608696
|