hexsha (string, length 40) | size (int64, 1 to 1.03M) | ext (string, 10 classes) | lang (string, 1 class) | max_stars_repo_path (string, length 3 to 239) | max_stars_repo_name (string, length 5 to 130) | max_stars_repo_head_hexsha (string, length 40 to 78) | max_stars_repo_licenses (list, length 1 to 10) | max_stars_count (int64, 1 to 191k, nullable) | max_stars_repo_stars_event_min_datetime (string, length 24, nullable) | max_stars_repo_stars_event_max_datetime (string, length 24, nullable) | max_issues_repo_path (string, length 3 to 239) | max_issues_repo_name (string, length 5 to 130) | max_issues_repo_head_hexsha (string, length 40 to 78) | max_issues_repo_licenses (list, length 1 to 10) | max_issues_count (int64, 1 to 67k, nullable) | max_issues_repo_issues_event_min_datetime (string, length 24, nullable) | max_issues_repo_issues_event_max_datetime (string, length 24, nullable) | max_forks_repo_path (string, length 3 to 239) | max_forks_repo_name (string, length 5 to 130) | max_forks_repo_head_hexsha (string, length 40 to 78) | max_forks_repo_licenses (list, length 1 to 10) | max_forks_count (int64, 1 to 105k, nullable) | max_forks_repo_forks_event_min_datetime (string, length 24, nullable) | max_forks_repo_forks_event_max_datetime (string, length 24, nullable) | content (string, length 1 to 1.03M) | avg_line_length (float64, 1 to 958k) | max_line_length (int64, 1 to 1.03M) | alphanum_fraction (float64, 0 to 1) |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
aceeb6d206fa040dfb62760a53527c68b305f721 | 877 | py | Python | docs/api/jpnToHexList.py | secondsabre/Tales-of-Destiny-DC | f46ee1867d29b38b565ae621cd7aca74d3b7f575 | [
"Unlicense"
] | 96 | 2021-01-24T01:01:12.000Z | 2022-02-16T10:33:03.000Z | docs/api/jpnToHexList.py | secondsabre/Tales-of-Destiny-DC | f46ee1867d29b38b565ae621cd7aca74d3b7f575 | [
"Unlicense"
] | 74 | 2021-03-05T03:30:55.000Z | 2022-01-09T03:11:12.000Z | docs/api/jpnToHexList.py | secondsabre/Tales-of-Destiny-DC | f46ee1867d29b38b565ae621cd7aca74d3b7f575 | [
"Unlicense"
] | 23 | 2021-03-04T02:59:57.000Z | 2022-02-12T21:11:19.000Z | from http.server import BaseHTTPRequestHandler
from urllib.parse import urlparse
import json
from ._scripts.JpnToHexList import jpnToHexList
class handler(BaseHTTPRequestHandler):
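# Handles POST requests: parses the JSON body, converts data['input'] to a hex list via
# jpnToHexList, and echoes the request metadata plus the converted value back as JSON.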
def do_POST(self):
content_len = int(self.headers['content-length'])
post_body = self.rfile.read(content_len)
data = json.loads(post_body)
parsed_path = urlparse(self.path)
converted = jpnToHexList(data['input'])
print(converted)
self.send_response(200)
self.end_headers()
self.wfile.write(json.dumps({
'method': self.command,
'path': self.path,
'real_path': parsed_path.query,
'query': parsed_path.query,
'request_version': self.request_version,
'protocol_version': self.protocol_version,
'body': converted}).encode())
return
| 33.730769 | 57 | 0.641961 |
aceeb74648caa6c76059c1f28ac7167ef537c92b | 2,196 | py | Python | setup.py | yinziyan1206/x-base | dc74124ad9b07b799ef03917a0e9a882a062ac40 | [
"BSD-2-Clause"
] | null | null | null | setup.py | yinziyan1206/x-base | dc74124ad9b07b799ef03917a0e9a882a062ac40 | [
"BSD-2-Clause"
] | null | null | null | setup.py | yinziyan1206/x-base | dc74124ad9b07b799ef03917a0e9a882a062ac40 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
from Cython.Build import cythonize
from setuptools import setup, find_packages, Extension
PACKAGE = 'basex'
NAME = 'basex'
PY_VER = sys.version_info
REQUIREMENTS = [
'orjson',
'loguru',
'PyYAML',
'pybase64',
'pydantic',
'dynaconf',
'sqlalchemy[asyncio]>=1.4'
]
package = __import__(PACKAGE)
VERSION = package.__version__
AUTHOR = package.__author__
DESCRIBE = package.__describe__
CLASSIFIERS = [
'Intended Audience :: Developers',
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
'Operating System :: OS Independent',
'Environment :: Web Environment',
'Development Status :: 3 - Alpha',
'Topic :: Internet',
'Topic :: Software Development :: Libraries :: Application Frameworks',
]
KEYWORDS = ["api", "x-api", "x-base", "basex"]
packages = find_packages()
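# Collect Cython extensions: every .pyx file under basex/native becomes an Extension,
# optionally bundled with a matching pre-generated C source from basex/native/lib.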
def get_extensions():
extensions = []
for file in os.listdir('basex/native'):
if file.endswith('.pyx'):
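# NOTE: str.removesuffix() requires Python 3.9+, although older versions are still listed in CLASSIFIERS.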
module_name = file.removesuffix(".pyx")
modules = [f'basex/native/{file}']
if os.path.exists(f'basex/native/lib/{module_name}.c'):
modules.append(f'basex/native/lib/{module_name}.c')
extensions.append(
Extension(
f'basex.native.{module_name}',
modules
)
)
return extensions
setup(
name=NAME,
version=VERSION,
author=AUTHOR,
author_email="408856732@qq.com",
description=DESCRIBE,
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
license="BSD 2-Clause",
url="https://github.com/yinziyan1206/x-base",
packages=packages,
install_requires=REQUIREMENTS,
keywords=KEYWORDS,
classifiers=CLASSIFIERS,
ext_modules=cythonize(
get_extensions(),
compiler_directives={"language_level": 3},
)
)
| 26.780488 | 75 | 0.630692 |
aceeb751039d8bd09df329bc0d94e203d7e6cddb | 419 | py | Python | share/migrations/0002_auto_20200512_2042.py | VotarkSocial/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 2 | 2020-06-14T08:25:29.000Z | 2021-09-22T07:48:11.000Z | share/migrations/0002_auto_20200512_2042.py | suulcoder/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 10 | 2020-06-14T08:36:42.000Z | 2022-03-12T00:30:53.000Z | share/migrations/0002_auto_20200512_2042.py | suulcoder/votarkAPI | eea10a64ac0b255c97078b90786fccb30d0a451e | [
"MIT"
] | 1 | 2021-09-22T07:48:17.000Z | 2021-09-22T07:48:17.000Z | # Generated by Django 3.0.4 on 2020-05-12 20:42
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('share', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='share',
name='date',
field=models.DateField(default=django.utils.timezone.now),
),
]
| 20.95 | 70 | 0.613365 |
aceeb846997bca19cbb6c7871f7818e22c128e13 | 595 | py | Python | app.py | lukoucky/restaurant_booking | bc6f0c655653571b5e190614168c2534f809992d | [
"MIT"
] | null | null | null | app.py | lukoucky/restaurant_booking | bc6f0c655653571b5e190614168c2534f809992d | [
"MIT"
] | null | null | null | app.py | lukoucky/restaurant_booking | bc6f0c655653571b5e190614168c2534f809992d | [
"MIT"
] | null | null | null | import os
from flask import Flask, request, abort, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_cors import CORS
from models import setup_db, RestaurantTable
def create_app(test_config=None):
# create and configure the app
app = Flask(__name__)
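# Use the DATABASE_URL environment variable when present (e.g. on Heroku);
# otherwise fall back to the default connection configured in models.setup_db.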
if 'DATABASE_URL' in os.environ:
setup_db(app, os.environ['DATABASE_URL'])
else:
setup_db(app)
CORS(app)
@app.route('/')
def index():
return 'Hello from Heroku, env variable: ' + os.environ.get('ENV_VALUE', 'not set')
return app
app = create_app()
if __name__ == '__main__':
app.run()
| 22.037037 | 76 | 0.680672 |
aceeb89fef1559aaecc2d7f19ae6f1d3c1924463 | 1,337 | py | Python | main.py | 0xtr/minimud_python | 7241e3b904cd39426adbe0d884b0d6782bff82bf | [
"MIT"
] | null | null | null | main.py | 0xtr/minimud_python | 7241e3b904cd39426adbe0d884b0d6782bff82bf | [
"MIT"
] | null | null | null | main.py | 0xtr/minimud_python | 7241e3b904cd39426adbe0d884b0d6782bff82bf | [
"MIT"
] | null | null | null | import random
import selectors
import socket
import sys
import pdb
from src.io import IncomingHandler
from src.io.MessageQueue import MessageQueue
from src.sqlitehelper import SQLiteHelper
# debugging
# pdb.set_trace()
port = random.randint(5000, 6000)
print("Use port " + str(port) + " for connections\n")
# open the sqlite3 dbs
dbManager = SQLiteHelper.SQLDBConnector()
assert dbManager.connectedToAllDatabases
# create the master socket
listensock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
listensock.setblocking(0)
# bind it to our chosen port
try:
listensock.bind(("", port))
except Exception as e:
print(e.args)
sys.exit(1)
# set listener for connections
listensock.listen()
def accept(sock, mask):
newsock, address = sock.accept()
newsock.setblocking(False)
print("connection from " + str(newsock) + " at " + str(address))
MessageQueue.initQueue(newsock)
selector.register(newsock, selectors.EVENT_READ, read)
# TODO: welcome them nicely
def read(sock, mask):
IncomingHandler.incoming_handler(sock)
# TODO: store selector in class
selector = selectors.DefaultSelector()
selector.register(listensock, selectors.EVENT_READ, accept)
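# Event loop: each ready socket's registered callback (accept for the listening socket,
# read for client sockets) is stored in key.data and invoked here.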
while True:
events = selector.select()
for key, mask in events:
callback = key.data
callback(key.fileobj, mask)
| 23.051724 | 68 | 0.738968 |
aceeb9308a0c43bcf29b9617e41d6d9b28a2dbf1 | 10,667 | py | Python | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pt_BR-2022] 1.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pt_BR-2022] 1.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | tests/snapshots/snap_test_holidata/test_holidata_produces_holidays_for_locale_and_year[pt_BR-2022] 1.py | xeals/holidata | c99a56b63b1cb9dc5f4f79f3de83ba3865215250 | [
"MIT"
] | null | null | null | [
{
'date': '2022-01-01',
'description': 'Confraternização Universal',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2022-01-04',
'description': 'Criação do Estado de Rondônia',
'locale': 'pt-BR',
'notes': '',
'region': 'RO',
'type': 'F'
},
{
'date': '2022-01-23',
'description': 'Dia do Evangélico no Acre',
'locale': 'pt-BR',
'notes': '',
'region': 'AC',
'type': 'RF'
},
{
'date': '2022-03-01',
'description': 'Carnaval',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2022-03-06',
'description': 'Revolução Pernambucana de 1817',
'locale': 'pt-BR',
'notes': '',
'region': 'PE',
'type': 'F'
},
{
'date': '2022-03-08',
'description': 'Dia Internacional da Mulher',
'locale': 'pt-BR',
'notes': '',
'region': 'AC',
'type': 'F'
},
{
'date': '2022-03-18',
'description': 'Autonomia do Estado de Tocantins',
'locale': 'pt-BR',
'notes': '',
'region': 'TO',
'type': 'F'
},
{
'date': '2022-03-19',
'description': 'Dia de São José',
'locale': 'pt-BR',
'notes': '',
'region': 'AP',
'type': 'RF'
},
{
'date': '2022-03-19',
'description': 'Dia de São José',
'locale': 'pt-BR',
'notes': '',
'region': 'CE',
'type': 'RF'
},
{
'date': '2022-03-25',
'description': 'Abolição da Escravidão no Ceará',
'locale': 'pt-BR',
'notes': '',
'region': 'CE',
'type': 'F'
},
{
'date': '2022-04-17',
'description': 'Páscoa',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NRV'
},
{
'date': '2022-04-21',
'description': 'Fundação de Brasília',
'locale': 'pt-BR',
'notes': '',
'region': 'DF',
'type': 'F'
},
{
'date': '2022-04-21',
'description': 'Execução de Tiradentes',
'locale': 'pt-BR',
'notes': '',
'region': 'MG',
'type': 'F'
},
{
'date': '2022-04-21',
'description': 'Tiradentes',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2022-04-23',
'description': 'Dia de São Jorge',
'locale': 'pt-BR',
'notes': '',
'region': 'RJ',
'type': 'RF'
},
{
'date': '2022-05-01',
'description': 'Dia Internacional do Trabalhador',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2022-06-15',
'description': 'Aniversário do Estado do Acre',
'locale': 'pt-BR',
'notes': '',
'region': 'AC',
'type': 'F'
},
{
'date': '2022-06-18',
'description': 'Dia do Evangélico em Rondônia',
'locale': 'pt-BR',
'notes': '',
'region': 'RO',
'type': 'RF'
},
{
'date': '2022-06-24',
'description': 'São João',
'locale': 'pt-BR',
'notes': '',
'region': 'AL',
'type': 'RF'
},
{
'date': '2022-06-24',
'description': 'São João',
'locale': 'pt-BR',
'notes': '',
'region': 'PE',
'type': 'RF'
},
{
'date': '2022-06-29',
'description': 'São Pedro',
'locale': 'pt-BR',
'notes': '',
'region': 'AL',
'type': 'RF'
},
{
'date': '2022-07-02',
'description': 'Independência da Bahia',
'locale': 'pt-BR',
'notes': '',
'region': 'BA',
'type': 'F'
},
{
'date': '2022-07-08',
'description': 'Emancipação Política de Sergipe',
'locale': 'pt-BR',
'notes': '',
'region': 'SE',
'type': 'F'
},
{
'date': '2022-07-09',
'description': 'Revolução Constitucionalista de 1932',
'locale': 'pt-BR',
'notes': '',
'region': 'SP',
'type': 'F'
},
{
'date': '2022-07-26',
'description': 'Fundação da Cidade de Goiás',
'locale': 'pt-BR',
'notes': '',
'region': 'GO',
'type': 'F'
},
{
'date': '2022-07-28',
'description': 'Adesão do Maranhão à Independência do Brasil',
'locale': 'pt-BR',
'notes': '',
'region': 'MA',
'type': 'F'
},
{
'date': '2022-08-05',
'description': 'Fundação do Estado da Paraíba',
'locale': 'pt-BR',
'notes': '',
'region': 'PB',
'type': 'F'
},
{
'date': '2022-08-07',
'description': 'Dia do Rio Grande do Norte',
'locale': 'pt-BR',
'notes': '',
'region': 'RN',
'type': 'F'
},
{
'date': '2022-08-11',
'description': 'Dia de Santa Catarina',
'locale': 'pt-BR',
'notes': '',
'region': 'SC',
'type': 'F'
},
{
'date': '2022-08-15',
'description': 'Dia de Nossa Senhora da Assunção',
'locale': 'pt-BR',
'notes': '',
'region': 'CE',
'type': 'RF'
},
{
'date': '2022-08-15',
'description': 'Adesão do Pará à Independência do Brasil',
'locale': 'pt-BR',
'notes': '',
'region': 'PA',
'type': 'F'
},
{
'date': '2022-09-05',
'description': 'Dia da Amazônia',
'locale': 'pt-BR',
'notes': '',
'region': 'AC',
'type': 'F'
},
{
'date': '2022-09-05',
'description': 'Elevação do Amazonas à Categoria de Província',
'locale': 'pt-BR',
'notes': '',
'region': 'AM',
'type': 'F'
},
{
'date': '2022-09-07',
'description': 'Independência do Brasil',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2022-09-08',
'description': 'Nossa Senhora da Natividade',
'locale': 'pt-BR',
'notes': '',
'region': 'TO',
'type': 'F'
},
{
'date': '2022-09-13',
'description': 'Criação do Território Federal do Amapá',
'locale': 'pt-BR',
'notes': '',
'region': 'AP',
'type': 'F'
},
{
'date': '2022-09-16',
'description': 'Emancipação Política do Alagoas',
'locale': 'pt-BR',
'notes': '',
'region': 'AL',
'type': 'F'
},
{
'date': '2022-09-20',
'description': 'Dia do Gaúcho',
'locale': 'pt-BR',
'notes': '',
'region': 'RS',
'type': 'F'
},
{
'date': '2022-10-03',
'description': 'Mártires de Cunhaú e Uruaçu',
'locale': 'pt-BR',
'notes': '',
'region': 'RN',
'type': 'F'
},
{
'date': '2022-10-05',
'description': 'Criação dos Estado de Roraima',
'locale': 'pt-BR',
'notes': '',
'region': 'RR',
'type': 'F'
},
{
'date': '2022-10-05',
'description': 'Criação dos Estado de Tocantins',
'locale': 'pt-BR',
'notes': '',
'region': 'TO',
'type': 'F'
},
{
'date': '2022-10-11',
'description': 'Criação do Estado do Mato Grosso do Sul',
'locale': 'pt-BR',
'notes': '',
'region': 'MS',
'type': 'F'
},
{
'date': '2022-10-12',
'description': 'Nossa Senhora Aparecida',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2022-10-19',
'description': 'Dia do Piauí',
'locale': 'pt-BR',
'notes': '',
'region': 'PI',
'type': 'F'
},
{
'date': '2022-10-24',
'description': 'Pedra Fundamental de Goiânia',
'locale': 'pt-BR',
'notes': '',
'region': 'GO',
'type': 'F'
},
{
'date': '2022-11-02',
'description': 'Finados',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NRF'
},
{
'date': '2022-11-15',
'description': 'Proclamação da República',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NF'
},
{
'date': '2022-11-17',
'description': 'Assinatura do Tratado de Petrópolis',
'locale': 'pt-BR',
'notes': '',
'region': 'AC',
'type': 'F'
},
{
'date': '2022-11-20',
'description': 'Morte de Zumbi dos Palmares',
'locale': 'pt-BR',
'notes': '',
'region': 'AL',
'type': 'F'
},
{
'date': '2022-11-20',
'description': 'Dia da Consciência Negra',
'locale': 'pt-BR',
'notes': '',
'region': 'AM',
'type': 'F'
},
{
'date': '2022-11-20',
'description': 'Dia da Consciência Negra',
'locale': 'pt-BR',
'notes': '',
'region': 'MT',
'type': 'F'
},
{
'date': '2022-11-20',
'description': 'Dia da Consciência Negra',
'locale': 'pt-BR',
'notes': '',
'region': 'RJ',
'type': 'F'
},
{
'date': '2022-11-25',
'description': 'Dia de Santa Catarina de Alexandria',
'locale': 'pt-BR',
'notes': '',
'region': 'SC',
'type': 'RF'
},
{
'date': '2022-11-30',
'description': 'Dia do Evangélico do Distrito Federal',
'locale': 'pt-BR',
'notes': '',
'region': 'DF',
'type': 'RF'
},
{
'date': '2022-12-08',
'description': 'Nossa Senhora da Conceição',
'locale': 'pt-BR',
'notes': '',
'region': 'AM',
'type': 'RF'
},
{
'date': '2022-12-19',
'description': 'Emancipação Política do Estado do Paraná',
'locale': 'pt-BR',
'notes': '',
'region': 'PR',
'type': 'F'
},
{
'date': '2022-12-25',
'description': 'Natal',
'locale': 'pt-BR',
'notes': '',
'region': '',
'type': 'NRF'
}
] | 23.290393 | 71 | 0.390082 |
aceeb9c7d1333e3bcf5fc836a0f0a2c2f14fbf69 | 212 | py | Python | elaina/utils/filter_groups.py | Vishal324140/ElainaRobot | d72092e9d2ddc9f94f21374cad57aea390612586 | [
"MIT"
] | 1 | 2022-01-31T08:44:33.000Z | 2022-01-31T08:44:33.000Z | elaina/utils/filter_groups.py | Vishal324140/ElainaRobot | d72092e9d2ddc9f94f21374cad57aea390612586 | [
"MIT"
] | 8 | 2022-01-30T22:22:47.000Z | 2022-03-13T03:01:18.000Z | elaina/utils/filter_groups.py | animeSubbingTeam/MitsuhaTaki | 64ff3ba483656bc1246c863ebffbaddd76cb145f | [
"MIT"
] | 1 | 2022-02-14T03:44:41.000Z | 2022-02-14T03:44:41.000Z | chat_filters_group = 1
chatbot_group = 2
regex_group = 5
welcome_captcha_group = 6
antiflood_group = 7
nsfw_detect_group = 8
blacklist_filters_group = 9
pipes_group = 10
taglog_group = 11
chat_watcher_group = 12
| 19.272727 | 27 | 0.811321 |
aceebb1e04eb59a9177f9615b28d5d2c2c588088 | 5,234 | py | Python | analyse_food/wrap_heatmap.py | matt2042/caprica451 | 2a95bf0a268db40d8f7cf0654b4a7a21edbf66be | [
"MIT"
] | null | null | null | analyse_food/wrap_heatmap.py | matt2042/caprica451 | 2a95bf0a268db40d8f7cf0654b4a7a21edbf66be | [
"MIT"
] | null | null | null | analyse_food/wrap_heatmap.py | matt2042/caprica451 | 2a95bf0a268db40d8f7cf0654b4a7a21edbf66be | [
"MIT"
] | null | null | null | """
Wraps heatmap output so it can be correlated with the output of the SSD detection module.
"""
#############################################################################
# Imports
#############################################################################
import matplotlib.pyplot as plt
plt.ion()
from scipy.misc import imread
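# NOTE: scipy.misc.imread was removed in SciPy 1.2; on newer installs, imageio.imread is a common drop-in replacement here.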
import numpy as np
from keras.preprocessing import image as image_proc
from keras import backend as K
from keras.applications.resnet50 import ResNet50, preprocess_input
from heatmap import to_heatmap, synset_to_dfs_ids
#############################################################################
# Constants
#############################################################################
IMAGE_NET_DOG_CODE = "n02084071"
IMAGE_HEIGHT = 800
IMAGE_WIDTH = 1280
#############################################################################
# Settings
#############################################################################
# image_filename = "/Users/colinrawlings/Desktop/htc18/refs/ssd_keras-1-master/caprica451/examples/bananas.jpg"
# image_filename = "/Users/colinrawlings/Desktop/htc18/caprica451/examples/banana_rotten.jpg"
image_filename = "/Users/colinrawlings/Desktop/htc18/caprica451/examples/banana_02.jpg"
# image_filename = "/Users/colinrawlings/Desktop/htc18/caprica451/examples/banana_pair.jpg"
object_name = "banana"
threshold = 0.75
#############################################################################
# definitions
#############################################################################
def calculate_heatmap(original_image, new_model, image, ids, preprocessing=None):
# The working resolution is kept low (IMAGE_HEIGHT x IMAGE_WIDTH) to limit memory use.
# If you have more than 8GB of RAM, you can try to increase it.
from PIL import Image
import numpy as np
x = image_proc.img_to_array(image)
x = np.expand_dims(x, axis=0)
if preprocessing is not None:
x = preprocess_input(x)
print("prediction starting")
out = new_model.predict(x)
print("prediction finished")
heatmap = out[0] # Removing batch axis.
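# Keep only the channel(s) for the requested ImageNet class ids and collapse them into a
# single 2-D map; the channel axis position depends on the Keras backend image format.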
if K.image_data_format() == 'channels_first':
heatmap = heatmap[ids]
if heatmap.ndim == 3:
heatmap = np.sum(heatmap, axis=0)
else:
heatmap = heatmap[:, :, ids]
if heatmap.ndim == 3:
heatmap = np.sum(heatmap, axis=2)
# resize back to original dimensions
pil_heatmap = Image.fromarray(heatmap)
resized_pil_heatmap = pil_heatmap.resize((original_image.shape[1], original_image.shape[0]),
Image.BICUBIC)
resized_np_heatmap = np.array(resized_pil_heatmap)
return resized_np_heatmap
#############################################################################
def analyse_heatmap(threshold, original_image, heatmap):
"""
:param threshold: heatmap values above this value are kept in the mask
:param original_image: original image as a numpy array
:param heatmap: class activation map resized to the original image dimensions
:return: uint8 numpy.array masked_image, boolean mask
"""
mask = heatmap > threshold
np_image = np.array(original_image)
masked_np_image = np.zeros(np.shape(np_image))
for channel in range(3):
masked_np_image[:, :, channel] = mask * np_image[:, :, channel]
masked_np_image = np.asarray(masked_np_image, dtype=np.uint8)
return masked_np_image, mask
#############################################################################
def display_graphical_results(original_image, heatmap, masked_image):
"""
:param original_image:
:param heatmap:
:param masked_image:
:return: fig, axs
"""
fig, axs = plt.subplots(3, 1)
axs[0].imshow(original_image, interpolation="none")
axs[0].contour(heatmap, [threshold, 1.1])
axs[1].imshow(heatmap, interpolation="none")
axs[1].contour(heatmap, [threshold, 1.1])
axs[2].imshow(masked_image, interpolation="none")
return fig, axs
#############################################################################
def calc_masked_image(image_filename, object_name, mask_threshold=0.5):
"""
:param image_filename:
:param object_name:
:param mask_threshold:
:return: masked_image, mask, fig, axs
"""
from heatmap.imagenet1000_clsid_to_human import get_imagenet_classes_from_names
class_ids = get_imagenet_classes_from_names()
# model
class_id = class_ids[object_name]
model = ResNet50()
new_model = to_heatmap(model)
# calc
original_image = imread(image_filename)
image = image_proc.load_img(image_filename, target_size=(IMAGE_HEIGHT, IMAGE_WIDTH))
heatmap = calculate_heatmap(original_image, new_model, image, class_id, preprocess_input)
#
masked_image, mask = analyse_heatmap(threshold, original_image, heatmap)
fig, axs = display_graphical_results(original_image, heatmap, masked_image)
return masked_image, mask, fig, axs
#############################################################################
# main
#############################################################################
if __name__ == "__main__":
masked_image, mask, fig, axs = calc_masked_image(image_filename,
object_name,
mask_threshold=threshold)
| 28.758242 | 111 | 0.555216 |
aceebb70dc923579c22fb3800f0927fc9b6040fc | 5,858 | py | Python | autodc/components/models/imbalanced_classification/easy_ensemble.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/models/imbalanced_classification/easy_ensemble.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | autodc/components/models/imbalanced_classification/easy_ensemble.py | dingdian110/AutoDC | f5ccca6bea993bcff3e804fb859e8b25ae020b5c | [
"MIT"
] | null | null | null | import numpy as np
from ConfigSpace.configuration_space import ConfigurationSpace
from ConfigSpace.hyperparameters import UniformFloatHyperparameter, \
UniformIntegerHyperparameter, CategoricalHyperparameter
from autodc.components.models.base_model import BaseClassificationModel
from autodc.components.utils.constants import DENSE, SPARSE, UNSIGNED_DATA, PREDICTIONS
from autodc.components.utils.configspace_utils import check_none, check_for_bool
class EasyEnsemble(BaseClassificationModel):
def __init__(self, n_estimators,
sampling_strategy,
replacement,
ab_n_estimators,
ab_max_depth,
ab_learning_rate,
ab_algorithm,
n_jobs=-1,
random_state=None):
self.n_estimators = n_estimators
self.sampling_strategy = sampling_strategy
self.replacement = replacement
self.random_state = random_state
# Parameters for Adaboost base learner
self.ab_max_depth = ab_max_depth
self.ab_n_estimators = ab_n_estimators
self.ab_learning_rate = ab_learning_rate
self.ab_algorithm = ab_algorithm
self.n_jobs = n_jobs
self.estimator = None
self.time_limit = None
def fit(self, X, Y, sample_weight=None):
import sklearn.tree
import sklearn.ensemble
if self.estimator is None:
self.ab_max_depth = int(self.ab_max_depth)
base_estimator = sklearn.tree.DecisionTreeClassifier(max_depth=self.ab_max_depth)
self.estimator = sklearn.ensemble.AdaBoostClassifier(
base_estimator=base_estimator,
n_estimators=self.ab_n_estimators,
learning_rate=self.ab_learning_rate,
algorithm=self.ab_algorithm,
random_state=self.random_state
)
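# EasyEnsemble bags several AdaBoost learners, each fitted on a random balanced
# (under-sampled) subset of the training data.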
from imblearn.ensemble import EasyEnsembleClassifier
estimator = EasyEnsembleClassifier(base_estimator=self.estimator,
n_estimators=self.n_estimators,
sampling_strategy=self.sampling_strategy,
replacement=self.replacement,
n_jobs=self.n_jobs,
random_state=self.random_state)
estimator.fit(X, Y)
self.estimator = estimator
return self
def predict(self, X):
if self.estimator is None:
raise NotImplementedError
return self.estimator.predict(X)
def predict_proba(self, X):
if self.estimator is None:
raise NotImplementedError()
return self.estimator.predict_proba(X)
@staticmethod
def get_properties(dataset_properties=None):
return {'shortname': 'Easy_Ensemble',
'name': 'Easy Ensemble Classifier',
'handles_regression': False,
'handles_classification': True,
'handles_multiclass': True,
'handles_multilabel': True,
'is_deterministic': True,
'input': (DENSE, SPARSE, UNSIGNED_DATA),
'output': (PREDICTIONS,)}
@staticmethod
def get_hyperparameter_search_space(dataset_properties=None, optimizer='smac'):
if optimizer == 'smac':
cs = ConfigurationSpace()
n_estimators = UniformIntegerHyperparameter(
name="n_estimators", lower=50, upper=500, default_value=50, log=False)
sampling_strategy = CategoricalHyperparameter(
name="sampling_strategy", choices=["majority", "not minority", "not majority", "all"],
default_value="not minority")
replacement = CategoricalHyperparameter(
"replacement", ["True", "False"], default_value="False")
ab_n_estimators = UniformIntegerHyperparameter(
name="ab_n_estimators", lower=50, upper=500, default_value=50, log=False)
ab_learning_rate = UniformFloatHyperparameter(
name="ab_learning_rate", lower=0.01, upper=2, default_value=0.1, log=True)
ab_algorithm = CategoricalHyperparameter(
name="ab_algorithm", choices=["SAMME.R", "SAMME"], default_value="SAMME.R")
ab_max_depth = UniformIntegerHyperparameter(
name="ab_max_depth", lower=1, upper=10, default_value=1, log=False)
cs.add_hyperparameters([n_estimators, sampling_strategy, replacement, ab_n_estimators,
ab_learning_rate, ab_algorithm, ab_max_depth])
return cs
elif optimizer == 'tpe':
from hyperopt import hp
space = {'n_estimators': hp.randint('easy_ensemble_n_estimators', 451) + 50,
'sampling_strategy': hp.choice('easy_ensemble_sampling_strategy',
["majority", "not minority", "not majority", "all"]),
'replacement': hp.choice('easy_ensemble_replacement', ["True", "False"]),
'ab_n_estimators': hp.randint('ab_n_estimators', 451) + 50,
'ab_learning_rate': hp.loguniform('ab_learning_rate', np.log(0.01), np.log(2)),
'ab_algorithm': hp.choice('ab_algorithm', ["SAMME.R", "SAMME"]),
'ab_max_depth': hp.randint('ab_max_depth', 10) + 1}
init_trial = {'n_estimators': 10,
'sampling_strategy': "not minority",
'replacement': "False",
'ab_n_estimators': 50,
'ab_learning_rate': 0.1,
'ab_algorithm': "SAMME.R",
'ab_max_depth': 1}
return space
| 46.864 | 105 | 0.594059 |
aceebbbd24588faa6706bee6363608ebbe0cab6a | 5,708 | py | Python | assignment1/cs231n/classifiers/linear_classifier.py | lhq1208/cs231n_assignment | ed19a5266f295e9fee4c28bc09c286a24f47ac7f | [
"Apache-2.0"
] | 2 | 2017-12-27T01:49:36.000Z | 2017-12-27T03:29:03.000Z | assignment1/cs231n/classifiers/linear_classifier.py | lhq1208/cs231n_assignment | ed19a5266f295e9fee4c28bc09c286a24f47ac7f | [
"Apache-2.0"
] | null | null | null | assignment1/cs231n/classifiers/linear_classifier.py | lhq1208/cs231n_assignment | ed19a5266f295e9fee4c28bc09c286a24f47ac7f | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import numpy as np
from cs231n.classifiers.linear_svm import *
from cs231n.classifiers.softmax import *
from past.builtins import xrange
class LinearClassifier(object):
def __init__(self):
self.W = None
def train(self, X, y, learning_rate=1e-3, reg=1e-5, num_iters=100,
batch_size=200, verbose=False):
"""
Train this linear classifier using stochastic gradient descent.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
- y: A numpy array of shape (N,) containing training labels; y[i] = c
means that X[i] has label 0 <= c < C for C classes.
- learning_rate: (float) learning rate for optimization.
- reg: (float) regularization strength.
- num_iters: (integer) number of steps to take when optimizing
- batch_size: (integer) number of training examples to use at each step.
- verbose: (boolean) If true, print progress during optimization.
Outputs:
A list containing the value of the loss function at each training iteration.
"""
num_train, dim = X.shape
num_classes = np.max(y) + 1 # assume y takes values 0...K-1 where K is number of classes
if self.W is None:
# lazily initialize W
self.W = 0.001 * np.random.randn(dim, num_classes)
# Run stochastic gradient descent to optimize W
loss_history = []
for it in range(num_iters):
X_batch = None
y_batch = None
#########################################################################
# TODO: #
# Sample batch_size elements from the training data and their #
# corresponding labels to use in this round of gradient descent. #
# Store the data in X_batch and their corresponding labels in #
# y_batch; after sampling X_batch should have shape (dim, batch_size) #
# and y_batch should have shape (batch_size,) #
# #
# Hint: Use np.random.choice to generate indices. Sampling with #
# replacement is faster than sampling without replacement. #
#########################################################################
indices = np.random.choice(num_train, batch_size)
X_batch = X[indices]
y_batch = y[indices]
#########################################################################
# END OF YOUR CODE #
#########################################################################
# evaluate loss and gradient
loss, grad = self.loss(X_batch, y_batch, reg)
loss_history.append(loss)
# perform parameter update
#########################################################################
# TODO: #
# Update the weights using the gradient and the learning rate. #
#########################################################################
self.W -= learning_rate * grad
#########################################################################
# END OF YOUR CODE #
#########################################################################
if verbose and it % 100 == 0:
print('iteration %d / %d: loss %f' % (it, num_iters, loss))
return loss_history
def predict(self, X):
"""
Use the trained weights of this linear classifier to predict labels for
data points.
Inputs:
- X: A numpy array of shape (N, D) containing training data; there are N
training samples each of dimension D.
Returns:
- y_pred: Predicted labels for the data in X. y_pred is a 1-dimensional
array of length N, and each element is an integer giving the predicted
class.
"""
y_pred = np.zeros(X.shape[0])
###########################################################################
# TODO: #
# Implement this method. Store the predicted labels in y_pred. #
###########################################################################
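# Scores are the linear map X.dot(W); the predicted class for each sample is the column with the highest score.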
f = np.dot(X, self.W)
y_pred = np.argmax(f, axis=1)
###########################################################################
# END OF YOUR CODE #
###########################################################################
return y_pred
def loss(self, X_batch, y_batch, reg):
"""
Compute the loss function and its derivative.
Subclasses will override this.
Inputs:
- X_batch: A numpy array of shape (N, D) containing a minibatch of N
data points; each point has dimension D.
- y_batch: A numpy array of shape (N,) containing labels for the minibatch.
- reg: (float) regularization strength.
Returns: A tuple containing:
- loss as a single float
- gradient with respect to self.W; an array of the same shape as W
"""
pass
class LinearSVM(LinearClassifier):
""" A subclass that uses the Multiclass SVM loss function """
def loss(self, X_batch, y_batch, reg):
return svm_loss_vectorized(self.W, X_batch, y_batch, reg)
class Softmax(LinearClassifier):
""" A subclass that uses the Softmax + Cross-entropy loss function """
def loss(self, X_batch, y_batch, reg):
return softmax_loss_vectorized(self.W, X_batch, y_batch, reg)
| 41.064748 | 92 | 0.495095 |
aceebece8bcc63fc5189e40da01c2bb7904b7fad | 2,634 | py | Python | mozhi/model/pytorch/ner/lstm.py | gyan42/mozhi | ee54692b1913141e5fdfda486b7dcd2a37e9f39f | [
"Apache-2.0"
] | 2 | 2021-08-16T11:06:36.000Z | 2022-03-15T12:08:24.000Z | mozhi/model/pytorch/ner/lstm.py | gyan42/mozhi | ee54692b1913141e5fdfda486b7dcd2a37e9f39f | [
"Apache-2.0"
] | null | null | null | mozhi/model/pytorch/ner/lstm.py | gyan42/mozhi | ee54692b1913141e5fdfda486b7dcd2a37e9f39f | [
"Apache-2.0"
] | null | null | null | import torch
import torchmetrics
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import TensorDataset, DataLoader
import pytorch_lightning as pl
# from pytorch_lightning.metrics.functional import accuracy, f1_score
from keras.preprocessing.sequence import pad_sequences
from mozhi.protocol.dataprotocol import NERPreprocessorInfo
from mozhi.utils.pretty_print import print_error, print_info
class LSTMTagger(pl.LightningModule):
NAME = "LSTMTagger"
def __init__(self,
preprocessor_data_info: NERPreprocessorInfo = None,
embedding_dim=32,
hidden_dim=64):
super(LSTMTagger, self).__init__()
vocab_size = preprocessor_data_info.vocab_size
num_tags = preprocessor_data_info.tot_num_tags
sentence_max_length = preprocessor_data_info.max_sent_len
self.hidden_dim = hidden_dim
self.word_embeddings = nn.Embedding(vocab_size, embedding_dim)
self.lstm = nn.LSTM(embedding_dim, hidden_dim)
# The linear layer that maps from hidden state space to tag space
self.hidden2tag = nn.Linear(hidden_dim, num_tags)
def forward(self, sentences):
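# sentences: [BS x MAX_LEN] integer word ids -> embeddings -> LSTM features -> per-token tag logits [BS x MAX_LEN x NUM_TAGS]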
embeds = self.word_embeddings(sentences)
lstm_out, _ = self.lstm(embeds)
logits = self.hidden2tag(lstm_out)
return logits
def training_step(self, batch, batch_idx):
x, y = batch  # x: [BS x MAX_LEN] word ids, y: [BS x MAX_LEN] integer tag ids
y_hat = self(x)  # BS x MAX_LEN x NUM_TAGS
y_hat = y_hat.permute(0, 2, 1)  # BS x NUM_TAGS x MAX_LEN
# Token-level cross-entropy over the tag dimension (same form as in validation_step)
loss = F.cross_entropy(y_hat, y)
return {'loss': loss}
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
y_hat = y_hat.permute(0, 2, 1)
loss = nn.CrossEntropyLoss()(y_hat, y)
result = pl.EvalResult()
result.log('val_f1', torchmetrics.functional.f1_score(torch.argmax(y_hat, dim=1), y), prog_bar=True)
return result
def test_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
y_hat = y_hat.permute(0, 2, 1)
loss = nn.CrossEntropyLoss()(y_hat, y)
return {'test_f1': torchmetrics.functional.f1_score(torch.argmax(y_hat, dim=1), y)}
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=5e-4)
| 38.173913 | 108 | 0.664768 |
aceebf5d410cc275265f1939658f124dfbc12f83 | 8,406 | py | Python | trees/ddt/ddt.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | null | null | null | trees/ddt/ddt.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | null | null | null | trees/ddt/ddt.py | islamazhar/trees | 502565c5bf02503c7bece09cddd93f9368da02c3 | [
"MIT"
] | null | null | null | import logging
import numpy as np
from .. import Tree
class DirichletDiffusionTree(Tree):
def __init__(self, root=None, constraints=[], **params):
super(DirichletDiffusionTree, self).__init__(root=root,
constraints=constraints,
**params)
self._marg_log_likelihood = None
def initialize_from_data(self, X):
logging.debug("Initializing tree from data...")
X = np.array(X)
N, _ = X.shape
points = set(range(N))
super(DirichletDiffusionTree, self).initialize_assignments(points)
self.reconfigure_subtree(self.root, X)
def reconfigure_subtree(self, root, X):
if root.is_root():
root_time = 0.0
else:
root_time = root.get_state('time')
for node in self.dfs(node=root):
if node == root:
node.set_state('time', root_time)
node.set_state('latent_value', sum(n.get_state('latent_value') for n in node.children) /
float(len(node.children)))
elif node.is_leaf():
node.set_state('time', 1.0)
node.set_state('latent_value', X[node.point].ravel())
else:
min_time = min(n.get_state('time') for n in node.children)
new_time = min_time - (min_time - root_time) / 2.0
node.set_state('time', new_time)
node.set_state('latent_value', sum(n.get_state('latent_value') for n in node.children) /
float(len(node.children)))
def calculate_node_likelihood(self, node=None):
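# Post-order traversal returning (leaf count below node, log prob of the tree structure,
# log prob of the data given the structure); results are memoised in the node cache.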
node = node or self.root
if 'likelihood' in node.cache:
return node.get_cache('likelihood')
if node.is_leaf():
return 1, 0, self.likelihood_model.transition_probability(node.parent, node)
node_time = node.get_state('time')
left_child, right_child = node.children
path_count, tree_prob, data_prob = self.calculate_node_likelihood(node=left_child)
if not node.is_root():
tree_prob += self.df.log_no_divergence(node.parent.get_state('time'), node_time, path_count)
tree_prob += self.df.log_divergence(node_time)
data_prob += self.likelihood_model.transition_probability(node.parent, node)
right_path_count, right_tree_prob, right_data_prob = self.calculate_node_likelihood(node=right_child)
result = path_count + right_path_count, tree_prob + right_tree_prob, data_prob + right_data_prob
node.set_cache('likelihood', result)
return result
def calculate_marg_log_likelihood(self):
assert self.root is not None
_, tree_structure, data_structure = self.calculate_node_likelihood()
self._marg_log_likelihood = tree_structure + data_structure
def marg_log_likelihood(self):
if self._marg_log_likelihood is None:
self.calculate_marg_log_likelihood()
return self._marg_log_likelihood
def sample_assignment(self, node=None, constraints=None, points=None, index=None,
state=None):
node = node or self.root
constraints = constraints or self.constraints
points = points or frozenset()
index = index or ()
df = self.df
state = state or {}
logging.debug("Sampling assignment at index: %s" % str(index))
counts = [c.leaf_count() for c in node.children]
logging.debug("Path counts: %s" % str(counts))
total = float(sum(counts))
if len(constraints) > 0:
for idx, child in enumerate(node.children):
if child.is_required(constraints, points):
constraints = node.prune_constraints(constraints, points, idx)
logging.debug("Child is required: %u" % idx)
return self.sample_assignment(node=node.children[idx],
constraints=constraints,
points=points,
index=index + (idx,),
state=state)
left_prob = counts[0] / total
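# Unless a constraint forces or bans a branch below, a child is chosen with probability
# proportional to the number of leaves beneath it.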
u = np.random.random()
choice = None
idx = -1
if len(constraints) > 0:
for i, child in enumerate(node.children):
if child.is_path_required(constraints, points):
idx = i
choice = child
break
if child.is_path_banned(constraints, points):
idx = 1 - i
choice = node.children[idx]
break
if choice is None:
if u < left_prob:
choice = node.children[0]
idx = 0
else:
choice = node.children[1]
idx = 1
prob = np.log(counts[idx]) - np.log(total)
logging.debug("Branching: %f" % prob)
node_time = node.get_state('time')
choice_time = choice.get_state('time')
if choice.is_banned(constraints, points):
sampled_time, _ = df.sample(node_time, choice_time, counts[idx])
diverge_prob = df.log_pdf(node_time, sampled_time, counts[idx])
prob += diverge_prob
state['time'] = sampled_time
return (index + (idx,), state), prob
constraints = node.prune_constraints(constraints, points, idx)
no_diverge_prob = (df.cumulative_divergence(node_time) - df.cumulative_divergence(choice_time)) / \
counts[idx]
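# Log probability of following the existing branch down to the child's time without
# diverging; the divergence hazard is shared across the paths already on this branch.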
u = np.random.random()
if u < np.exp(no_diverge_prob):
logging.debug("Not diverging: %f" % no_diverge_prob)
prob += no_diverge_prob
assignment, p = self.sample_assignment(node=node.children[idx],
constraints=constraints,
points=points,
index=index + (idx,),
state=state)
return assignment, prob + p
else:
sampled_time, _ = df.sample(node_time, choice_time, counts[idx])
diverge_prob = df.log_pdf(sampled_time, node_time, counts[idx])
logging.debug("Diverging at %f: %f" % (sampled_time, diverge_prob))
prob += diverge_prob
state['time'] = sampled_time
return (index + (idx,), state), prob
def log_prob_assignment(self, assignment, node=None):
node = node or self.root
(idx, state) = assignment
time = state['time']
assert idx != ()
df = self.df
first, rest = idx[0], idx[1:]
counts = [c.leaf_count() for c in node.children]
total = float(sum(counts))
prob = np.log(counts[first]) - np.log(total)
logging.debug("Branching prob: %f" % prob)
node_time = node.get_state('time')
if len(idx) == 1:
diverge_prob = df.log_pdf(node_time, time, counts[first])
logging.debug("Diverging at %f: %f" % (time, diverge_prob))
return prob + diverge_prob
choice = node.children[first]
choice_time = choice.get_state('time')
no_diverge_prob = (df.cumulative_divergence(node_time) - df.cumulative_divergence(choice_time)) / \
counts[first]
logging.debug("Not diverging: %f" % no_diverge_prob)
return prob + no_diverge_prob + self.log_prob_assignment((rest, state), node=node.children[first])
def assign_node(self, node, assignment):
(idx, state) = assignment
assignee = self.get_node(idx)
assignee.attach(node)
node.parent.state.update(state)
def sample_latent(self):
for node in self.dfs():
if not node.is_leaf():
lv = self.likelihood_model.sample_transition(node, node.parent)
node.set_state('latent_value', lv)
node.delete_cache("likelihood")
def node_as_string(self, node):
return str(node.get_state('time'))
def get_parameters(self):
return {
"df",
"likelihood_model",
}
| 39.464789 | 109 | 0.561623 |
aceebf73a337718fdaada926e69ecbe4b426e351 | 497 | py | Python | wtitenaut/users/tests/test_tasks.py | konstantinmds/writernauts | 8073869786b16afbd91a78b3f2055589d72938b8 | [
"MIT"
] | null | null | null | wtitenaut/users/tests/test_tasks.py | konstantinmds/writernauts | 8073869786b16afbd91a78b3f2055589d72938b8 | [
"MIT"
] | 2 | 2022-03-01T05:03:28.000Z | 2022-03-02T05:05:19.000Z | wtitenaut/users/tests/test_tasks.py | konstantinmds/writernauts | 8073869786b16afbd91a78b3f2055589d72938b8 | [
"MIT"
] | null | null | null | import pytest
from celery.result import EagerResult
from wtitenaut.users.tasks import get_users_count
from wtitenaut.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
def test_user_count(settings):
"""A basic test to execute the get_users_count Celery task."""
UserFactory.create_batch(3)
settings.CELERY_TASK_ALWAYS_EAGER = True
task_result = get_users_count.delay()
assert isinstance(task_result, EagerResult)
assert task_result.result == 3
| 29.235294 | 66 | 0.790744 |
aceebf754c27cd94bcefbd2b3877559699546acb | 3,137 | py | Python | f/build_msg.py | writeblankspace/aichatbot | 13ee83888ea26439cfd56853b615d6abc6fb2cab | [
"MIT"
] | null | null | null | f/build_msg.py | writeblankspace/aichatbot | 13ee83888ea26439cfd56853b615d6abc6fb2cab | [
"MIT"
] | null | null | null | f/build_msg.py | writeblankspace/aichatbot | 13ee83888ea26439cfd56853b615d6abc6fb2cab | [
"MIT"
] | null | null | null | # these functions help build the message
# they use the dictionary to randomise the response with synonyms
# dictionary
from f._dict import d
from f.handle_references import handle_references
import random
randint = random.randint
# adds bias to the message. The response can be either good or bad, or neutral
# this isn't useful... yet
class Reply_Bias:
def __init__(self, good: int = 1, bad: int = 1, neutral: int = 1, meh: int = 1):
self.good = good
self.bad = bad
self.neutral = neutral
self.meh = meh
self.outcome = self.find_outcome()
def find_outcome(self):
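# Weighted random choice: build a list with one entry per unit of weight for each outcome, then draw uniformly from it.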
outcomelist = []
for i in range(self.good):
outcomelist.append("good")
for i in range(self.bad):
outcomelist.append("bad")
for i in range(self.neutral):
outcomelist.append("neutral")
for i in range(self.meh):
outcomelist.append("meh")
return random.choice(outcomelist)
def build_msg(msg, use_append: bool = True, use_synonyms: bool = True, synonyms: list = d["synonyms"]):
""" Builds the message using `msg`. Spaces will be added automatically.
If `use_append` is set to True, the message will be appended with a random value from the `_append` dictionary.
If `use_synonyms` is set to True, synonyms will be used using the dictionary"""
if type(msg) == str:
msg = msg.split(" ")
# if use_append is True, add a random value from the _append dictionary
if use_append:
rand = randint(1, 7)
if rand == 1:
# e.g. "bruh the quick brown fox jumps over the lazy dog"
random_append = random.choice(d["_append"]["all"])
new = []
new.append(random_append)
for word in msg:
new.append(word)
elif rand == 2:
# e.g. "the quick brown fox jumps over the lazy dog bruh"
random_append = random.choice(d["_append"]["all"])
new = []
for word in msg:
new.append(word)
new.append(random_append)
elif rand == 3:
# e.g. "i- the quick brown fox jumps over the lazy dog"
random_append = random.choice(d["_append"]["all_prefixes"])
new = []
new.append(random_append)
for word in msg:
new.append(word)
elif rand == 4:
# e.g. "the quick brown fox jumps over the lazy dog ..."
random_append = random.choice(d["_append"]["all_suffixes"])
new = []
for word in msg:
new.append(word)
new.append(random_append)
elif rand in [5, 6, 7]:
# e.g. "the quick brown fox jumps over the lazy dog"
# nothing happens
new = msg
msg = new
if use_synonyms:
new_msg = []
for i in msg:
# synonyms is a list of lists, where each list is a list of words that are synonyms for each other
# if that doesn't make sense, then don't make sense of it
synonym_found = False
for j in synonyms:
if i in j:
# if word is in that synonym list
# choose a random synonym
synonym = random.choice(j)
# replace the word with the synonym
new_msg.append(synonym)
synonym_found = True
break
if not synonym_found:
new_msg.append(i)
else:
new_msg = msg
joined = " ".join(new_msg)
# remove spaces before punctuation
for i in [".", ",", "!", "?"]:
joined = joined.replace(f" {i}", f"{i}")
return joined
| 27.043103 | 112 | 0.667198 |
aceec0b2ac01881b604407aa5fabdc0d0302f468 | 723 | py | Python | docking/PI/main.py | milkyway103/docking | 2918255bbc725b980f1fe2fed12424638d33c28c | [
"Apache-2.0"
] | 2 | 2019-05-19T03:40:21.000Z | 2019-05-23T09:17:46.000Z | docking/PI/main.py | milkyway103/docking | 2918255bbc725b980f1fe2fed12424638d33c28c | [
"Apache-2.0"
] | null | null | null | docking/PI/main.py | milkyway103/docking | 2918255bbc725b980f1fe2fed12424638d33c28c | [
"Apache-2.0"
] | 1 | 2020-04-30T04:10:56.000Z | 2020-04-30T04:10:56.000Z | #!/usr/bin/python
import MPU
import gps
from bluetooth import *
server_socket=BluetoothSocket(RFCOMM)
port=1
server_socket.bind(("",PORT_ANY))
server_socket.listen(1)
port = server_socket.getsockname()[1]
#advertise_service( server_socket, "BtChat",service_id = uuid,service_classes = [ uuid, SERIAL_PORT_CLASS ],profiles = [ SERIAL_PORT_PROFILE ] )
print("Waiting for connection : channel %d" % port)
client_sock, client_info = server_socket.accept()
print('accepted')
# Pre-size the buffer so the index assignments below are valid
infoarray = [0.0] * 5
while True:
    infoarray[0] = MPU.get_x()
    infoarray[1] = MPU.get_y()
    infoarray[2] = MPU.get_z()
    infoarray[3] = gps.get_lat()
    infoarray[4] = gps.get_lon()
    # send() expects bytes, so serialise the readings before transmitting
    client_sock.send(str(infoarray).encode())
| 25.821429 | 145 | 0.694329 |
aceec15ceb80e232afb49f7ee294c11dc3b56706 | 51 | py | Python | server/xss.py | NBanski/XSS-Catcher | c986a941dd3dec5d2617b46106d3e5dd665bffd2 | [
"MIT"
] | 98 | 2019-05-28T12:17:55.000Z | 2022-02-15T07:06:41.000Z | server/xss.py | NBanski/XSS-Catcher | c986a941dd3dec5d2617b46106d3e5dd665bffd2 | [
"MIT"
] | 18 | 2019-11-08T20:14:47.000Z | 2022-02-27T15:04:32.000Z | server/xss.py | NBanski/XSS-Catcher | c986a941dd3dec5d2617b46106d3e5dd665bffd2 | [
"MIT"
] | 13 | 2020-08-27T21:40:57.000Z | 2022-02-02T16:35:48.000Z | from app import create_app, db
app = create_app()
| 12.75 | 30 | 0.745098 |
aceec2363dc03b449a773354522f4c1107afdd96 | 1,688 | py | Python | radon_server/urls.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | null | null | null | radon_server/urls.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | 2 | 2021-03-19T01:28:34.000Z | 2022-01-13T01:19:59.000Z | radon_server/urls.py | leo212/radon_transform | 04721c365d01e9b6a17eba4130a13d453844e27d | [
"Apache-2.0"
] | null | null | null | """radon_server URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.views.decorators.csrf import csrf_exempt
from . import serve
from . import files
from . import transform
from . import upload
urlpatterns = [
path('admin/', admin.site.urls),
path('test/', files.get_status, name='get_status'),
path('get_filelist/<str:folder>', files.get_filelist),
path('transform/<str:algorithm>/<str:variant>/<str:filename>', transform.transform),
path('reconstruct/<str:method>/<int:tolerance>/<str:filename>', transform.reconstruct),
path('build_matrix/<str:algorithm>/<str:variant>/<int:size>', transform.build_matrix),
path('is_matrix_available/<str:algorithm>/<str:variant>/<int:size>', transform.is_matrix_available),
path('get_job_status/<int:job_id>', transform.get_job_status),
path('upload/', csrf_exempt(upload.upload_file)),
path('get_image/<str:filename>', serve.get_image),
path('get_result/<str:filename>', serve.get_result),
path('get_reconstructed/<str:filename>', serve.get_reconstructed)
]
| 43.282051 | 104 | 0.724526 |
aceec24a235f51a7f836e5ad0841154b4ea993ae | 3,495 | py | Python | neutron/plugins/ml2/extensions/port_security.py | NeCTAR-RC/neutron | acf78cc3c88aff638180819419a65145a9a79695 | [
"Apache-2.0"
] | 5 | 2015-10-20T07:56:53.000Z | 2017-12-31T22:39:15.000Z | neutron/plugins/ml2/extensions/port_security.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | null | null | null | neutron/plugins/ml2/extensions/port_security.py | cyysu/neutron_read | 07d1a526d7d44ad0207d27e0ee04f1582541ab89 | [
"Apache-2.0"
] | 3 | 2015-05-08T22:36:28.000Z | 2015-10-24T21:25:35.000Z | # Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api.v2 import attributes as attrs
from neutron.db import common_db_mixin
from neutron.db import portsecurity_db_common as ps_db_common
from neutron.extensions import portsecurity as psec
from neutron.i18n import _LI
from neutron.plugins.ml2 import driver_api as api
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class PortSecurityExtensionDriver(api.ExtensionDriver,
ps_db_common.PortSecurityDbCommon,
common_db_mixin.CommonDbMixin):
_supported_extension_alias = 'port-security'
def initialize(self):
LOG.info(_LI("PortSecurityExtensionDriver initialization complete"))
@property
def extension_alias(self):
return self._supported_extension_alias
def process_create_network(self, context, data, result):
# Create the network extension attributes.
if psec.PORTSECURITY in data:
self._process_network_port_security_create(context, data, result)
def process_update_network(self, context, data, result):
# Update the network extension attributes.
if psec.PORTSECURITY in data:
self._process_network_port_security_update(context, data, result)
def process_create_port(self, context, data, result):
# Create the port extension attributes.
data[psec.PORTSECURITY] = self._determine_port_security(context, data)
self._process_port_port_security_create(context, data, result)
def process_update_port(self, context, data, result):
if psec.PORTSECURITY in data:
self._process_port_port_security_update(
context, data, result)
def extend_network_dict(self, session, db_data, result):
self._extend_port_security_dict(result, db_data)
def extend_port_dict(self, session, db_data, result):
self._extend_port_security_dict(result, db_data)
def _extend_port_security_dict(self, response_data, db_data):
response_data[psec.PORTSECURITY] = (
db_data['port_security'][psec.PORTSECURITY])
def _determine_port_security(self, context, port):
"""Returns a boolean (port_security_enabled).
Port_security is the value associated with the port if one is present
otherwise the value associated with the network is returned.
"""
# we don't apply security groups for dhcp, router
if (port.get('device_owner') and
port['device_owner'].startswith('network:')):
return False
if attrs.is_attr_set(port.get(psec.PORTSECURITY)):
port_security_enabled = port[psec.PORTSECURITY]
else:
port_security_enabled = self._get_network_security_binding(
context, port['network_id'])
return port_security_enabled
| 40.172414 | 78 | 0.703577 |
aceec418fa56641bddc43b53e75fb450f103a7ba | 15,880 | py | Python | sdk/python/pulumi_aws/route53/resolver_firewall_rule_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-10T16:33:40.000Z | 2021-11-10T16:33:40.000Z | sdk/python/pulumi_aws/route53/resolver_firewall_rule_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_aws/route53/resolver_firewall_rule_group.py | chivandikwa/pulumi-aws | 19c08bf9dcb90544450ffa4eec7bf6751058fde2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ResolverFirewallRuleGroupArgs', 'ResolverFirewallRuleGroup']
@pulumi.input_type
class ResolverFirewallRuleGroupArgs:
def __init__(__self__, *,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a ResolverFirewallRuleGroup resource.
:param pulumi.Input[str] name: A name that lets you identify the rule group, to manage and use it.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name that lets you identify the rule group, to manage and use it.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags to assign to the resource. .If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@pulumi.input_type
class _ResolverFirewallRuleGroupState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering ResolverFirewallRuleGroup resources.
:param pulumi.Input[str] arn: The ARN (Amazon Resource Name) of the rule group.
:param pulumi.Input[str] name: A name that lets you identify the rule group, to manage and use it.
:param pulumi.Input[str] owner_id: The AWS account ID for the account that created the rule group. When a rule group is shared with your account, this is the account that has shared the rule group with you.
:param pulumi.Input[str] share_status: Whether the rule group is shared with other AWS accounts, or was shared with the current account by another AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM). Valid values: `NOT_SHARED`, `SHARED_BY_ME`, `SHARED_WITH_ME`
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if name is not None:
pulumi.set(__self__, "name", name)
if owner_id is not None:
pulumi.set(__self__, "owner_id", owner_id)
if share_status is not None:
pulumi.set(__self__, "share_status", share_status)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
The ARN (Amazon Resource Name) of the rule group.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
A name that lets you identify the rule group, to manage and use it.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> Optional[pulumi.Input[str]]:
"""
The AWS account ID for the account that created the rule group. When a rule group is shared with your account, this is the account that has shared the rule group with you.
"""
return pulumi.get(self, "owner_id")
@owner_id.setter
def owner_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "owner_id", value)
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> Optional[pulumi.Input[str]]:
"""
Whether the rule group is shared with other AWS accounts, or was shared with the current account by another AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM). Valid values: `NOT_SHARED`, `SHARED_BY_ME`, `SHARED_WITH_ME`
"""
return pulumi.get(self, "share_status")
@share_status.setter
def share_status(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "share_status", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class ResolverFirewallRuleGroup(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides a Route 53 Resolver DNS Firewall rule group resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.route53.ResolverFirewallRuleGroup("example")
```
## Import
Route 53 Resolver DNS Firewall rule groups can be imported using the Route 53 Resolver DNS Firewall rule group ID, e.g.,
```sh
$ pulumi import aws:route53/resolverFirewallRuleGroup:ResolverFirewallRuleGroup example rslvr-frg-0123456789abcdef
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] name: A name that lets you identify the rule group, to manage and use it.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[ResolverFirewallRuleGroupArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Route 53 Resolver DNS Firewall rule group resource.
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.route53.ResolverFirewallRuleGroup("example")
```
## Import
Route 53 Resolver DNS Firewall rule groups can be imported using the Route 53 Resolver DNS Firewall rule group ID, e.g.,
```sh
$ pulumi import aws:route53/resolverFirewallRuleGroup:ResolverFirewallRuleGroup example rslvr-frg-0123456789abcdef
```
:param str resource_name: The name of the resource.
:param ResolverFirewallRuleGroupArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ResolverFirewallRuleGroupArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ResolverFirewallRuleGroupArgs.__new__(ResolverFirewallRuleGroupArgs)
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["arn"] = None
__props__.__dict__["owner_id"] = None
__props__.__dict__["share_status"] = None
__props__.__dict__["tags_all"] = None
super(ResolverFirewallRuleGroup, __self__).__init__(
'aws:route53/resolverFirewallRuleGroup:ResolverFirewallRuleGroup',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
owner_id: Optional[pulumi.Input[str]] = None,
share_status: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ResolverFirewallRuleGroup':
"""
Get an existing ResolverFirewallRuleGroup resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: The ARN (Amazon Resource Name) of the rule group.
:param pulumi.Input[str] name: A name that lets you identify the rule group, to manage and use it.
:param pulumi.Input[str] owner_id: The AWS account ID for the account that created the rule group. When a rule group is shared with your account, this is the account that has shared the rule group with you.
:param pulumi.Input[str] share_status: Whether the rule group is shared with other AWS accounts, or was shared with the current account by another AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM). Valid values: `NOT_SHARED`, `SHARED_BY_ME`, `SHARED_WITH_ME`
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ResolverFirewallRuleGroupState.__new__(_ResolverFirewallRuleGroupState)
__props__.__dict__["arn"] = arn
__props__.__dict__["name"] = name
__props__.__dict__["owner_id"] = owner_id
__props__.__dict__["share_status"] = share_status
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return ResolverFirewallRuleGroup(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
The ARN (Amazon Resource Name) of the rule group.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
A name that lets you identify the rule group, to manage and use it.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="ownerId")
def owner_id(self) -> pulumi.Output[str]:
"""
The AWS account ID for the account that created the rule group. When a rule group is shared with your account, this is the account that has shared the rule group with you.
"""
return pulumi.get(self, "owner_id")
@property
@pulumi.getter(name="shareStatus")
def share_status(self) -> pulumi.Output[str]:
"""
Whether the rule group is shared with other AWS accounts, or was shared with the current account by another AWS account. Sharing is configured through AWS Resource Access Manager (AWS RAM). Valid values: `NOT_SHARED`, `SHARED_BY_ME`, `SHARED_WITH_ME`
"""
return pulumi.get(self, "share_status")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
        A map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider-level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
        A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
| 46.568915 | 297 | 0.658627 |
aceec41b73e536b4a6fda2f8e40e0821c1036d9a | 469 | py | Python | tests/regression/test_once_prevents_new_interactions.py | santosh653/betamax | 73e601d34d692255d826b2fceec6d9bc8b4c0420 | [
"Apache-2.0"
] | 226 | 2017-10-19T20:46:53.000Z | 2022-03-11T08:11:10.000Z | tests/regression/test_once_prevents_new_interactions.py | santosh653/betamax | 73e601d34d692255d826b2fceec6d9bc8b4c0420 | [
"Apache-2.0"
] | 93 | 2015-01-01T15:47:33.000Z | 2017-10-03T14:15:50.000Z | tests/regression/test_once_prevents_new_interactions.py | santosh653/betamax | 73e601d34d692255d826b2fceec6d9bc8b4c0420 | [
"Apache-2.0"
] | 44 | 2015-03-25T19:42:13.000Z | 2017-09-08T17:56:16.000Z | import pytest
import unittest
from betamax import Betamax, BetamaxError
from requests import Session
class TestOncePreventsNewInteractions(unittest.TestCase):
"""Test that using a cassette with once record mode prevents new requests.
"""
def test_once_prevents_new_requests(self):
s = Session()
with Betamax(s).use_cassette('once_record_mode'):
with pytest.raises(BetamaxError):
s.get('http://example.com')
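    # --- Editor's note: illustrative variant, not part of the original test. ---
    # 'once' is betamax's default record mode; spelling it out makes the intent
    # explicit (the cassette name here is hypothetical):
    #
    #     with Betamax(s).use_cassette('once_record_mode_explicit', record='once'):
    #         ...  # only interactions already recorded on the cassette are allowed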
| 24.684211 | 78 | 0.703625 |
aceec608a5f134da095e33e32198082abd845b46 | 2,998 | py | Python | gtsfm/runner/run_scene_optimizer_colmaploader.py | yuancaimaiyi/gtsfm | cc5781c35af23498d45cd96a1818e4786c5cca80 | [
"Apache-2.0"
] | null | null | null | gtsfm/runner/run_scene_optimizer_colmaploader.py | yuancaimaiyi/gtsfm | cc5781c35af23498d45cd96a1818e4786c5cca80 | [
"Apache-2.0"
] | null | null | null | gtsfm/runner/run_scene_optimizer_colmaploader.py | yuancaimaiyi/gtsfm | cc5781c35af23498d45cd96a1818e4786c5cca80 | [
"Apache-2.0"
] | 1 | 2021-09-23T13:08:49.000Z | 2021-09-23T13:08:49.000Z | import argparse
from pathlib import Path
import hydra
from dask.distributed import Client, LocalCluster, performance_report
from hydra.utils import instantiate
import gtsfm.utils.logger as logger_utils
from gtsfm.common.gtsfm_data import GtsfmData
from gtsfm.loader.colmap_loader import ColmapLoader
from gtsfm.scene_optimizer import SceneOptimizer
DATA_ROOT = Path(__file__).resolve().parent.parent.parent / "tests" / "data"
logger = logger_utils.get_logger()
def run_scene_optimizer(args) -> None:
""" """
with hydra.initialize_config_module(config_module="gtsfm.configs"):
# config is relative to the gtsfm module
cfg = hydra.compose(config_name=args.config_name)
scene_optimizer: SceneOptimizer = instantiate(cfg.SceneOptimizer)
loader = ColmapLoader(
colmap_files_dirpath=args.colmap_files_dirpath,
images_dir=args.images_dir,
max_frame_lookahead=args.max_frame_lookahead,
)
sfm_result_graph = scene_optimizer.create_computation_graph(
num_images=len(loader),
image_pair_indices=loader.get_valid_pairs(),
image_graph=loader.create_computation_graph_for_images(),
camera_intrinsics_graph=loader.create_computation_graph_for_intrinsics(),
image_shape_graph=loader.create_computation_graph_for_image_shapes(),
gt_pose_graph=loader.create_computation_graph_for_poses(),
)
# create dask client
cluster = LocalCluster(n_workers=args.num_workers, threads_per_worker=args.threads_per_worker)
with Client(cluster), performance_report(filename="dask-report.html"):
sfm_result = sfm_result_graph.compute()
assert isinstance(sfm_result, GtsfmData)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="GTSFM with intrinsics and image names stored in COLMAP-format")
parser.add_argument(
"--images_dir", type=str, required=True, help="path to directory containing png, jpeg, or jpg images files"
)
parser.add_argument(
"--colmap_files_dirpath",
type=str,
required=True,
help="path to directory containing images.txt, points3D.txt, and cameras.txt",
)
parser.add_argument(
"--max_frame_lookahead",
type=int,
default=1,
help="maximum number of consecutive frames to consider for matching/co-visibility",
)
parser.add_argument(
"--num_workers",
type=int,
default=1,
help="Number of workers to start (processes, by default)",
)
parser.add_argument(
"--threads_per_worker",
type=int,
default=1,
help="Number of threads per each worker",
)
parser.add_argument(
"--config_name",
type=str,
default="deep_front_end.yaml",
help="Choose sift_front_end.yaml or deep_front_end.yaml",
)
args = parser.parse_args()
run_scene_optimizer(args)
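# --- Editor's note: illustrative invocation; not part of the original script. ---
# A typical run over a COLMAP-exported scene might look like the following
# (all paths and worker counts are hypothetical):
#
#     python gtsfm/runner/run_scene_optimizer_colmaploader.py \
#         --images_dir /data/scene/images \
#         --colmap_files_dirpath /data/scene/colmap \
#         --max_frame_lookahead 2 \
#         --num_workers 4 \
#         --config_name deep_front_end.yaml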
| 33.311111 | 115 | 0.693129 |
aceec6d3226f331ef2cd46a8ce7f885ab62ac773 | 2,072 | py | Python | windpyplus/technical/RS_OH_Tech.py | romepeng/windpyplus | e88fad3ed1b98aa107b1bba66ed10bd6dc521fbd | [
"MIT"
] | 3 | 2017-07-27T01:11:51.000Z | 2019-08-17T06:49:19.000Z | windpyplus/technical/RS_OH_Tech.py | romepeng/windpyplus | e88fad3ed1b98aa107b1bba66ed10bd6dc521fbd | [
"MIT"
] | null | null | null | windpyplus/technical/RS_OH_Tech.py | romepeng/windpyplus | e88fad3ed1b98aa107b1bba66ed10bd6dc521fbd | [
"MIT"
] | 2 | 2019-11-07T05:05:13.000Z | 2021-03-28T14:42:30.000Z | import pandas as pd
from iwindpy_plus.utils.tradedate import tradedate
from WindPy import w
w.start()
def RS_OH(stocklists):
data = w.wss(stocklists,
"sec_name,close,pct_chg,pct_chg_lowest_per,pct_chg_highest_per,pct_chg_5d,pct_chg_10d,pct_chg_1m,pct_chg_3m,pct_chg_6m,pct_chg_1y",
"tradeDate= " +tradedate(0)[1] , "priceAdj=F;cycle=D;startDate=" + tradedate(365)[0],"endDate="+tradedate(365)[1])
df = pd.DataFrame(data=data.Data, index= data.Fields, columns=data.Codes).T
df = df.sort_values(by= 'PCT_CHG_1Y', ascending= False)
#print(" stock num : {}".format(len(df)))
return df
def newHighLow(stocklists):
data = w.wss(stocklists,
"sec_name,close,history_high,history_low,stage_high,stage_low",
"tradeDate= " +tradedate(0)[1] , "priceAdj=F;n=10;m=365")
df = pd.DataFrame(data=data.Data, index= data.Fields, columns=data.Codes).T
df = df.sort_values(by= 'HISTORY_HIGH', ascending= False)
#print(" stock num : {}".format(len(df)))
return df
def techIndictor(stocklists, days = 0):
data = w.wss(stocklists,
"sec_name,close,ATR,BBI,BBIBOLL,BIAS,BOLL,CCI,DMA,DMI,EXPMA,MA,MACD,RC,ROC,RSI,SAR,STD,TRIX,vol_ratio",
"tradeDate=" + tradedate(days)[0] ,
"priceAdj=F;cycle=D;ATR_N=14;ATR_IO=1;BBI_N1=3;BBI_N2=6;BBI_N3=12;BBI_N4=24;BBIBOLL_N=10;BBIBOLL_Width=3;BBIBOLL_IO=1;BIAS_N=12;BOLL_N=26;BOLL_Width=2;BOLL_IO=1;CCI_N=14;DMA_S=10;DMA_L=50;DMA_N=10;DMA_IO=1;DMI_N=14;DMI_N1=6;DMI_IO=1;EXPMA_N=30;MA_N=3;MACD_L=26;MACD_S=12;MACD_N=9;MACD_IO=1;RC_N=50;ROC_interDay=12;ROC_N=6;ROC_IO=1;RSI_N=6;SAR_N=4;SAR_SP=2;SAR_MP=20;STD_N=26;TRIX_N1=12;TRIX_N2=20;TRIX_IO=1;VolumeRatio_N=50")
df = pd.DataFrame(data=data.Data, index= data.Fields, columns=data.Codes).T
df['CHG_5D/3D'] = 100*(df['MA'] - df['EXPMA'])/df['EXPMA']
df = df.sort_values(by='CHG_5D/3D', ascending = False )
return df
if __name__ == '__main__':
from iwindpy_plus.stockPool.mystocks import stocklists
df = newHighLow(stocklists)
print(df)
df = RS_OH(stocklists)
print(df)
| 49.333333 | 433 | 0.70222 |
aceec761eb86742128588c1bdf0298cdafcce988 | 192 | py | Python | Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py | Guilherme-Artigas/Python-avancado | 287e23ac3df181ff84bf5fae8ab925a4433dceb0 | [
"MIT"
] | null | null | null | Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py | Guilherme-Artigas/Python-avancado | 287e23ac3df181ff84bf5fae8ab925a4433dceb0 | [
"MIT"
] | null | null | null | Aula 22 – Módulos e Pacotes/uteis/numeros/__init__.py | Guilherme-Artigas/Python-avancado | 287e23ac3df181ff84bf5fae8ab925a4433dceb0 | [
"MIT"
] | null | null | null | def fatorial(p1):
f = 1
indice = p1
while indice >= 1:
f *= indice
indice -= 1
return f
def dobro(p1):
return p1 * 2
def triplo(p1):
return p1 * 3
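# --- Editor's note: illustrative usage (not part of the original package). ---
#     fatorial(5)  # -> 120
#     dobro(7)     # -> 14
#     triplo(3)    # -> 9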
| 11.294118 | 22 | 0.494792 |
aceec83cd25093bb3d47b44b802662d605156d0d | 3,800 | py | Python | var/spack/repos/builtin/packages/gobject-introspection/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 2 | 2020-09-10T22:50:08.000Z | 2021-01-12T22:18:54.000Z | var/spack/repos/builtin/packages/gobject-introspection/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 14 | 2021-07-20T01:04:53.000Z | 2022-03-02T01:08:36.000Z | var/spack/repos/builtin/packages/gobject-introspection/package.py | kkauder/spack | 6ae8d5c380c1f42094b05d38be26b03650aafb39 | [
"ECL-2.0",
"Apache-2.0",
"MIT-0",
"MIT"
] | 1 | 2021-05-06T00:17:46.000Z | 2021-05-06T00:17:46.000Z | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
import spack.hooks.sbang as sbang
class GobjectIntrospection(Package):
"""The GObject Introspection is used to describe the program APIs and
collect them in a uniform, machine readable format.Cairo is a 2D graphics
library with support for multiple output"""
homepage = "https://wiki.gnome.org/Projects/GObjectIntrospection"
url = "http://ftp.gnome.org/pub/gnome/sources/gobject-introspection/1.49/gobject-introspection-1.49.2.tar.xz"
version('1.56.1', sha256='5b2875ccff99ff7baab63a34b67f8c920def240e178ff50add809e267d9ea24b')
version('1.49.2', sha256='73d59470ba1a546b293f54d023fd09cca03a951005745d86d586b9e3a8dde9ac')
version('1.48.0', sha256='fa275aaccdbfc91ec0bc9a6fd0562051acdba731e7d584b64a277fec60e75877')
depends_on("glib@2.49.2:", when="@1.49.2:")
# version 1.48.0 build fails with glib 2.49.4
depends_on("glib@2.48.1", when="@1.48.0")
depends_on("python")
depends_on("cairo")
depends_on("bison", type="build")
depends_on("flex", type="build")
depends_on("pkgconfig", type="build")
# GobjectIntrospection does not build with sed from darwin:
depends_on('sed', when='platform=darwin', type='build')
# This package creates several scripts from
    # tools/g-ir-tool-template.in. In their original form these
# scripts end up with a sbang line like
#
# `#!/usr/bin/env /path/to/spack/python`.
#
# These scripts are generated and then used as part of the build
# (other packages also use the scripts after they've been
# installed).
#
# The path to the spack python can become too long. Because these
# tools are used as part of the build, the normal hook that fixes
# this problem can't help us.
# This package fixes the problem in two steps:
# - it rewrites the g-ir-tool-template so that its sbang line
# refers directly to spack's python (filter_file step below); and
# - it patches the Makefile.in so that the generated Makefile has an
# extra sed expression in its TOOL_SUBSTITUTION that results in
# an `#!/bin/bash /path/to/spack/bin/sbang` unconditionally being
# inserted into the scripts as they're generated.
patch("sbang.patch")
def url_for_version(self, version):
url = 'http://ftp.gnome.org/pub/gnome/sources/gobject-introspection/{0}/gobject-introspection-{1}.tar.xz'
return url.format(version.up_to(2), version)
def setup_run_environment(self, env):
env.prepend_path("GI_TYPELIB_PATH",
join_path(self.prefix.lib, 'girepository-1.0'))
def setup_dependent_build_environment(self, env, dependent_spec):
env.prepend_path("XDG_DATA_DIRS", self.prefix.share)
env.prepend_path("GI_TYPELIB_PATH",
join_path(self.prefix.lib, 'girepository-1.0'))
def setup_dependent_run_environment(self, env, dependent_spec):
env.prepend_path("XDG_DATA_DIRS", self.prefix.share)
env.prepend_path("GI_TYPELIB_PATH",
join_path(self.prefix.lib, 'girepository-1.0'))
def install(self, spec, prefix):
configure("--prefix=%s" % prefix)
# we need to filter this file to avoid an overly long hashbang line
filter_file('#!/usr/bin/env @PYTHON@', '#!@PYTHON@',
'tools/g-ir-tool-template.in')
make()
make("install")
def setup_build_environment(self, env):
env.set('SPACK_SBANG', sbang.sbang_install_path())
@property
def parallel(self):
return not self.spec.satisfies('%fj')
| 43.181818 | 118 | 0.686842 |
aceec88b3d7338bd1ac6f84084b19e85dbfdb5be | 522 | py | Python | practice/practice37.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | practice/practice37.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | practice/practice37.py | tomhaoye/LetsPython | 3c5f66d2e672067ed9aea33c0abd6b01708734ff | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: UTF-8 -*-
if __name__ == "__main__":
N = 10
# input data
print 'please input ten num:\n'
l = []
for i in range(N):
l.append(int(raw_input('input a number:\n')))
print
for i in range(N):
print l[i]
print
# sort ten num
for i in range(N - 1):
min = i
for j in range(i + 1, N):
if l[min] > l[j]: min = j
l[i], l[min] = l[min], l[i]
print 'after sorted'
for i in range(N):
print l[i]
| 20.88 | 53 | 0.477011 |
aceec94946aca88758ba168abf46849b5d631070 | 1,384 | py | Python | rflib/runner/priority.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | null | null | null | rflib/runner/priority.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | null | null | null | rflib/runner/priority.py | tycoer/rflib-1 | 5746c668f990841bd8b8385408e8ddb268d22dd4 | [
"Apache-2.0"
] | 2 | 2021-07-30T04:22:46.000Z | 2021-07-30T05:08:43.000Z | # Copyright (c) RobotFlow. All rights reserved.
from enum import Enum
class Priority(Enum):
"""Hook priority levels.
+------------+------------+
| Level | Value |
+============+============+
| HIGHEST | 0 |
+------------+------------+
| VERY_HIGH | 10 |
+------------+------------+
| HIGH | 30 |
+------------+------------+
| NORMAL | 50 |
+------------+------------+
| LOW | 70 |
+------------+------------+
| VERY_LOW | 90 |
+------------+------------+
| LOWEST | 100 |
+------------+------------+
"""
HIGHEST = 0
VERY_HIGH = 10
HIGH = 30
NORMAL = 50
LOW = 70
VERY_LOW = 90
LOWEST = 100
def get_priority(priority):
"""Get priority value.
Args:
priority (int or str or :obj:`Priority`): Priority.
Returns:
int: The priority value.
"""
if isinstance(priority, int):
if priority < 0 or priority > 100:
raise ValueError('priority must be between 0 and 100')
return priority
elif isinstance(priority, Priority):
return priority.value
elif isinstance(priority, str):
return Priority[priority.upper()].value
else:
raise TypeError('priority must be an integer or Priority enum value')
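# --- Editor's note: illustrative usage (not part of the original module). ---
#     get_priority(30)               # -> 30
#     get_priority('normal')         # -> 50 (names are upper-cased before lookup)
#     get_priority(Priority.LOWEST)  # -> 100
#     get_priority(120)              # raises ValueError (must be within 0..100)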
| 25.163636 | 77 | 0.425578 |
aceec999309dcf951371b52baf01f72345a74721 | 321 | py | Python | django/firstproject/challenges/urls.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | 1 | 2021-02-19T11:00:11.000Z | 2021-02-19T11:00:11.000Z | django/firstproject/challenges/urls.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | null | null | null | django/firstproject/challenges/urls.py | dnootana/Python | 2881bafe8bc378fa3cae50a747fcea1a55630c63 | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
urlpatterns = [
#path("january", views.jan),
path("", views.index, name='index'),
path("<int:month_name>", views.monthly_challenge_by_number),
path("<str:month_name>", views.monthly_challenge, name="month-challenge"),
#path("february", views.feb),
]
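# --- Editor's note: illustrative request mapping (not part of the original file). ---
# Assuming this module is included under a hypothetical "challenges/" prefix in the
# project-level urls.py:
#     /challenges/        -> views.index
#     /challenges/3       -> views.monthly_challenge_by_number  (int converter matches first)
#     /challenges/march   -> views.monthly_challenge            (falls through to str converter)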
| 26.75 | 78 | 0.679128 |
aceeca37fa5b2f21fc33efa207c4ab8fcdae14cd | 1,580 | py | Python | moisture.py | randysimpson/pi-iot | 8090aebe0def7b5dc2f0d355dad4a22cfbed4653 | [
"MIT"
] | 3 | 2021-01-05T14:50:21.000Z | 2022-03-27T12:16:14.000Z | moisture.py | randysimpson/pi-iot | 8090aebe0def7b5dc2f0d355dad4a22cfbed4653 | [
"MIT"
] | null | null | null | moisture.py | randysimpson/pi-iot | 8090aebe0def7b5dc2f0d355dad4a22cfbed4653 | [
"MIT"
] | null | null | null | '''
Copyright (©) 2021 - Randall Simpson
pi-iot
This class is used to gather moisture sensor metrics from a Raspberry Pi.
Pseudocode:
If the value changes from the initial value, record the changes for the delay duration
and return an average value on a per-minute basis. If the value stays the same, no reporting is necessary.
'''
from sensor import Sensor
from generic import Generic
from metric import Metric
import datetime
import sys
import RPi.GPIO as GPIO
import time
import logging
logger = logging.getLogger('root')
class Moisture(Generic):
def __init__(self, source, metric_prefix, output, code, pin, metric_name, delay):
Generic.__init__(self, source, metric_prefix, output, code, pin, metric_name)
self.delay = delay
def get_info(self):
last_change_time = time.time()
current_time = time.time()
change_count = 0
while current_time - last_change_time < self.delay:
#loop during time to see if any changes happen
current_value = GPIO.input(self.pin)
time.sleep(0.00001)
current_time = time.time()
if current_value != self.initial_value:
change_count += 1
self.initial_value = current_value
#delay time is up, send metric if necessary
if change_count > 0:
#send a value of 0.5 if there is flapping, otherwise send the current value
val = 0.5 if change_count > 1 else current_value
self.metrics.append(Metric(self.name, val, datetime.datetime.utcnow()))
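# --- Editor's note: illustrative polling sketch (not part of the original module). ---
# Constructor arguments mirror __init__ above; the pin number and names are made
# up, and `initial_value`/`metrics` are assumed to be set up by the Generic base
# class.
#
#     sensor = Moisture(source='garden-pi', metric_prefix='soil', output=None,
#                       code='moist01', pin=17, metric_name='moisture', delay=60)
#     while True:
#         sensor.get_info()  # appends a Metric only when the reading changed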
| 33.617021 | 108 | 0.68038 |
aceeca508e381bec6514c236bac3ed4979aaf450 | 42,299 | py | Python | robovat/envs/push/push_env.py | mjlbach/robovat | 4b46459531c50f3801e6557174e49bfec532d870 | [
"MIT"
] | null | null | null | robovat/envs/push/push_env.py | mjlbach/robovat | 4b46459531c50f3801e6557174e49bfec532d870 | [
"MIT"
] | null | null | null | robovat/envs/push/push_env.py | mjlbach/robovat | 4b46459531c50f3801e6557174e49bfec532d870 | [
"MIT"
] | null | null | null | """Pushing task environment.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os.path
import glob
import random
import socket
import shutil
import cv2
import gym
import numpy as np
from matplotlib import pyplot as plt
from robovat.envs import arm_env
from robovat.envs import robot_env
from robovat.envs.push import layouts
from robovat.observations import attribute_obs
from robovat.observations import camera_obs
from robovat.observations import pose_obs
from robovat.reward_fns import push_reward
from robovat.math import Pose
from robovat.utils import time_utils
from robovat.utils.logging import logger
class PushEnv(arm_env.ArmEnv):
"""Pushing task environment."""
def __init__(self,
simulator=None,
config=None,
debug=True):
"""Initialize.
Args:
simulator: Instance of the simulator.
config: Environment configuration.
debug: True if it is debugging mode, False otherwise.
"""
self.simulator = simulator
self.config = config or self.default_config
self.debug = debug
self.camera = self.create_camera(
height=self.config.KINECT2.DEPTH.HEIGHT,
width=self.config.KINECT2.DEPTH.WIDTH,
intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
translation=self.config.KINECT2.DEPTH.TRANSLATION,
rotation=self.config.KINECT2.DEPTH.ROTATION)
# Layout.
self.task_name = self.config.TASK_NAME
self.layout_id = self.config.LAYOUT_ID
if self.task_name is None:
self.layouts = None
self.num_layouts = 1
elif self.task_name == 'data_collection':
self.layouts = None
self.num_layouts = 1
else:
self.layouts = layouts.TASK_NAME_TO_LAYOUTS[self.task_name]
self.num_layouts = len(self.layouts)
# Action and configuration space.
self.num_goal_steps = self.config.NUM_GOAL_STEPS
self.cspace = gym.spaces.Box(
low=np.array(self.config.ACTION.CSPACE.LOW),
high=np.array(self.config.ACTION.CSPACE.HIGH),
dtype=np.float32)
start_low = np.array(self.config.ACTION.CSPACE.LOW, dtype=np.float32)
start_high = np.array(self.config.ACTION.CSPACE.HIGH, dtype=np.float32)
self.start_offset = 0.5 * (start_high + start_low)
self.start_range = 0.5 * (start_high - start_low)
self.start_z = self.config.ARM.FINGER_TIP_OFFSET + self.start_offset[2]
table_x = self.config.SIM.TABLE.POSE[0][0]
table_y = self.config.SIM.TABLE.POSE[0][1]
self.table_workspace = gym.spaces.Box(
low=np.array([table_x - 0.5 * self.config.TABLE.X_RANGE,
table_y - 0.5 * self.config.TABLE.Y_RANGE]),
high=np.array([table_x + 0.5 * self.config.TABLE.X_RANGE,
table_y + 0.5 * self.config.TABLE.Y_RANGE]),
dtype=np.float32)
# Movable Objects.
self.min_movable_bodies = self.config.MIN_MOVABLE_BODIES
self.max_movable_bodies = self.config.MAX_MOVABLE_BODIES
self.num_movable_bodies = None
self.movable_body_mask = None
if self.simulator:
movable_name = self.config.MOVABLE_NAME.upper()
self.movable_config = self.config.MOVABLE[movable_name]
self.movable_bodies = []
self.movable_paths = []
for pattern in self.movable_config.PATHS:
if not os.path.isabs(pattern):
pattern = os.path.join(self.simulator.assets_dir, pattern)
self.movable_paths += glob.glob(pattern)
assert len(self.movable_paths) > 0
self.target_movable_paths = []
for pattern in self.movable_config.TARGET_PATHS:
if not os.path.isabs(pattern):
pattern = os.path.join(self.simulator.assets_dir, pattern)
self.target_movable_paths += glob.glob(pattern)
assert len(self.target_movable_paths) > 0
else:
self.movable_config = None
self.movable_bodies = None
self.movable_paths = None
self.target_movable_paths = None
# Execution phases.
self.phase_list = ['initial',
'pre',
'start',
'motion',
'post',
'offstage',
'done']
# Action related information.
self.attributes = None
self.start_status = None
self.end_status = None
self.max_phase_steps = None
# Statistics.
self.num_total_steps = 0
self.num_unsafe = 0
self.num_ineffective = 0
self.num_useful = 0
self.num_successes = 0
self.num_successes_by_step = [0] * int(self.config.MAX_STEPS + 1)
# Observations and rewards.
observations = self.initialize_observations()
reward_fns = [
push_reward.PushReward(
name='reward',
task_name=self.task_name,
layout_id=self.layout_id,
is_planning=False
)
]
# Recording.
self.use_recording = self.config.RECORDING.USE
if self.use_recording:
self.recording_camera = None
self.recording_output_dir = None
self.video_writer = None
# Visualization.
if self.debug:
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(111)
plt.ion()
plt.show()
self.ax = ax
super(PushEnv, self).__init__(
observations=observations,
reward_fns=reward_fns,
simulator=self.simulator,
config=self.config,
debug=self.debug)
@property
def action_space(self):
if self.num_goal_steps is None:
action_shape = [4]
else:
assert self.num_goal_steps > 0
action_shape = [self.num_goal_steps, 4]
return gym.spaces.Box(
low=-np.ones(action_shape, dtype=np.float32),
high=np.ones(action_shape, dtype=np.float32),
dtype=np.float32)
def initialize_observations(self):
"""Initialize the observations.
Returns:
A list of observations.
"""
observations = [
attribute_obs.IntegerAttributeObs(
'num_episodes',
max_value=int(2**16 - 1),
name='num_episodes'),
attribute_obs.IntegerAttributeObs(
'num_steps',
max_value=int(2**16 - 1),
name='num_steps'),
attribute_obs.IntegerAttributeObs(
'layout_id',
max_value=self.num_layouts,
name='layout_id'),
attribute_obs.ArrayAttributeObs(
'movable_body_mask',
shape=[self.max_movable_bodies],
name='body_mask'),
]
# In simulation, ground truth segmented point clouds are provided. In
# the real world, the segmented point clouds are computed using
# clustering algorithms.
if self.simulator:
observations += [
camera_obs.SegmentedPointCloudObs(
self.camera,
num_points=self.config.OBS.NUM_POINTS,
num_bodies=self.max_movable_bodies,
name='point_cloud'),
]
else:
observations += [
camera_obs.SegmentedPointCloudObs(
self.camera,
num_points=self.config.OBS.NUM_POINTS,
num_bodies=self.max_movable_bodies,
crop_min=self.config.OBS.CROP_MIN,
crop_max=self.config.OBS.CROP_MAX,
confirm_target=True,
name='point_cloud'),
]
# Prestiged information.
if self.simulator and self.config.USE_PRESTIGE_OBS:
observations += [
pose_obs.PoseObs(
num_bodies=self.max_movable_bodies,
modality='position',
name='position'),
attribute_obs.FlagObs('is_safe', name='is_safe'),
attribute_obs.FlagObs('is_effective', name='is_effective'),
]
# Visual observations for visualization.
if self.config.USE_VISUALIZATION_OBS:
observations += [
camera_obs.CameraObs(
self.camera,
modality='rgb',
name='rgb'),
camera_obs.CameraObs(
self.camera,
modality='depth',
name='depth'),
]
return observations
def reset(self):
"""Reset."""
observations = super(PushEnv, self).reset()
self.reset_camera(
self.camera,
intrinsics=self.config.KINECT2.DEPTH.INTRINSICS,
translation=self.config.KINECT2.DEPTH.TRANSLATION,
rotation=self.config.KINECT2.DEPTH.ROTATION,
intrinsics_noise=self.config.KINECT2.DEPTH.INTRINSICS_NOISE,
translation_noise=self.config.KINECT2.DEPTH.TRANSLATION_NOISE,
rotation_noise=self.config.KINECT2.DEPTH.ROTATION_NOISE)
if self.use_recording:
hostname = socket.gethostname().split('.')[0]
self.recording_camera = self.create_camera(
height=self.config.RECORDING.CAMERA.HEIGHT,
width=self.config.RECORDING.CAMERA.WIDTH,
intrinsics=self.config.RECORDING.CAMERA.INTRINSICS,
translation=self.config.RECORDING.CAMERA.TRANSLATION,
rotation=self.config.RECORDING.CAMERA.ROTATION)
recording_tmp_dir = os.path.join('/tmp', 'recording')
if not os.path.exists(recording_tmp_dir):
os.makedirs(recording_tmp_dir)
if self.task_name is None:
recording_output_dir = os.path.join(
self.config.RECORDING.OUTPUT_DIR,
'data_collection')
else:
recording_output_dir = os.path.join(
self.config.RECORDING.OUTPUT_DIR,
'%s_layout%02d' % (self.task_name, self.layout_id)
)
if not os.path.exists(recording_output_dir):
logger.info('Saving recorded videos to %s ...',
recording_output_dir)
os.makedirs(recording_output_dir)
if self.video_writer is not None:
self.video_writer.release()
shutil.copyfile(self.recording_tmp_path,
self.recording_output_path)
name = '%s_%s.avi' % (
hostname, time_utils.get_timestamp_as_string())
resolution = (self.config.RECORDING.CAMERA.WIDTH,
self.config.RECORDING.CAMERA.HEIGHT)
self.recording_tmp_path = os.path.join(
recording_tmp_dir, name)
self.recording_output_path = os.path.join(
recording_output_dir, name)
self.video_writer = cv2.VideoWriter(
self.recording_tmp_path,
cv2.VideoWriter_fourcc(*'XVID'),
self.config.RECORDING.FPS,
resolution)
return observations
def reset_scene(self):
"""Reset the scene in simulation or the real world."""
super(PushEnv, self).reset_scene()
# Simulation.
if self.simulator:
self.num_movable_bodies = np.random.randint(
low=self.min_movable_bodies,
high=self.max_movable_bodies + 1)
self._load_movable_bodies()
if self.layouts is not None:
layout = self.layouts[self.layout_id]
self._load_tiles(
layout.region,
layout.size,
layout.offset,
z_offset=0.001 - 0.025,
rgba=layout.region_rgba)
if layout.goal is not None:
self._load_tiles(
layout.goal,
layout.size,
layout.offset,
z_offset=0.0015 - 0.025,
rgba=layout.goal_rgba)
else:
self.num_movable_bodies = self.max_movable_bodies
logger.info('Assume there are %d movable objects on the table.',
self.num_movable_bodies)
self.movable_body_mask = np.array(
[1] * self.num_movable_bodies +
[0] * (self.max_movable_bodies - self.num_movable_bodies))
# Attributes
self.attributes = {
'num_episodes': self.num_episodes,
'num_steps': self.num_steps,
'layout_id': self.layout_id,
'movable_body_mask': self.movable_body_mask,
'is_safe': True,
'is_effective': True,
}
def _load_tiles(self, centers, size, offset, z_offset=0.0, rgba=[0, 0, 0]):
"""Load tiles to the scene.
Args:
centers: Configuration of the tiles.
size: Side length of each tile.
offset: List of x-y position offsets.
z_offset: Offset of the z dimension.
rgba: The color of the tile.
"""
path = self.config.SIM.TILE.PATH
for i, center in enumerate(centers):
position = np.array(offset) + np.array(center) * size
height = self.table.position.z + z_offset
position = np.array([position[0], position[1], height])
euler = np.array([0, 0, 0])
pose = np.array([position, euler])
name = 'tile_%d' % i
body = self.simulator.add_body(
path, pose, is_static=True, name=name)
body.set_color(rgba=rgba, specular=[0, 0, 0])
def _load_movable_bodies(self):
"""Load movable bodies."""
assert self.simulator is not None
is_valid = False
while not is_valid:
logger.info('Loading movable objects...')
is_valid = True
self.movable_bodies = []
# Sample placements of the movable objects.
is_target = False
if self.layouts is None:
movable_poses = self._sample_body_poses(
self.num_movable_bodies, self.movable_config)
else:
layout = self.layouts[self.layout_id]
movable_poses = self._sample_body_poses_on_tiles(
self.num_movable_bodies,
self.movable_config,
layout)
is_target = (layout.target is not None)
for i in range(self.num_movable_bodies):
if i == 0 and is_target:
path = random.choice(self.target_movable_paths)
else:
path = random.choice(self.movable_paths)
pose = movable_poses[i]
scale = np.random.uniform(*self.movable_config.SCALE)
name = 'movable_%d' % i
# Add the body.
body = self.simulator.add_body(path, pose, scale, name=name)
if self.config.USE_RANDOM_RGBA:
r = np.random.uniform(0., 1.)
g = np.random.uniform(0., 1.)
b = np.random.uniform(0., 1.)
body.set_color(rgba=[r, g, b, 1.0], specular=[0, 0, 0])
# Wait for the new body to be dropped onto the table.
self.simulator.wait_until_stable(
body,
linear_velocity_threshold=0.1,
angular_velocity_threshold=0.1,
max_steps=500)
# Change physical properties.
mass = robot_env.get_config_value(self.movable_config.MASS)
lateral_friction = robot_env.get_config_value(
self.movable_config.FRICTION)
body.set_dynamics(
mass=mass,
lateral_friction=lateral_friction,
rolling_friction=None,
spinning_friction=None)
self.movable_bodies.append(body)
for body in self.movable_bodies:
if body.position.z < self.table.position.z:
is_valid = False
break
if not is_valid:
logger.info('Invalid arrangement, reset the scene...')
for i, body in enumerate(self.movable_bodies):
self.simulator.remove_body(body.name)
logger.info('Waiting for movable objects to be stable...')
self.simulator.wait_until_stable(self.movable_bodies)
def _sample_body_poses(self,
num_samples,
body_config,
max_attemps=32):
"""Sample body poses.
Args:
num_samples: Number of samples.
body_config: Configuration of the body.
max_attemps: Maximum number of attemps to find a feasible
placement.
Returns:
List of poses.
"""
while True:
movable_poses = []
for i in range(num_samples):
num_attemps = 0
is_valid = False
while not is_valid and num_attemps <= max_attemps:
pose = Pose.uniform(x=body_config.POSE.X,
y=body_config.POSE.Y,
z=body_config.POSE.Z,
roll=body_config.POSE.ROLL,
pitch=body_config.POSE.PITCH,
yaw=body_config.POSE.YAW)
# Check if the new pose is distant from other bodies.
is_valid = True
for other_pose in movable_poses:
dist = np.linalg.norm(
pose.position[:2] - other_pose.position[:2])
if dist < body_config.MARGIN:
is_valid = False
num_attemps += 1
break
if not is_valid:
logger.info('Cannot find the placement of body %d. '
'Start re-sampling.', i)
break
else:
movable_poses.append(pose)
if i == num_attemps:
break
return movable_poses
def _sample_body_poses_on_tiles(self,
num_samples,
body_config,
layout,
safe_drop_height=0.2,
max_attemps=32):
"""Sample tile poses on the tiles.
Args:
num_samples: Number of samples.
body_config: Configuration of the body.
layout: Configuration of the layout.
safe_drop_height: Dropping height of the body.
max_attemps: Maximum number of attemps to find a feasible
placement.
Returns:
List of poses.
"""
tile_size = layout.size
tile_offset = layout.offset
while True:
movable_poses = []
for i in range(num_samples):
num_attemps = 0
is_valid = False
if i == 0 and layout.target is not None:
tile_config = layout.target
else:
tile_config = layout.obstacle
while not is_valid and num_attemps <= max_attemps:
num_tiles = len(tile_config)
tile_id = np.random.choice(num_tiles)
tile_center = tile_config[tile_id]
x_range = [
tile_offset[0] + (tile_center[0] - 0.5) * tile_size,
tile_offset[0] + (tile_center[0] + 0.5) * tile_size]
y_range = [
tile_offset[1] + (tile_center[1] - 0.5) * tile_size,
tile_offset[1] + (tile_center[1] + 0.5) * tile_size]
z = self.table_pose.position.z + safe_drop_height
pose = Pose.uniform(x=x_range,
y=y_range,
z=z,
roll=[-np.pi, np.pi],
pitch=[-np.pi / 2, np.pi / 2],
yaw=[-np.pi, np.pi])
is_valid = True
for other_pose in movable_poses:
dist = np.linalg.norm(
pose.position[:2] - other_pose.position[:2])
if dist < body_config.MARGIN:
is_valid = False
num_attemps += 1
break
if not is_valid:
logger.info('Cannot find the placement of body %d. '
'Start re-sampling.', i)
break
else:
movable_poses.append(pose)
if i == num_attemps:
break
return movable_poses
def step(self, action):
"""Take a step.
See parent class.
"""
observation, reward, is_done, info = super(PushEnv, self).step(action)
if is_done and reward >= self.config.SUCCESS_THRESH:
self.num_successes += 1
self.num_successes_by_step[self._num_steps] += 1
if is_done:
logger.info(
'num_successes: %d, success_rate: %.3f',
self.num_successes,
self.num_successes / float(self._num_episodes + 1e-14),
)
text = ('num_successes_by_step: ' +
', '.join(['%d'] * int(self.config.MAX_STEPS + 1)))
logger.info(text, *self.num_successes_by_step)
logger.info(
'num_total_steps %d, '
'unsafe: %.3f, ineffective: %.3f, useful: %.3f',
self.num_total_steps,
self.num_unsafe / float(self.num_total_steps + 1e-14),
self.num_ineffective / float(self.num_total_steps + 1e-14),
self.num_useful / float(self.num_total_steps + 1e-14))
return observation, reward, is_done, info
def execute_action(self, action): # NOQA
"""Execute the robot action.
Args:
action: A dictionary of mode and argument of the action.
"""
self.attributes = {
'num_episodes': self.num_episodes,
'num_steps': self.num_steps,
'layout_id': self.layout_id,
'movable_body_mask': self.movable_body_mask,
'is_safe': True,
'is_effective': True,
}
waypoints = self._compute_all_waypoints(action)
self.phase = 'initial'
self.num_waypoints = 0
self.interrupt = False
self.start_status = self._get_movable_status()
while(self.phase != 'done'):
if self.simulator:
self.simulator.step()
if self.use_recording:
if (self.simulator.num_steps %
self.config.RECORDING.NUM_STEPS == 0):
self._record_screenshot()
if not (self.simulator.num_steps %
self.config.SIM.STEPS_CHECK == 0):
continue
# Phase transition.
if self.is_phase_ready():
self.phase = self.get_next_phase()
if self.config.DEBUG and self.debug:
logger.info('phase: %s, num_waypoints: %d',
self.phase, self.num_waypoints)
if self.simulator:
self.max_phase_steps = self.simulator.num_steps
if self.phase == 'motion':
self.max_phase_steps += (
self.config.SIM.MAX_MOTION_STEPS)
elif self.phase == 'offstage':
self.max_phase_steps += (
self.config.SIM.MAX_OFFSTAGE_STEPS)
else:
self.max_phase_steps += (
self.config.SIM.MAX_PHASE_STEPS)
if self.phase == 'pre':
pose = waypoints[self.num_waypoints][0].copy()
pose.z = self.config.ARM.GRIPPER_SAFE_HEIGHT
self.robot.move_to_gripper_pose(pose)
elif self.phase == 'start':
pose = waypoints[self.num_waypoints][0]
self.robot.move_to_gripper_pose(pose)
elif self.phase == 'motion':
pose = waypoints[self.num_waypoints][1]
self.robot.move_to_gripper_pose(pose)
elif self.phase == 'post':
self.num_waypoints += 1
pose = self.robot.end_effector.pose
pose.z = self.config.ARM.GRIPPER_SAFE_HEIGHT
self.robot.move_to_gripper_pose(pose)
elif self.phase == 'offstage':
self.robot.move_to_joint_positions(
self.config.ARM.OFFSTAGE_POSITIONS)
self.interrupt = False
if self.check_singularity():
self.interrupt = True
if not self.check_safety():
self.interrupt = True
self.attributes['is_safe'] = False
if self.interrupt:
if self.phase == 'done':
self._is_done = True
break
if self.simulator:
self.simulator.wait_until_stable(self.movable_bodies)
# Update attributes.
self.end_status = self._get_movable_status()
self.attributes['is_effective'] = self.check_effectiveness()
self.num_total_steps += 1
self.num_unsafe += int(not self.attributes['is_safe'])
self.num_ineffective += int(not self.attributes['is_effective'])
self.num_useful += int(self.attributes['is_safe'] and
self.attributes['is_effective'])
def _compute_all_waypoints(self, action):
"""Convert action of a single step or multiple steps to waypoints.
Args:
action: Action of a single step or actions of multiple steps.
Returns:
List of waypoints of a single step or multiple steps.
"""
if self.num_goal_steps is None:
waypoints = [self._compute_waypoints(action)]
else:
waypoints = [
self._compute_waypoints(action[i])
for i in range(self.num_goal_steps)]
return waypoints
def _compute_waypoints(self, action):
"""Convert action of a single step to waypoints.
Args:
action: The action of a single step.
Returns:
List of waypoints of a single step.
"""
action = np.reshape(action, [2, 2])
start = action[0, :]
motion = action[1, :]
# Start.
x = start[0] * self.start_range[0] + self.start_offset[0]
y = start[1] * self.start_range[1] + self.start_offset[1]
z = self.start_z
angle = 0.0
start = Pose(
[[x, y, z], [np.pi, 0, (angle + np.pi) % (2 * np.pi) - np.pi]]
)
# End.
delta_x = motion[0] * self.config.ACTION.MOTION.TRANSLATION_X
delta_y = motion[1] * self.config.ACTION.MOTION.TRANSLATION_Y
x = x + delta_x
y = y + delta_y
x = np.clip(x, self.cspace.low[0], self.cspace.high[0])
y = np.clip(y, self.cspace.low[1], self.cspace.high[1])
end = Pose(
[[x, y, z], [np.pi, 0, (angle + np.pi) % (2 * np.pi) - np.pi]]
)
waypoints = [start, end]
return waypoints
def get_next_phase(self):
"""Get the next phase of the current phase.
Returns:
The next phase as a string variable.
"""
if self.phase in self.phase_list:
if self.interrupt:
if self.phase not in ['post', 'offstage', 'done']:
return 'post'
i = self.phase_list.index(self.phase)
if i >= len(self.phase_list):
raise ValueError('phase %r does not have a next phase.')
else:
if self.num_goal_steps is not None:
if (self.phase == 'post' and
self.num_waypoints < self.num_goal_steps):
return 'pre'
return self.phase_list[i + 1]
else:
raise ValueError('Unrecognized phase: %r' % self.phase)
def is_phase_ready(self):
"""Check if the current phase is ready.
Returns:
The boolean value indicating if the current phase is ready.
"""
if self.interrupt:
return True
if self.simulator:
if self.robot.is_limb_ready() and self.robot.is_gripper_ready():
self.robot.arm.reset_targets()
return True
if self.max_phase_steps is None:
return True
if self.simulator.num_steps >= self.max_phase_steps:
if self.config.DEBUG:
logger.warning('Phase %s timeout.', self.phase)
self.robot.arm.reset_targets()
return True
return False
else:
return True
def check_singularity(self):
"""Check singularity.
Returns:
True if it is in simulation and the arm contacts the table, False
otherwise.
"""
if self.phase != 'motion':
return False
if self.simulator:
if self.simulator.check_contact(self.robot.arm, self.table):
if self.config.DEBUG:
logger.warning('Arm collides with the table.')
return True
return False
def check_safety(self):
"""Check if the action is safe.
Returns:
            True if all the safety conditions are satisfied, False otherwise.
"""
if self.simulator:
if self.phase == 'pre':
if self.simulator.check_contact(
self.robot.arm, self.movable_bodies):
logger.warning('Unsafe action: Bodies stuck on robots.')
return False
if self.phase == 'start':
if self.simulator.check_contact(
self.robot.arm, self.movable_bodies):
dist = self.robot.end_effector.position.z - self.start_z
if abs(dist) <= 0.01:
return True
logger.warning('Unsafe action: Bodies stuck on robots.')
return False
if self.phase == 'done':
if self.simulator.check_contact(
self.robot.arm, self.movable_bodies):
logger.warning('Unsafe action: Bodies stuck on robots.')
return False
for body in self.movable_bodies:
if (
body.position.x < self.table_workspace.low[0] or
body.position.x > self.table_workspace.high[0] or
body.position.y < self.table_workspace.low[1] or
body.position.y > self.table_workspace.high[1]):
logger.warning('Unsafe action: Body left table.')
return False
else:
has_collided = self.robot.arm.has_collided()
return not has_collided
return True
def check_effectiveness(self):
"""Check if the action is effective.
Returns:
True if at least one of the object has a translation or orientation
larger than the threshold, False otherwise.
"""
if self.simulator:
delta_position = np.linalg.norm(
self.end_status[0] - self.start_status[0], axis=-1)
delta_position = np.sum(delta_position)
delta_angle = self.end_status[1] - self.start_status[1]
delta_angle = (delta_angle + np.pi) % (2 * np.pi) - np.pi
delta_angle = np.abs(delta_angle)
delta_angle = np.sum(delta_angle)
if (delta_position <= self.config.ACTION.MIN_DELTA_POSITION and
delta_angle <= self.config.ACTION.MIN_DELTA_ANGLE):
if self.config.DEBUG:
logger.warning('Ineffective action.')
return False
return True
def _get_movable_status(self):
"""Get the status of the movable objects.
Returns:
Concatenation of the positions and Euler angles of all objects in
the simulation, None in the real world.
"""
if self.simulator:
positions = [body.position for body in self.movable_bodies]
angles = [body.euler[2] for body in self.movable_bodies]
return [np.stack(positions, axis=0), np.stack(angles, axis=0)]
return None
def visualize(self, action, info): # NOQA
"""Visualize the action.
Args:
action: A selected action.
            info: The policy information.
"""
num_info_samples = self.config.NUM_INFO_SAMPLES
# Reset.
images = self.camera.frames()
rgb = images['rgb']
self.ax.cla()
self.ax.imshow(rgb)
if 'position' in self.obs_data:
states = self.obs_data['position'][..., :2]
else:
point_clouds = self.obs_data['point_cloud']
states = np.mean(point_clouds[..., :2], axis=-2)
if 'pred_goals' in info:
pred_states = info['pred_goals']
terminations = info['goal_terminations']
max_plots = min(num_info_samples, self.config.MAX_STATE_PLOTS)
for i in range(max_plots):
self._plot_pred_states(self.ax,
states,
pred_states[i],
terminations[i],
c='gold',
alpha=0.8)
t = 0
self._plot_states_distribution(
self.ax,
pred_states[:, t],
terminations[:, t],
body_index=0,
num_plots=num_info_samples,
c='gold',
alpha=0.5)
if 'pred_states' in info:
pred_states = info['pred_states']
terminations = info['terminations']
max_plots = min(num_info_samples, self.config.MAX_STATE_PLOTS)
for i in range(max_plots):
self._plot_pred_states(self.ax,
states,
pred_states[i],
terminations[i],
c='lawngreen',
alpha=0.5)
if 'actions' in info:
actions = info['actions']
max_plots = min(num_info_samples, self.config.MAX_ACTION_PLOTS)
for t in range(self.num_goal_steps):
if t == 0:
c = 'royalblue'
elif t == 1:
c = 'deepskyblue'
elif t == 2:
c = 'azure'
else:
raise ValueError
for i in range(max_plots):
if i == 0:
linewidth = 3.0
else:
linewidth = 1.0
waypoints = self._compute_waypoints(actions[i, t])
self._plot_waypoints(self.ax,
waypoints,
linewidth=linewidth,
c=c,
alpha=0.5)
# Plot waypoints in simulation.
if self.simulator and self.num_goal_steps is None:
waypoints = self._compute_waypoints(actions)
self._plot_waypoints_in_simulation(waypoints)
plt.draw()
plt.pause(1e-3)
def _plot_waypoints(self,
ax,
waypoints,
linewidth=1.0,
c='blue',
alpha=1.0):
"""Plot waypoints.
Args:
ax: An instance of Matplotlib Axes.
waypoints: List of waypoints.
linewidth: Width of the lines connecting waypoints.
c: Color of the lines connecting waypoints.
alpha: Alpha value of the lines connecting waypoints.
"""
z = self.table_pose.position.z
p1 = None
p2 = None
for i, waypoint in enumerate(waypoints):
point1 = waypoint.position
point1 = np.array([point1[0], point1[1], z])
p1 = self.camera.project_point(point1)
if i == 0:
ax.scatter(p1[0], p1[1],
c=c, alpha=alpha, s=2.0)
else:
ax.plot([p1[0], p2[0]], [p1[1], p2[1]],
c=c, alpha=alpha, linewidth=linewidth)
p2 = p1
def _plot_pred_states(self,
ax,
states,
pred_states,
terminations,
c='lawngreen',
alpha=1.0):
"""Plot predicted states.
Args:
ax: An instance of Matplotlib Axes.
states: The current states.
pred_states: The predicted states.
terminations: Termination flags of the predicted states.
c: Color of the body centers.
alpha: Alpha value of the body centers
"""
num_steps = pred_states.shape[0]
z = self.table_pose.position.z
for j in range(self.num_movable_bodies):
points1 = np.array(list(states[j]) + [z])
p1 = self.camera.project_point(points1)
for t in range(num_steps):
points2 = np.array(list(pred_states[t, j]) + [z])
p2 = self.camera.project_point(points2)
# if np.linalg.norm(points2 - points1) < 0.1:
# continue
ax.arrow(p1[0], p1[1], p2[0] - p1[0], p2[1] - p1[1],
head_width=10, head_length=10,
fc=c, ec=c, alpha=alpha,
zorder=100)
points1 = points2
p1 = p2
# Stop plotting after termination.
if terminations[t]:
break
def _plot_states_distribution(self,
ax,
pred_states,
terminations,
body_index,
num_plots,
c='lawngreen',
alpha=1.0):
"""Plot the distribution of predicted states.
Args:
ax: An instance of Matplotlib Axes.
pred_states: The predicted states.
terminations: Termination flags of the predicted states.
body_index: The index of the body to plot.
num_plots: Number of bodies to plot.
c: Color of the body centers.
alpha: Alpha value of the body centers
"""
z = self.table_pose.position.z
for i in range(num_plots):
position = pred_states[i, body_index]
points = np.array(list(position) + [z])
p = self.camera.project_point(points)
if terminations[i]:
c_i = 'r'
else:
c_i = c
ax.scatter(p[0], p[1], c=c_i, alpha=alpha)
def _plot_waypoints_in_simulation(self, waypoints):
"""Plot waypoints in simulation.
Args:
waypoints: List of waypoints.
"""
self.simulator.clear_visualization()
for i, waypoints_i in enumerate(waypoints):
for j in range(len(waypoints_i)):
waypoint = waypoints_i[j]
if j == 0:
text = '%d' % (i)
else:
text = None
self.simulator.plot_pose(waypoint, 0.05, text)
for j in range(1, len(waypoints_i)):
self.simulator.plot_line(waypoints_i[j - 1].position,
waypoints_i[j].position)
def _record_screenshot(self):
"""Record a screenshot and save to file."""
assert self.simulator is not None
images = self.recording_camera.frames()
image = images['rgb']
image = image.astype(np.uint8)
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
self.video_writer.write(image)
| 36.527634 | 79 | 0.510556 |
aceecb98581b53fba0d9228e03352479e7976407 | 1,983 | py | Python | test/test_server_config_result_entry_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | ["Apache-2.0"] | 21 | 2018-03-29T14:20:35.000Z | 2021-10-13T05:11:41.000Z | test/test_server_config_result_entry_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | ["Apache-2.0"] | 14 | 2018-01-30T15:45:46.000Z | 2022-02-23T14:23:21.000Z | test/test_server_config_result_entry_ref.py | sdnit-se/intersight-python | 551f7685c0f76bb8af60ec83ffb6f9672d49a4ae | ["Apache-2.0"] | 18 | 2018-01-03T15:09:56.000Z | 2021-07-16T02:21:54.000Z |
# coding: utf-8
"""
Cisco Intersight
Cisco Intersight is a management platform delivered as a service with embedded analytics for your Cisco and 3rd party IT infrastructure. This platform offers an intelligent level of management that enables IT organizations to analyze, simplify, and automate their environments in more advanced ways than the prior generations of tools. Cisco Intersight provides an integrated and intuitive management experience for resources in the traditional data center as well as at the edge. With flexible deployment options to address complex security needs, getting started with Intersight is quick and easy. Cisco Intersight has deep integration with Cisco UCS and HyperFlex systems allowing for remote deployment, configuration, and ongoing maintenance. The model-based deployment works for a single system in a remote location or hundreds of systems in a data center and enables rapid, standardized configuration and deployment. It also streamlines maintaining those systems whether you are working with small or very large configurations. # noqa: E501
The version of the OpenAPI document: 1.0.9-1295
Contact: intersight@cisco.com
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import intersight
from intersight.models.server_config_result_entry_ref import ServerConfigResultEntryRef # noqa: E501
from intersight.rest import ApiException
class TestServerConfigResultEntryRef(unittest.TestCase):
"""ServerConfigResultEntryRef unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testServerConfigResultEntryRef(self):
"""Test ServerConfigResultEntryRef"""
# FIXME: construct object with mandatory attributes with example values
# model = intersight.models.server_config_result_entry_ref.ServerConfigResultEntryRef() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 52.184211 | 1,052 | 0.787695 |
aceeccc15291ae283ed6e77424dd90cce3aa4a07 | 1,242 | py | Python | install.py | kazulagi/plantfem_min | ba7398c031636644aef8acb5a0dad7f9b99fcb92 | ["MIT"] | null | null | null | install.py | kazulagi/plantfem_min | ba7398c031636644aef8acb5a0dad7f9b99fcb92 | ["MIT"] | null | null | null | install.py | kazulagi/plantfem_min | ba7398c031636644aef8acb5a0dad7f9b99fcb92 | ["MIT"] | null | null | null |
import os
import sys
import platform
print("Detecting OS type...")
pf=platform.system()
if pf == 'Windows':
print("OS : Windows")
print("Now installing...")
os.system("install/install.bat")
print("Please use Windows Subsystem Linux(WSL) ")
print("Successfully Installed!!")
elif pf == "Darwin":
print("OS : macOS")
print("Now installing...")
#os.system("sh ./setup/setup_macOS")
os.system("python3 "+str(os.path.abspath("./"))+"/setup.py")
os.system("sh "+str(os.path.abspath("./"))+"/install/install")
os.system("ln -si "+str(os.path.abspath("./"))+"/plantfem /usr/local/bin")
os.system("sudo ln -si "+str(os.path.abspath("./"))+"/bin/soja.sh /usr/local/bin/soja")
print("Successfully Installed!!")
elif pf == "Linux":
print("OS : Linux")
print("Now installing...")
#os.system("sh ./setup/setup")
os.system("python3 "+str(os.path.abspath("./"))+"/setup.py")
os.system("sh "+str(os.path.abspath("./"))+"/install/install")
os.system("sudo ln -si "+str(os.path.abspath("./"))+"/plantfem /usr/local/bin")
os.system("sudo ln -si "+str(os.path.abspath("./"))+"/bin/soja.sh /usr/local/bin/soja")
print("Successfully Installed!!")
else:
print("OS : Unknown ") | 37.636364 | 91 | 0.615137 |
aceecd6b3d74627f3650f86ae979a97e98d3096d | 223 | py | Python | monolith_filemanager/console_commands/install_boto.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 3 | 2021-06-02T09:45:00.000Z | 2022-02-01T14:30:01.000Z | monolith_filemanager/console_commands/install_boto.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 3 | 2021-05-26T11:46:28.000Z | 2021-11-04T10:14:42.000Z | monolith_filemanager/console_commands/install_boto.py | MonolithAILtd/monolith-filemanager | 2369e244e4d8a48890f55d00419a83001a5c6c40 | [
"Apache-2.0"
] | 2 | 2021-06-04T15:02:14.000Z | 2021-09-03T09:26:45.000Z | import subprocess
import sys
def install_boto():
"""
Installs the boto requirement for the package.
Returns: None
"""
subprocess.check_call([sys.executable, '-m', 'pip', 'install', 'boto3>=1.16.43'])
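# Illustrative manual invocation (assuming the package is importable):
#   python -c "from monolith_filemanager.console_commands.install_boto import install_boto; install_boto()"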
| 18.583333 | 85 | 0.64574 |
aceecdaa350609d84d25bb6cef19177be2addb5f | 42,000 | py | Python | patroni/postgresql/__init__.py | kmoppel/patroni | e2b2daf0b3089793b1c8298e246ffc4fc5580561 | ["MIT"] | null | null | null | patroni/postgresql/__init__.py | kmoppel/patroni | e2b2daf0b3089793b1c8298e246ffc4fc5580561 | ["MIT"] | null | null | null | patroni/postgresql/__init__.py | kmoppel/patroni | e2b2daf0b3089793b1c8298e246ffc4fc5580561 | ["MIT"] | null | null | null |
import logging
import os
import psycopg2
import shlex
import shutil
import subprocess
import time
from contextlib import contextmanager
from copy import deepcopy
from dateutil import tz
from datetime import datetime
from patroni.postgresql.callback_executor import CallbackExecutor
from patroni.postgresql.bootstrap import Bootstrap
from patroni.postgresql.cancellable import CancellableSubprocess
from patroni.postgresql.config import ConfigHandler, mtime
from patroni.postgresql.connection import Connection, get_connection_cursor
from patroni.postgresql.misc import parse_history, parse_lsn, postgres_major_version_to_int
from patroni.postgresql.postmaster import PostmasterProcess
from patroni.postgresql.slots import SlotsHandler
from patroni.exceptions import PostgresConnectionException
from patroni.utils import Retry, RetryFailedError, polling_loop, data_directory_is_empty, parse_int
from psutil import TimeoutExpired
from threading import current_thread, Lock
logger = logging.getLogger(__name__)
ACTION_ON_START = "on_start"
ACTION_ON_STOP = "on_stop"
ACTION_ON_RESTART = "on_restart"
ACTION_ON_RELOAD = "on_reload"
ACTION_ON_ROLE_CHANGE = "on_role_change"
ACTION_NOOP = "noop"
STATE_RUNNING = 'running'
STATE_REJECT = 'rejecting connections'
STATE_NO_RESPONSE = 'not responding'
STATE_UNKNOWN = 'unknown'
STOP_POLLING_INTERVAL = 1
@contextmanager
def null_context():
yield
class Postgresql(object):
POSTMASTER_START_TIME = "pg_catalog.to_char(pg_catalog.pg_postmaster_start_time(), 'YYYY-MM-DD HH24:MI:SS.MS TZ')"
TL_LSN = ("CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 "
"ELSE ('x' || pg_catalog.substr(pg_catalog.pg_{0}file_name("
"pg_catalog.pg_current_{0}_{1}()), 1, 8))::bit(32)::int END, " # master timeline
"CASE WHEN pg_catalog.pg_is_in_recovery() THEN 0 "
"ELSE pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_current_{0}_{1}(), '0/0')::bigint END, " # write_lsn
"pg_catalog.pg_{0}_{1}_diff(pg_catalog.pg_last_{0}_replay_{1}(), '0/0')::bigint, "
"pg_catalog.pg_{0}_{1}_diff(COALESCE(pg_catalog.pg_last_{0}_receive_{1}(), '0/0'), '0/0')::bigint, "
"pg_catalog.pg_is_in_recovery() AND pg_catalog.pg_is_{0}_replay_paused()")
def __init__(self, config):
self.name = config['name']
self.scope = config['scope']
self._data_dir = config['data_dir']
self._database = config.get('database', 'postgres')
self._version_file = os.path.join(self._data_dir, 'PG_VERSION')
self._pg_control = os.path.join(self._data_dir, 'global', 'pg_control')
self._major_version = self.get_major_version()
self._state_lock = Lock()
self.set_state('stopped')
self._pending_restart = False
self._connection = Connection()
self.config = ConfigHandler(self, config)
self.config.check_directories()
self._bin_dir = config.get('bin_dir') or ''
self.bootstrap = Bootstrap(self)
self.bootstrapping = False
self.__thread_ident = current_thread().ident
self.slots_handler = SlotsHandler(self)
self._callback_executor = CallbackExecutor()
self.__cb_called = False
self.__cb_pending = None
self.cancellable = CancellableSubprocess()
self._sysid = None
self.retry = Retry(max_tries=-1, deadline=config['retry_timeout']/2.0, max_delay=1,
retry_exceptions=PostgresConnectionException)
# Retry 'pg_is_in_recovery()' only once
self._is_leader_retry = Retry(max_tries=1, deadline=config['retry_timeout']/2.0, max_delay=1,
retry_exceptions=PostgresConnectionException)
self._role_lock = Lock()
self.set_role(self.get_postgres_role_from_data_directory())
self._state_entry_timestamp = None
self._cluster_info_state = {}
self._cached_replica_timeline = None
# Last known running process
self._postmaster_proc = None
if self.is_running():
self.set_state('running')
self.set_role('master' if self.is_leader() else 'replica')
self.config.write_postgresql_conf() # we are "joining" already running postgres
hba_saved = self.config.replace_pg_hba()
ident_saved = self.config.replace_pg_ident()
if hba_saved or ident_saved:
self.reload()
elif self.role == 'master':
self.set_role('demoted')
@property
def create_replica_methods(self):
return self.config.get('create_replica_methods', []) or self.config.get('create_replica_method', [])
@property
def major_version(self):
return self._major_version
@property
def database(self):
return self._database
@property
def data_dir(self):
return self._data_dir
@property
def callback(self):
return self.config.get('callbacks') or {}
@property
def wal_dir(self):
return os.path.join(self._data_dir, 'pg_' + self.wal_name)
@property
def wal_name(self):
return 'wal' if self._major_version >= 100000 else 'xlog'
@property
def lsn_name(self):
return 'lsn' if self._major_version >= 100000 else 'location'
@property
def cluster_info_query(self):
if self._major_version >= 90600:
extra = (", CASE WHEN latest_end_lsn IS NULL THEN NULL ELSE received_tli END,"
" slot_name, conninfo FROM pg_catalog.pg_stat_get_wal_receiver()")
if self.role == 'standby_leader':
extra = "timeline_id" + extra + ", pg_catalog.pg_control_checkpoint()"
else:
extra = "0" + extra
else:
extra = "0, NULL, NULL, NULL"
return ("SELECT " + self.TL_LSN + ", {2}").format(self.wal_name, self.lsn_name, extra)
def _version_file_exists(self):
return not self.data_directory_empty() and os.path.isfile(self._version_file)
def get_major_version(self):
if self._version_file_exists():
try:
with open(self._version_file) as f:
return postgres_major_version_to_int(f.read().strip())
except Exception:
logger.exception('Failed to read PG_VERSION from %s', self._data_dir)
return 0
def pgcommand(self, cmd):
"""Returns path to the specified PostgreSQL command"""
return os.path.join(self._bin_dir, cmd)
def pg_ctl(self, cmd, *args, **kwargs):
"""Builds and executes pg_ctl command
:returns: `!True` when return_code == 0, otherwise `!False`"""
pg_ctl = [self.pgcommand('pg_ctl'), cmd]
return subprocess.call(pg_ctl + ['-D', self._data_dir] + list(args), **kwargs) == 0
def pg_isready(self):
"""Runs pg_isready to see if PostgreSQL is accepting connections.
:returns: 'ok' if PostgreSQL is up, 'reject' if starting up, 'no_resopnse' if not up."""
r = self.config.local_connect_kwargs
cmd = [self.pgcommand('pg_isready'), '-p', r['port'], '-d', self._database]
# Host is not set if we are connecting via default unix socket
if 'host' in r:
cmd.extend(['-h', r['host']])
# We only need the username because pg_isready does not try to authenticate
if 'user' in r:
cmd.extend(['-U', r['user']])
ret = subprocess.call(cmd)
return_codes = {0: STATE_RUNNING,
1: STATE_REJECT,
2: STATE_NO_RESPONSE,
3: STATE_UNKNOWN}
return return_codes.get(ret, STATE_UNKNOWN)
def reload_config(self, config, sighup=False):
self.config.reload_config(config, sighup)
self._is_leader_retry.deadline = self.retry.deadline = config['retry_timeout']/2.0
@property
def pending_restart(self):
return self._pending_restart
def set_pending_restart(self, value):
self._pending_restart = value
@property
def sysid(self):
if not self._sysid and not self.bootstrapping:
data = self.controldata()
self._sysid = data.get('Database system identifier', "")
return self._sysid
def get_postgres_role_from_data_directory(self):
if self.data_directory_empty() or not self.controldata():
return 'uninitialized'
elif self.config.recovery_conf_exists():
return 'replica'
else:
return 'master'
@property
def server_version(self):
return self._connection.server_version
def connection(self):
return self._connection.get()
def set_connection_kwargs(self, kwargs):
self._connection.set_conn_kwargs(kwargs)
def _query(self, sql, *params):
"""We are always using the same cursor, therefore this method is not thread-safe!!!
You can call it from different threads only if you are holding explicit `AsyncExecutor` lock,
because the main thread is always holding this lock when running HA cycle."""
cursor = None
try:
cursor = self._connection.cursor()
cursor.execute(sql, params)
return cursor
except psycopg2.Error as e:
if cursor and cursor.connection.closed == 0:
                # When connected via unix socket, psycopg2 can't recognize 'connection lost'
                # and leaves `_cursor_holder.connection.closed == 0`, but psycopg2.OperationalError
                # is still raised (which is correct). It doesn't make sense to continue with the existing
# connection and we will close it, to avoid its reuse by the `cursor` method.
if isinstance(e, psycopg2.OperationalError):
self._connection.close()
else:
raise e
if self.state == 'restarting':
raise RetryFailedError('cluster is being restarted')
raise PostgresConnectionException('connection problems')
def query(self, sql, *args, **kwargs):
if not kwargs.get('retry', True):
return self._query(sql, *args)
try:
return self.retry(self._query, sql, *args)
except RetryFailedError as e:
raise PostgresConnectionException(str(e))
def pg_control_exists(self):
return os.path.isfile(self._pg_control)
def data_directory_empty(self):
if self.pg_control_exists():
return False
return data_directory_is_empty(self._data_dir)
def replica_method_options(self, method):
return deepcopy(self.config.get(method, {}))
def replica_method_can_work_without_replication_connection(self, method):
return method != 'basebackup' and self.replica_method_options(method).get('no_master')
def can_create_replica_without_replication_connection(self, replica_methods=None):
""" go through the replication methods to see if there are ones
        that do not require a working replication connection.
"""
if replica_methods is None:
replica_methods = self.create_replica_methods
return any(self.replica_method_can_work_without_replication_connection(m) for m in replica_methods)
def reset_cluster_info_state(self):
self._cluster_info_state = {}
def _cluster_info_state_get(self, name):
if not self._cluster_info_state:
try:
result = self._is_leader_retry(self._query, self.cluster_info_query).fetchone()
self._cluster_info_state = dict(zip(['timeline', 'wal_position', 'replayed_location',
'received_location', 'replay_paused', 'pg_control_timeline',
'received_tli', 'slot_name', 'conninfo'], result))
except RetryFailedError as e: # SELECT failed two times
self._cluster_info_state = {'error': str(e)}
if not self.is_starting() and self.pg_isready() == STATE_REJECT:
self.set_state('starting')
if 'error' in self._cluster_info_state:
raise PostgresConnectionException(self._cluster_info_state['error'])
return self._cluster_info_state.get(name)
def replayed_location(self):
return self._cluster_info_state_get('replayed_location')
def received_location(self):
return self._cluster_info_state_get('received_location')
def primary_slot_name(self):
return self._cluster_info_state_get('slot_name')
def primary_conninfo(self):
return self._cluster_info_state_get('conninfo')
def received_timeline(self):
return self._cluster_info_state_get('received_tli')
def is_leader(self):
return bool(self._cluster_info_state_get('timeline'))
def pg_control_timeline(self):
try:
return int(self.controldata().get("Latest checkpoint's TimeLineID"))
except (TypeError, ValueError):
logger.exception('Failed to parse timeline from pg_controldata output')
def latest_checkpoint_location(self):
"""Returns checkpoint location for the cleanly shut down primary"""
data = self.controldata()
lsn = data.get('Latest checkpoint location')
if data.get('Database cluster state') == 'shut down' and lsn:
try:
return str(parse_lsn(lsn))
except (IndexError, ValueError) as e:
logger.error('Exception when parsing lsn %s: %r', lsn, e)
def is_running(self):
"""Returns PostmasterProcess if one is running on the data directory or None. If most recently seen process
is running updates the cached process based on pid file."""
if self._postmaster_proc:
if self._postmaster_proc.is_running():
return self._postmaster_proc
self._postmaster_proc = None
# we noticed that postgres was restarted, force syncing of replication
self.slots_handler.schedule()
self._postmaster_proc = PostmasterProcess.from_pidfile(self._data_dir)
return self._postmaster_proc
@property
def cb_called(self):
return self.__cb_called
def call_nowait(self, cb_name):
""" pick a callback command and call it without waiting for it to finish """
if self.bootstrapping:
return
if cb_name in (ACTION_ON_START, ACTION_ON_STOP, ACTION_ON_RESTART, ACTION_ON_ROLE_CHANGE):
self.__cb_called = True
if self.callback and cb_name in self.callback:
cmd = self.callback[cb_name]
try:
cmd = shlex.split(self.callback[cb_name]) + [cb_name, self.role, self.scope]
self._callback_executor.call(cmd)
except Exception:
logger.exception('callback %s %s %s %s failed', cmd, cb_name, self.role, self.scope)
@property
def role(self):
with self._role_lock:
return self._role
def set_role(self, value):
with self._role_lock:
self._role = value
@property
def state(self):
with self._state_lock:
return self._state
def set_state(self, value):
with self._state_lock:
self._state = value
self._state_entry_timestamp = time.time()
def time_in_state(self):
return time.time() - self._state_entry_timestamp
def is_starting(self):
return self.state == 'starting'
def wait_for_port_open(self, postmaster, timeout):
"""Waits until PostgreSQL opens ports."""
for _ in polling_loop(timeout):
if self.cancellable.is_cancelled:
return False
if not postmaster.is_running():
logger.error('postmaster is not running')
self.set_state('start failed')
return False
isready = self.pg_isready()
if isready != STATE_NO_RESPONSE:
if isready not in [STATE_REJECT, STATE_RUNNING]:
logger.warning("Can't determine PostgreSQL startup status, assuming running")
return True
logger.warning("Timed out waiting for PostgreSQL to start")
return False
def start(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Start PostgreSQL
Waits for postmaster to open ports or terminate so pg_isready can be used to check startup completion
or failure.
:returns: True if start was initiated and postmaster ports are open, False if start failed"""
# make sure we close all connections established against
# the former node, otherwise, we might get a stalled one
# after kill -9, which would report incorrect data to
# patroni.
self._connection.close()
if self.is_running():
logger.error('Cannot start PostgreSQL because one is already running.')
self.set_state('starting')
return True
if not block_callbacks:
self.__cb_pending = ACTION_ON_START
self.set_role(role or self.get_postgres_role_from_data_directory())
self.set_state('starting')
self._pending_restart = False
try:
if not self._major_version:
self.configure_server_parameters()
configuration = self.config.effective_configuration
except Exception:
return None
self.config.check_directories()
self.config.write_postgresql_conf(configuration)
self.config.resolve_connection_addresses()
self.config.replace_pg_hba()
self.config.replace_pg_ident()
options = ['--{0}={1}'.format(p, configuration[p]) for p in self.config.CMDLINE_OPTIONS
if p in configuration and p not in ('wal_keep_segments', 'wal_keep_size')]
if self.cancellable.is_cancelled:
return False
with task or null_context():
if task and task.is_cancelled:
logger.info("PostgreSQL start cancelled.")
return False
self._postmaster_proc = PostmasterProcess.start(self.pgcommand('postgres'),
self._data_dir,
self.config.postgresql_conf,
options)
if task:
task.complete(self._postmaster_proc)
start_timeout = timeout
if not start_timeout:
try:
start_timeout = float(self.config.get('pg_ctl_timeout', 60))
except ValueError:
start_timeout = 60
# We want postmaster to open ports before we continue
if not self._postmaster_proc or not self.wait_for_port_open(self._postmaster_proc, start_timeout):
return False
ret = self.wait_for_startup(start_timeout)
if ret is not None:
return ret
elif timeout is not None:
return False
else:
return None
def checkpoint(self, connect_kwargs=None, timeout=None):
check_not_is_in_recovery = connect_kwargs is not None
connect_kwargs = connect_kwargs or self.config.local_connect_kwargs
for p in ['connect_timeout', 'options']:
connect_kwargs.pop(p, None)
if timeout:
connect_kwargs['connect_timeout'] = timeout
try:
with get_connection_cursor(**connect_kwargs) as cur:
cur.execute("SET statement_timeout = 0")
if check_not_is_in_recovery:
cur.execute('SELECT pg_catalog.pg_is_in_recovery()')
if cur.fetchone()[0]:
return 'is_in_recovery=true'
return cur.execute('CHECKPOINT')
except psycopg2.Error:
logger.exception('Exception during CHECKPOINT')
            return 'not accessible or not healthy'
def stop(self, mode='fast', block_callbacks=False, checkpoint=None, on_safepoint=None, stop_timeout=None):
"""Stop PostgreSQL
Supports a callback when a safepoint is reached. A safepoint is when no user backend can return a successful
commit to users. Currently this means we wait for user backends to close. But in the future alternate mechanisms
could be added.
:param on_safepoint: This callback is called when no user backends are running.
"""
if checkpoint is None:
checkpoint = False if mode == 'immediate' else True
success, pg_signaled = self._do_stop(mode, block_callbacks, checkpoint, on_safepoint, stop_timeout)
if success:
# block_callbacks is used during restart to avoid
# running start/stop callbacks in addition to restart ones
if not block_callbacks:
self.set_state('stopped')
if pg_signaled:
self.call_nowait(ACTION_ON_STOP)
else:
logger.warning('pg_ctl stop failed')
self.set_state('stop failed')
return success
def _do_stop(self, mode, block_callbacks, checkpoint, on_safepoint, stop_timeout):
postmaster = self.is_running()
if not postmaster:
if on_safepoint:
on_safepoint()
return True, False
if checkpoint and not self.is_starting():
self.checkpoint(timeout=stop_timeout)
if not block_callbacks:
self.set_state('stopping')
# Send signal to postmaster to stop
success = postmaster.signal_stop(mode, self.pgcommand('pg_ctl'))
if success is not None:
if success and on_safepoint:
on_safepoint()
return success, True
# We can skip safepoint detection if we don't have a callback
if on_safepoint:
# Wait for our connection to terminate so we can be sure that no new connections are being initiated
self._wait_for_connection_close(postmaster)
postmaster.wait_for_user_backends_to_close()
on_safepoint()
try:
postmaster.wait(timeout=stop_timeout)
except TimeoutExpired:
logger.warning("Timeout during postmaster stop, aborting Postgres.")
if not self.terminate_postmaster(postmaster, mode, stop_timeout):
postmaster.wait()
return True, True
def terminate_postmaster(self, postmaster, mode, stop_timeout):
if mode in ['fast', 'smart']:
try:
success = postmaster.signal_stop('immediate', self.pgcommand('pg_ctl'))
if success:
return True
postmaster.wait(timeout=stop_timeout)
return True
except TimeoutExpired:
pass
logger.warning("Sending SIGKILL to Postmaster and its children")
return postmaster.signal_kill()
def terminate_starting_postmaster(self, postmaster):
"""Terminates a postmaster that has not yet opened ports or possibly even written a pid file. Blocks
until the process goes away."""
postmaster.signal_stop('immediate', self.pgcommand('pg_ctl'))
postmaster.wait()
def _wait_for_connection_close(self, postmaster):
try:
with self.connection().cursor() as cur:
while postmaster.is_running(): # Need a timeout here?
cur.execute("SELECT 1")
time.sleep(STOP_POLLING_INTERVAL)
except psycopg2.Error:
pass
def reload(self, block_callbacks=False):
ret = self.pg_ctl('reload')
if ret and not block_callbacks:
self.call_nowait(ACTION_ON_RELOAD)
return ret
def check_for_startup(self):
"""Checks PostgreSQL status and returns if PostgreSQL is in the middle of startup."""
return self.is_starting() and not self.check_startup_state_changed()
def check_startup_state_changed(self):
"""Checks if PostgreSQL has completed starting up or failed or still starting.
Should only be called when state == 'starting'
:returns: True if state was changed from 'starting'
"""
ready = self.pg_isready()
if ready == STATE_REJECT:
return False
elif ready == STATE_NO_RESPONSE:
ret = not self.is_running()
if ret:
self.set_state('start failed')
self.slots_handler.schedule(False) # TODO: can remove this?
self.config.save_configuration_files(True) # TODO: maybe remove this?
return ret
else:
if ready != STATE_RUNNING:
# Bad configuration or unexpected OS error. No idea of PostgreSQL status.
# Let the main loop of run cycle clean up the mess.
logger.warning("%s status returned from pg_isready",
"Unknown" if ready == STATE_UNKNOWN else "Invalid")
self.set_state('running')
self.slots_handler.schedule()
self.config.save_configuration_files(True)
# TODO: __cb_pending can be None here after PostgreSQL restarts on its own. Do we want to call the callback?
# Previously we didn't even notice.
action = self.__cb_pending or ACTION_ON_START
self.call_nowait(action)
self.__cb_pending = None
return True
def wait_for_startup(self, timeout=None):
"""Waits for PostgreSQL startup to complete or fail.
:returns: True if start was successful, False otherwise"""
if not self.is_starting():
# Should not happen
logger.warning("wait_for_startup() called when not in starting state")
while not self.check_startup_state_changed():
if self.cancellable.is_cancelled or timeout and self.time_in_state() > timeout:
return None
time.sleep(1)
return self.state == 'running'
def restart(self, timeout=None, task=None, block_callbacks=False, role=None):
"""Restarts PostgreSQL.
When timeout parameter is set the call will block either until PostgreSQL has started, failed to start or
timeout arrives.
:returns: True when restart was successful and timeout did not expire when waiting.
"""
self.set_state('restarting')
if not block_callbacks:
self.__cb_pending = ACTION_ON_RESTART
ret = self.stop(block_callbacks=True) and self.start(timeout, task, True, role)
if not ret and not self.is_starting():
self.set_state('restart failed ({0})'.format(self.state))
return ret
def is_healthy(self):
if not self.is_running():
logger.warning('Postgresql is not running.')
return False
return True
def get_guc_value(self, name):
cmd = [self.pgcommand('postgres'), '-D', self._data_dir, '-C', name]
try:
data = subprocess.check_output(cmd)
if data:
return data.decode('utf-8').strip()
except Exception as e:
logger.error('Failed to execute %s: %r', cmd, e)
def controldata(self):
""" return the contents of pg_controldata, or non-True value if pg_controldata call failed """
# Don't try to call pg_controldata during backup restore
if self._version_file_exists() and self.state != 'creating replica':
try:
env = os.environ.copy()
env.update(LANG='C', LC_ALL='C')
data = subprocess.check_output([self.pgcommand('pg_controldata'), self._data_dir], env=env)
if data:
data = filter(lambda e: ':' in e, data.decode('utf-8').splitlines())
                    # pg_controldata output depends on major version. Some of the parameters are prefixed by 'Current '
return {k.replace('Current ', '', 1): v.strip() for k, v in map(lambda e: e.split(':', 1), data)}
except subprocess.CalledProcessError:
logger.exception("Error when calling pg_controldata")
return {}
@contextmanager
def get_replication_connection_cursor(self, host='localhost', port=5432, **kwargs):
conn_kwargs = self.config.replication.copy()
conn_kwargs.update(host=host, port=int(port) if port else None, user=conn_kwargs.pop('username'),
connect_timeout=3, replication=1, options='-c statement_timeout=2000')
with get_connection_cursor(**conn_kwargs) as cur:
yield cur
def get_replica_timeline(self):
try:
with self.get_replication_connection_cursor(**self.config.local_replication_address) as cur:
cur.execute('IDENTIFY_SYSTEM')
return cur.fetchone()[1]
except Exception:
logger.exception('Can not fetch local timeline and lsn from replication connection')
def replica_cached_timeline(self, master_timeline):
if not self._cached_replica_timeline or not master_timeline or self._cached_replica_timeline != master_timeline:
self._cached_replica_timeline = self.get_replica_timeline()
return self._cached_replica_timeline
def get_master_timeline(self):
return self._cluster_info_state_get('timeline')
def get_history(self, timeline):
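        # Timeline history files contain one tab-separated line per timeline switch,
        # "<parent_tli>\t<switch_lsn>\t<reason>"; parse_history() yields one parsed entry per line.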
history_path = os.path.join(self.wal_dir, '{0:08X}.history'.format(timeline))
history_mtime = mtime(history_path)
if history_mtime:
try:
with open(history_path, 'r') as f:
history = f.read()
history = list(parse_history(history))
if history[-1][0] == timeline - 1:
history_mtime = datetime.fromtimestamp(history_mtime).replace(tzinfo=tz.tzlocal())
history[-1].append(history_mtime.isoformat())
return history
except Exception:
logger.exception('Failed to read and parse %s', (history_path,))
def follow(self, member, role='replica', timeout=None, do_reload=False):
recovery_params = self.config.build_recovery_params(member)
self.config.write_recovery_conf(recovery_params)
        # When demoting the master or standby_leader to a replica, or promoting a replica to a standby_leader,
# and we know for sure that postgres was already running before, we will only execute on_role_change
# callback and prevent execution of on_restart/on_start callback.
# If the role remains the same (replica or standby_leader), we will execute on_start or on_restart
change_role = self.cb_called and (self.role in ('master', 'demoted') or
not {'standby_leader', 'replica'} - {self.role, role})
if change_role:
self.__cb_pending = ACTION_NOOP
if self.is_running():
if do_reload:
self.config.write_postgresql_conf()
if self.reload(block_callbacks=change_role) and change_role:
self.set_role(role)
else:
self.restart(block_callbacks=change_role, role=role)
else:
self.start(timeout=timeout, block_callbacks=change_role, role=role)
if change_role:
# TODO: postpone this until start completes, or maybe do even earlier
self.call_nowait(ACTION_ON_ROLE_CHANGE)
return True
def _wait_promote(self, wait_seconds):
for _ in polling_loop(wait_seconds):
data = self.controldata()
if data.get('Database cluster state') == 'in production':
return True
def _pre_promote(self):
"""
Runs a fencing script after the leader lock is acquired but before the replica is promoted.
If the script exits with a non-zero code, promotion does not happen and the leader key is removed from DCS.
"""
cmd = self.config.get('pre_promote')
if not cmd:
return True
ret = self.cancellable.call(shlex.split(cmd))
if ret is not None:
logger.info('pre_promote script `%s` exited with %s', cmd, ret)
return ret == 0
def promote(self, wait_seconds, task, on_success=None, access_is_restricted=False):
if self.role == 'master':
return True
ret = self._pre_promote()
with task:
if task.is_cancelled:
return False
task.complete(ret)
if ret is False:
return False
if self.cancellable.is_cancelled:
logger.info("PostgreSQL promote cancelled.")
return False
ret = self.pg_ctl('promote', '-W')
if ret:
self.set_role('master')
if on_success is not None:
on_success()
if not access_is_restricted:
self.call_nowait(ACTION_ON_ROLE_CHANGE)
ret = self._wait_promote(wait_seconds)
return ret
@staticmethod
def _wal_position(is_leader, wal_position, received_location, replayed_location):
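        # On the leader this is the current write position; on a replica it is the most
        # advanced of the received and replayed positions.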
return wal_position if is_leader else max(received_location or 0, replayed_location or 0)
def timeline_wal_position(self):
# This method could be called from different threads (simultaneously with some other `_query` calls).
# If it is called not from main thread we will create a new cursor to execute statement.
if current_thread().ident == self.__thread_ident:
timeline = self._cluster_info_state_get('timeline')
wal_position = self._cluster_info_state_get('wal_position')
replayed_location = self.replayed_location()
received_location = self.received_location()
pg_control_timeline = self._cluster_info_state_get('pg_control_timeline')
else:
with self.connection().cursor() as cursor:
cursor.execute(self.cluster_info_query)
(timeline, wal_position, replayed_location,
received_location, _, pg_control_timeline) = cursor.fetchone()[:6]
wal_position = self._wal_position(timeline, wal_position, received_location, replayed_location)
return (timeline, wal_position, pg_control_timeline)
def postmaster_start_time(self):
try:
query = "SELECT " + self.POSTMASTER_START_TIME
if current_thread().ident == self.__thread_ident:
return self.query(query).fetchone()[0]
with self.connection().cursor() as cursor:
cursor.execute(query)
return cursor.fetchone()[0]
except psycopg2.Error:
return None
def last_operation(self):
return str(self._wal_position(self.is_leader(), self._cluster_info_state_get('wal_position'),
self.received_location(), self.replayed_location()))
def configure_server_parameters(self):
self._major_version = self.get_major_version()
self.config.setup_server_parameters()
return True
def pg_wal_realpath(self):
"""Returns a dict containing the symlink (key) and target (value) for the wal directory"""
links = {}
for pg_wal_dir in ('pg_xlog', 'pg_wal'):
pg_wal_path = os.path.join(self._data_dir, pg_wal_dir)
if os.path.exists(pg_wal_path) and os.path.islink(pg_wal_path):
pg_wal_realpath = os.path.realpath(pg_wal_path)
links[pg_wal_path] = pg_wal_realpath
return links
def pg_tblspc_realpaths(self):
"""Returns a dict containing the symlink (key) and target (values) for the tablespaces"""
links = {}
pg_tblsp_dir = os.path.join(self._data_dir, 'pg_tblspc')
if os.path.exists(pg_tblsp_dir):
for tsdn in os.listdir(pg_tblsp_dir):
pg_tsp_path = os.path.join(pg_tblsp_dir, tsdn)
if parse_int(tsdn) and os.path.islink(pg_tsp_path):
pg_tsp_rpath = os.path.realpath(pg_tsp_path)
links[pg_tsp_path] = pg_tsp_rpath
return links
def move_data_directory(self):
if os.path.isdir(self._data_dir) and not self.is_running():
try:
postfix = time.strftime('%Y-%m-%d-%H-%M-%S')
# let's see if the wal directory is a symlink, in this case we
# should move the target
for (source, pg_wal_realpath) in self.pg_wal_realpath().items():
logger.info('renaming WAL directory and updating symlink: %s', pg_wal_realpath)
new_name = '{0}_{1}'.format(pg_wal_realpath, postfix)
os.rename(pg_wal_realpath, new_name)
os.unlink(source)
                    os.symlink(new_name, source)
# Move user defined tablespace directory
for (source, pg_tsp_rpath) in self.pg_tblspc_realpaths().items():
logger.info('renaming user defined tablespace directory and updating symlink: %s', pg_tsp_rpath)
new_name = '{0}_{1}'.format(pg_tsp_rpath, postfix)
os.rename(pg_tsp_rpath, new_name)
os.unlink(source)
                    os.symlink(new_name, source)
new_name = '{0}_{1}'.format(self._data_dir, postfix)
logger.info('renaming data directory to %s', new_name)
os.rename(self._data_dir, new_name)
except OSError:
logger.exception("Could not rename data directory %s", self._data_dir)
def remove_data_directory(self):
self.set_role('uninitialized')
logger.info('Removing data directory: %s', self._data_dir)
try:
if os.path.islink(self._data_dir):
os.unlink(self._data_dir)
elif not os.path.exists(self._data_dir):
return
elif os.path.isfile(self._data_dir):
os.remove(self._data_dir)
elif os.path.isdir(self._data_dir):
# let's see if wal directory is a symlink, in this case we
# should clean the target
for pg_wal_realpath in self.pg_wal_realpath().values():
logger.info('Removing WAL directory: %s', pg_wal_realpath)
shutil.rmtree(pg_wal_realpath)
# Remove user defined tablespace directories
for pg_tsp_rpath in self.pg_tblspc_realpaths().values():
logger.info('Removing user defined tablespace directory: %s', pg_tsp_rpath)
shutil.rmtree(pg_tsp_rpath, ignore_errors=True)
shutil.rmtree(self._data_dir)
except (IOError, OSError):
logger.exception('Could not remove data directory %s', self._data_dir)
self.move_data_directory()
def _get_synchronous_commit_param(self):
return self.query("SHOW synchronous_commit").fetchone()[0]
def pick_synchronous_standby(self, cluster, sync_node_count=1):
"""Finds the best candidate to be the synchronous standby.
Current synchronous standby is always preferred, unless it has disconnected or does not want to be a
synchronous standby any longer.
:returns tuple of candidates list and synchronous standby list.
"""
if self._major_version < 90600:
sync_node_count = 1
members = {m.name.lower(): m for m in cluster.members}
candidates = []
sync_nodes = []
# Pick candidates based on who has higher replay/remote_write/flush lsn.
sync_commit_par = self._get_synchronous_commit_param()
sort_col = {'remote_apply': 'replay', 'remote_write': 'write'}.get(sync_commit_par, 'flush')
# pg_stat_replication.sync_state has 4 possible states - async, potential, quorum, sync.
# Sort clause "ORDER BY sync_state DESC" is to get the result in required order and to keep
# the result consistent in case if a synchronous standby member is slowed down OR async node
# receiving changes faster than the sync member (very rare but possible). Such cases would
# trigger sync standby member swapping frequently and the sort on sync_state desc should
# help in keeping the query result consistent.
for app_name, state, sync_state in self.query(
"SELECT pg_catalog.lower(application_name), state, sync_state"
" FROM pg_catalog.pg_stat_replication"
" WHERE state = 'streaming'"
" ORDER BY sync_state DESC, {0}_{1} DESC".format(sort_col, self.lsn_name)):
member = members.get(app_name)
if not member or member.tags.get('nosync', False):
continue
candidates.append(member.name)
if sync_state == 'sync':
sync_nodes.append(member.name)
if len(candidates) >= sync_node_count:
break
return candidates, sync_nodes
def schedule_sanity_checks_after_pause(self):
"""
After coming out of pause we have to:
1. configure server parameters if necessary
2. sync replication slots, because it might happen that slots were removed
3. get new 'Database system identifier' to make sure that it wasn't changed
"""
if not self._major_version:
self.configure_server_parameters()
self.slots_handler.schedule()
self._sysid = None
| 41.420118 | 120 | 0.628238 |
aceecf1cc6cab3d6bbf740ee9a1fd23d02e9deed | 3,671 | py | Python | research/astronet/astronet/util/config_util.py | kopankom/models | 3f78f4cfd21c786c62bf321c07830071027ebb5e | [
"Apache-2.0"
] | 5 | 2018-04-03T15:54:54.000Z | 2020-02-01T08:19:38.000Z | research/astronet/astronet/util/config_util.py | kopankom/models | 3f78f4cfd21c786c62bf321c07830071027ebb5e | [
"Apache-2.0"
] | 1 | 2021-03-31T19:32:00.000Z | 2021-03-31T19:32:00.000Z | research/astronet/astronet/util/config_util.py | kopankom/models | 3f78f4cfd21c786c62bf321c07830071027ebb5e | [
"Apache-2.0"
] | 3 | 2018-04-27T15:37:08.000Z | 2021-12-06T12:00:53.000Z | # Copyright 2018 The TensorFlow Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for configurations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os.path
import tensorflow as tf
def parse_json(json_string_or_file):
"""Parses values from a JSON string or JSON file.
This function is useful for command line flags containing configuration
overrides. Using this function, the flag can be passed either as a JSON string
(e.g. '{"learning_rate": 1.0}') or the path to a JSON configuration file.
Args:
json_string_or_file: A JSON serialized string OR the path to a JSON file.
Returns:
A dictionary; the parsed JSON.
Raises:
ValueError: If the JSON could not be parsed.
"""
# First, attempt to parse the string as a JSON dict.
try:
json_dict = json.loads(json_string_or_file)
except ValueError as literal_json_parsing_error:
try:
# Otherwise, try to use it as a path to a JSON file.
with tf.gfile.Open(json_string_or_file) as f:
json_dict = json.load(f)
except ValueError as json_file_parsing_error:
raise ValueError("Unable to parse the content of the json file %s. "
"Parsing error: %s." % (json_string_or_file,
json_file_parsing_error.message))
except tf.gfile.FileError:
message = ("Unable to parse the input parameter neither as literal "
"JSON nor as the name of a file that exists.\n"
"JSON parsing error: %s\n\n Input parameter:\n%s." %
(literal_json_parsing_error.message, json_string_or_file))
raise ValueError(message)
return json_dict
def log_and_save_config(config, output_dir):
"""Logs and writes a JSON-serializable configuration object.
Args:
config: A JSON-serializable object.
output_dir: Destination directory.
"""
if hasattr(config, "to_json") and callable(config.to_json):
config_json = config.to_json(indent=2)
else:
config_json = json.dumps(config, indent=2)
tf.logging.info("config: %s", config_json)
tf.gfile.MakeDirs(output_dir)
with tf.gfile.Open(os.path.join(output_dir, "config.json"), "w") as f:
f.write(config_json)
def unflatten(flat_config):
"""Transforms a flat configuration dictionary into a nested dictionary.
Example:
{
"a": 1,
"b.c": 2,
"b.d.e": 3,
"b.d.f": 4,
}
would be transformed to:
{
"a": 1,
"b": {
"c": 2,
"d": {
"e": 3,
"f": 4,
}
}
}
Args:
flat_config: A dictionary with strings as keys where nested configuration
parameters are represented with period-separated names.
Returns:
A dictionary nested according to the keys of the input dictionary.
"""
config = {}
for path, value in flat_config.iteritems():
path = path.split(".")
final_key = path.pop()
nested_config = config
for key in path:
nested_config = nested_config.setdefault(key, {})
nested_config[final_key] = value
return config
| 30.338843 | 80 | 0.675565 |
aceed224a5289b9dbd494294ebc2e8782972eed4 | 36,519 | py | Python | mlrun/model.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | ["Apache-2.0"] | null | null | null | mlrun/model.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | ["Apache-2.0"] | null | null | null | mlrun/model.py | Michaelliv/mlrun | f155836f71e86cfcc573bcf1aa35762d72feeb5a | ["Apache-2.0"] | null | null | null |
# Copyright 2018 Iguazio
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
import re
import time
import warnings
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from os import environ
from typing import Dict, List, Optional, Tuple, Union
import mlrun
from .config import config
from .utils import dict_to_json, dict_to_yaml, get_artifact_target
class ModelObj:
_dict_fields = []
@staticmethod
def _verify_list(param, name):
if not isinstance(param, list):
raise ValueError(f"parameter {name} must be a list")
@staticmethod
def _verify_dict(param, name, new_type=None):
if (
param is not None
and not isinstance(param, dict)
and not hasattr(param, "to_dict")
):
raise ValueError(f"parameter {name} must be a dict or object")
if new_type and (isinstance(param, dict) or param is None):
return new_type.from_dict(param)
return param
def to_dict(self, fields=None, exclude=None):
"""convert the object to a python dictionary"""
struct = {}
fields = fields or self._dict_fields
if not fields:
fields = list(inspect.signature(self.__init__).parameters.keys())
for t in fields:
if not exclude or t not in exclude:
val = getattr(self, t, None)
if val is not None and not (isinstance(val, dict) and not val):
if hasattr(val, "to_dict"):
val = val.to_dict()
if val:
struct[t] = val
else:
struct[t] = val
return struct
@classmethod
def from_dict(cls, struct=None, fields=None, deprecated_fields: dict = None):
"""create an object from a python dictionary"""
struct = {} if struct is None else struct
deprecated_fields = deprecated_fields or {}
fields = fields or cls._dict_fields
if not fields:
fields = list(inspect.signature(cls.__init__).parameters.keys())
new_obj = cls()
if struct:
for key, val in struct.items():
if key in fields and key not in deprecated_fields:
setattr(new_obj, key, val)
for deprecated_field, new_field in deprecated_fields.items():
field_value = struct.get(new_field) or struct.get(deprecated_field)
if field_value:
setattr(new_obj, new_field, field_value)
return new_obj
def to_yaml(self):
"""convert the object to yaml"""
return dict_to_yaml(self.to_dict())
def to_json(self):
"""convert the object to json"""
return dict_to_json(self.to_dict())
def to_str(self):
"""convert the object to string (with dict layout)"""
return self.__str__()
def __str__(self):
return str(self.to_dict())
def copy(self):
"""create a copy of the object"""
return deepcopy(self)
# model class for building ModelObj dictionaries
class ObjectDict:
def __init__(self, classes_map, default_kind=""):
self._children = OrderedDict()
self._default_kind = default_kind
self._classes_map = classes_map
def values(self):
return self._children.values()
def keys(self):
return self._children.keys()
def items(self):
return self._children.items()
def __len__(self):
return len(self._children)
def __iter__(self):
yield from self._children.keys()
def __getitem__(self, name):
return self._children[name]
def __setitem__(self, key, item):
self._children[key] = self._get_child_object(item, key)
def __delitem__(self, key):
del self._children[key]
def update(self, key, item):
child = self._get_child_object(item, key)
self._children[key] = child
return child
def to_dict(self):
return {k: v.to_dict() for k, v in self._children.items()}
@classmethod
def from_dict(cls, classes_map: dict, children=None, default_kind=""):
if children is None:
return cls(classes_map, default_kind)
if not isinstance(children, dict):
raise ValueError("children must be a dict")
new_obj = cls(classes_map, default_kind)
for name, child in children.items():
child_obj = new_obj._get_child_object(child, name)
new_obj._children[name] = child_obj
return new_obj
def _get_child_object(self, child, name):
if hasattr(child, "kind") and child.kind in self._classes_map.keys():
child.name = name
return child
elif isinstance(child, dict):
kind = child.get("kind", self._default_kind)
if kind not in self._classes_map.keys():
raise ValueError(f"illegal object kind {kind}")
child_obj = self._classes_map[kind].from_dict(child)
child_obj.name = name
return child_obj
else:
raise ValueError(f"illegal child (should be dict or child kind), {child}")
def to_yaml(self):
return dict_to_yaml(self.to_dict())
def to_json(self):
return dict_to_json(self.to_dict())
def to_str(self):
return self.__str__()
def __str__(self):
return str(self.to_dict())
def copy(self):
return deepcopy(self)
class ObjectList:
def __init__(self, child_class):
self._children = OrderedDict()
self._child_class = child_class
def values(self):
return self._children.values()
def keys(self):
return self._children.keys()
def items(self):
return self._children.items()
def __len__(self):
return len(self._children)
def __iter__(self):
yield from self._children.values()
def __getitem__(self, name):
if isinstance(name, int):
return list(self._children.values())[name]
return self._children[name]
def __setitem__(self, key, item):
self.update(item, key)
def __delitem__(self, key):
del self._children[key]
def to_dict(self):
# method used by ModelObj class to serialize the object to nested dict
return [t.to_dict() for t in self._children.values()]
@classmethod
def from_list(cls, child_class, children=None):
if children is None:
return cls(child_class)
if not isinstance(children, list):
raise ValueError("states must be a list")
new_obj = cls(child_class)
for child in children:
name, child_obj = new_obj._get_child_object(child)
new_obj._children[name] = child_obj
return new_obj
def _get_child_object(self, child):
if isinstance(child, self._child_class):
return child.name, child
elif isinstance(child, dict):
if "name" not in child.keys():
raise ValueError("illegal object no 'name' field")
child_obj = self._child_class.from_dict(child)
return child_obj.name, child_obj
else:
raise ValueError(f"illegal child (should be dict or child kind), {child}")
def update(self, child, name=None):
object_name, child_obj = self._get_child_object(child)
child_obj.name = name or object_name
self._children[child_obj.name] = child_obj
return child_obj
class BaseMetadata(ModelObj):
def __init__(
self,
name=None,
tag=None,
hash=None,
namespace=None,
project=None,
labels=None,
annotations=None,
categories=None,
updated=None,
):
self.name = name
self.tag = tag
self.hash = hash
self.namespace = namespace
self.project = project or config.default_project
self.labels = labels or {}
self.categories = categories or []
self.annotations = annotations or {}
self.updated = updated
class ImageBuilder(ModelObj):
"""An Image builder"""
def __init__(
self,
functionSourceCode=None,
source=None,
image=None,
base_image=None,
commands=None,
extra=None,
secret=None,
code_origin=None,
registry=None,
load_source_on_run=None,
origin_filename=None,
):
self.functionSourceCode = functionSourceCode #: functionSourceCode
self.codeEntryType = "" #: codeEntryType
self.codeEntryAttributes = "" #: codeEntryAttributes
self.source = source #: source
self.code_origin = code_origin #: code_origin
self.origin_filename = origin_filename
self.image = image #: image
self.base_image = base_image #: base_image
self.commands = commands or [] #: commands
self.extra = extra #: extra
self.secret = secret #: secret
self.registry = registry #: registry
self.load_source_on_run = load_source_on_run #: load_source_on_run
self.build_pod = None
class RunMetadata(ModelObj):
"""Run metadata"""
def __init__(
self,
uid=None,
name=None,
project=None,
labels=None,
annotations=None,
iteration=None,
):
self.uid = uid
self._iteration = iteration
self.name = name
self.project = project
self.labels = labels or {}
self.annotations = annotations or {}
@property
def iteration(self):
return self._iteration or 0
@iteration.setter
def iteration(self, iteration):
self._iteration = iteration
class HyperParamStrategies:
grid = "grid"
list = "list"
random = "random"
custom = "custom"
@staticmethod
def all():
return [
HyperParamStrategies.grid,
HyperParamStrategies.list,
HyperParamStrategies.random,
HyperParamStrategies.custom,
]
class HyperParamOptions(ModelObj):
"""Hyper Parameter Options
Parameters:
param_file (str): hyper params input file path/url, instead of inline
strategy (str): hyper param strategy - grid, list or random
selector (str): selection criteria for best result ([min|max.]<result>), e.g. max.accuracy
stop_condition (str): early stop condition e.g. "accuracy > 0.9"
parallel_runs (int): number of param combinations to run in parallel (over Dask)
dask_cluster_uri (str): db uri for a deployed dask cluster function, e.g. db://myproject/dask
max_iterations (int): max number of runs (in random strategy)
max_errors (int): max number of child runs errors for the overall job to fail
teardown_dask (bool): kill the dask cluster pods after the runs
"""
def __init__(
self,
param_file=None,
strategy=None,
selector: HyperParamStrategies = None,
stop_condition=None,
parallel_runs=None,
dask_cluster_uri=None,
max_iterations=None,
max_errors=None,
teardown_dask=None,
):
self.param_file = param_file
self.strategy = strategy
self.selector = selector
self.stop_condition = stop_condition
self.max_iterations = max_iterations
self.max_errors = max_errors
self.parallel_runs = parallel_runs
self.dask_cluster_uri = dask_cluster_uri
self.teardown_dask = teardown_dask
def validate(self):
if self.strategy and self.strategy not in HyperParamStrategies.all():
raise mlrun.errors.MLRunInvalidArgumentError(
f"illegal hyper param strategy, use {','.join(HyperParamStrategies.all())}"
)
if self.max_iterations and self.strategy != HyperParamStrategies.random:
raise mlrun.errors.MLRunInvalidArgumentError(
"max_iterations is only valid in random strategy"
)
class RunSpec(ModelObj):
"""Run specification"""
def __init__(
self,
parameters=None,
hyperparams=None,
param_file=None,
selector=None,
handler=None,
inputs=None,
outputs=None,
input_path=None,
output_path=None,
function=None,
secret_sources=None,
data_stores=None,
strategy=None,
verbose=None,
scrape_metrics=None,
hyper_param_options=None,
):
self._hyper_param_options = None
self._inputs = inputs
self._outputs = outputs
self.hyper_param_options = hyper_param_options
self.parameters = parameters or {}
self.hyperparams = hyperparams or {}
self.param_file = param_file
self.strategy = strategy
self.selector = selector
self.handler = handler
self.input_path = input_path
self.output_path = output_path
self.function = function
self._secret_sources = secret_sources or []
self._data_stores = data_stores
self.verbose = verbose
self.scrape_metrics = scrape_metrics
def to_dict(self, fields=None, exclude=None):
struct = super().to_dict(fields, exclude=["handler"])
if self.handler and isinstance(self.handler, str):
struct["handler"] = self.handler
return struct
def is_hyper_job(self):
param_file = self.param_file or self.hyper_param_options.param_file
return param_file or self.hyperparams
@property
def inputs(self):
return self._inputs
@inputs.setter
def inputs(self, inputs):
self._inputs = self._verify_dict(inputs, "inputs")
@property
def hyper_param_options(self) -> HyperParamOptions:
return self._hyper_param_options
@hyper_param_options.setter
def hyper_param_options(self, hyper_param_options):
self._hyper_param_options = self._verify_dict(
hyper_param_options, "hyper_param_options", HyperParamOptions
)
@property
def outputs(self):
return self._outputs
@outputs.setter
def outputs(self, outputs):
self._verify_list(outputs, "outputs")
self._outputs = outputs
@property
def secret_sources(self):
return self._secret_sources
@secret_sources.setter
def secret_sources(self, secret_sources):
self._verify_list(secret_sources, "secret_sources")
self._secret_sources = secret_sources
@property
def data_stores(self):
return self._data_stores
@data_stores.setter
def data_stores(self, data_stores):
self._verify_list(data_stores, "data_stores")
self._data_stores = data_stores
@property
def handler_name(self):
if self.handler:
if inspect.isfunction(self.handler):
return self.handler.__name__
else:
return str(self.handler)
return ""
class RunStatus(ModelObj):
"""Run status"""
def __init__(
self,
state=None,
error=None,
host=None,
commit=None,
status_text=None,
results=None,
artifacts=None,
start_time=None,
last_update=None,
iterations=None,
ui_url=None,
):
self.state = state or "created"
self.status_text = status_text
self.error = error
self.host = host
self.commit = commit
self.results = results
self.artifacts = artifacts
self.start_time = start_time
self.last_update = last_update
self.iterations = iterations
self.ui_url = ui_url
class RunTemplate(ModelObj):
"""Run template"""
def __init__(self, spec: RunSpec = None, metadata: RunMetadata = None):
self._spec = None
self._metadata = None
self.spec = spec
self.metadata = metadata
@property
def spec(self) -> RunSpec:
return self._spec
@spec.setter
def spec(self, spec):
self._spec = self._verify_dict(spec, "spec", RunSpec)
@property
def metadata(self) -> RunMetadata:
return self._metadata
@metadata.setter
def metadata(self, metadata):
self._metadata = self._verify_dict(metadata, "metadata", RunMetadata)
def with_params(self, **kwargs):
"""set task parameters using key=value, key2=value2, .."""
self.spec.parameters = kwargs
return self
def with_input(self, key, path):
"""set task data input, path is an Mlrun global DataItem uri
examples::
task.with_input("data", "/file-dir/path/to/file")
task.with_input("data", "s3://<bucket>/path/to/file")
task.with_input("data", "v3io://[<remote-host>]/<data-container>/path/to/file")
"""
if not self.spec.inputs:
self.spec.inputs = {}
self.spec.inputs[key] = path
return self
def with_hyper_params(
self,
hyperparams,
selector=None,
strategy: HyperParamStrategies = None,
**options,
):
"""set hyper param values and configurations,
see parameters in: :py:class:`HyperParamOptions`
example::
grid_params = {"p1": [2,4,1], "p2": [10,20]}
task = mlrun.new_task("grid-search")
task.with_hyper_params(grid_params, selector="max.accuracy")
"""
self.spec.hyperparams = hyperparams
self.spec.hyper_param_options = options
self.spec.hyper_param_options.selector = selector
self.spec.hyper_param_options.strategy = strategy
self.spec.hyper_param_options.validate()
return self
def with_param_file(
self,
param_file,
selector=None,
strategy: HyperParamStrategies = None,
**options,
):
"""set hyper param values (from a file url) and configurations,
see parameters in: :py:class:`HyperParamOptions`
example::
grid_params = "s3://<my-bucket>/path/to/params.json"
task = mlrun.new_task("grid-search")
task.with_param_file(grid_params, selector="max.accuracy")
"""
self.spec.hyper_param_options = options
self.spec.hyper_param_options.param_file = param_file
self.spec.hyper_param_options.selector = selector
self.spec.hyper_param_options.strategy = strategy
self.spec.hyper_param_options.validate()
return self
def with_secrets(self, kind, source):
"""register a secrets source (file, env or dict)
read secrets from a source provider to be used in workflows, example::
task.with_secrets('file', 'file.txt')
task.with_secrets('inline', {'key': 'val'})
task.with_secrets('env', 'ENV1,ENV2')
task.with_secrets('vault', ['secret1', 'secret2'...])
# If using with k8s secrets, the k8s secret is managed by MLRun, through the project-secrets
# mechanism. The secrets will be attached to the running pod as environment variables.
task.with_secrets('kubernetes', ['secret1', 'secret2'])
# If using an empty secrets list [] then all accessible secrets will be available.
task.with_secrets('vault', [])
# To use with Azure key vault, a k8s secret must be created with the following keys:
# kubectl -n <namespace> create secret generic azure-key-vault-secret \\
# --from-literal=tenant_id=<service principal tenant ID> \\
# --from-literal=client_id=<service principal client ID> \\
# --from-literal=secret=<service principal secret key>
task.with_secrets('azure_vault', {
'name': 'my-vault-name',
'k8s_secret': 'azure-key-vault-secret',
# An empty secrets list may be passed ('secrets': []) to access all vault secrets.
'secrets': ['secret1', 'secret2'...]
})
:param kind: secret type (file, inline, env)
:param source: secret data or link (see example)
:returns: The RunTemplate object
"""
if kind == "vault" and isinstance(source, list):
source = {"project": self.metadata.project, "secrets": source}
self.spec.secret_sources.append({"kind": kind, "source": source})
return self
def set_label(self, key, value):
"""set a key/value label for the task"""
self.metadata.labels[key] = str(value)
return self
def to_env(self):
environ["MLRUN_EXEC_CONFIG"] = self.to_json()
class RunObject(RunTemplate):
"""A run"""
def __init__(
self,
spec: RunSpec = None,
metadata: RunMetadata = None,
status: RunStatus = None,
):
super().__init__(spec, metadata)
self._status = None
self.status = status
self.outputs_wait_for_completion = True
@classmethod
def from_template(cls, template: RunTemplate):
return cls(template.spec, template.metadata)
@property
def status(self) -> RunStatus:
return self._status
@status.setter
def status(self, status):
self._status = self._verify_dict(status, "status", RunStatus)
def output(self, key):
"""return the value of a specific result or artifact by key"""
if self.outputs_wait_for_completion:
self.wait_for_completion()
if self.status.results and key in self.status.results:
return self.status.results.get(key)
artifact = self._artifact(key)
if artifact:
return get_artifact_target(artifact, self.metadata.project)
return None
@property
def ui_url(self) -> str:
"""UI URL (for relevant runtimes)"""
self.refresh()
if not self._status.ui_url:
print("UI currently not available (status={})".format(self._status.state))
return self._status.ui_url
@property
def outputs(self):
"""return a dict of outputs, result values and artifact uris"""
outputs = {}
if self.outputs_wait_for_completion:
self.wait_for_completion()
if self.status.results:
outputs = {k: v for k, v in self.status.results.items()}
if self.status.artifacts:
for a in self.status.artifacts:
outputs[a["key"]] = get_artifact_target(a, self.metadata.project)
return outputs
def artifact(self, key) -> "mlrun.DataItem":
"""return artifact DataItem by key"""
if self.outputs_wait_for_completion:
self.wait_for_completion()
artifact = self._artifact(key)
if artifact:
uri = get_artifact_target(artifact, self.metadata.project)
if uri:
return mlrun.get_dataitem(uri)
return None
def _artifact(self, key):
"""return artifact DataItem by key"""
if self.status.artifacts:
for a in self.status.artifacts:
if a["key"] == key:
return a
return None
def uid(self):
"""run unique id"""
return self.metadata.uid
def state(self):
"""current run state"""
if self.status.state in mlrun.runtimes.constants.RunStates.terminal_states():
return self.status.state
self.refresh()
return self.status.state or "unknown"
def refresh(self):
"""refresh run state from the db"""
db = mlrun.get_run_db()
run = db.read_run(
uid=self.metadata.uid,
project=self.metadata.project,
iter=self.metadata.iteration,
)
if run:
            self.status = RunStatus.from_dict(run.get("status", {}))
return self
def show(self):
"""show the current status widget, in jupyter notebook"""
db = mlrun.get_run_db()
db.list_runs(uid=self.metadata.uid, project=self.metadata.project).show()
def logs(self, watch=True, db=None):
"""return or watch on the run logs"""
if not db:
db = mlrun.get_run_db()
if not db:
print("DB is not configured, cannot show logs")
return None
if db.kind == "http":
state = db.watch_log(self.metadata.uid, self.metadata.project, watch=watch)
else:
state, text = db.get_log(self.metadata.uid, self.metadata.project)
if text:
print(text.decode())
if state:
print(f"final state: {state}")
return state
def wait_for_completion(self, sleep=3, timeout=0, raise_on_failure=True):
"""wait for async run to complete"""
total_time = 0
while True:
state = self.state()
if state in mlrun.runtimes.constants.RunStates.terminal_states():
break
time.sleep(sleep)
total_time += sleep
if timeout and total_time > timeout:
raise mlrun.errors.MLRunTimeoutError(
"Run did not reach terminal state on time"
)
if raise_on_failure and state != mlrun.runtimes.constants.RunStates.completed:
self.logs(watch=False)
raise mlrun.errors.MLRunRuntimeError(
f"task {self.metadata.name} did not complete (state={state})"
)
return state
@staticmethod
def create_uri(project: str, uid: str, iteration: Union[int, str], tag: str = ""):
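        # builds a run uri of the form <project>@<uid>#<iteration>[:tag],
        # e.g. create_uri("my-proj", "abc123", 0, "v1") -> "my-proj@abc123#0:v1"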
if tag:
tag = f":{tag}"
iteration = str(iteration)
return f"{project}@{uid}#{iteration}{tag}"
@staticmethod
def parse_uri(uri: str) -> Tuple[str, str, str, str]:
uri_pattern = (
r"^(?P<project>.*)@(?P<uid>.*)\#(?P<iteration>.*?)(:(?P<tag>.*))?$"
)
match = re.match(uri_pattern, uri)
if not match:
raise ValueError(
"Uri not in supported format <project>@<uid>#<iteration>[:tag]"
)
group_dict = match.groupdict()
return (
group_dict["project"],
group_dict["uid"],
group_dict["iteration"],
group_dict["tag"],
)
class EntrypointParam(ModelObj):
def __init__(
self,
name="",
type=None,
default=None,
doc="",
required=None,
choices: list = None,
):
self.name = name
self.type = type
self.default = default
self.doc = doc
self.required = required
self.choices = choices
class FunctionEntrypoint(ModelObj):
def __init__(self, name="", doc="", parameters=None, outputs=None, lineno=-1):
self.name = name
self.doc = doc
self.parameters = [] if parameters is None else parameters
self.outputs = [] if outputs is None else outputs
self.lineno = lineno
# TODO: remove in 0.9.0
def NewTask(
name=None,
project=None,
handler=None,
params=None,
hyper_params=None,
param_file=None,
selector=None,
strategy=None,
inputs=None,
outputs=None,
in_path=None,
out_path=None,
artifact_path=None,
secrets=None,
base=None,
):
"""Creates a new task - see new_task
"""
warnings.warn(
"NewTask will be deprecated in 0.7.0, and will be removed in 0.9.0, use new_task instead",
# TODO: In 0.7.0 and replace NewTask to new_task in examples & demos
PendingDeprecationWarning,
)
return new_task(
name,
project,
handler,
params,
hyper_params,
param_file,
selector,
strategy,
inputs,
outputs,
in_path,
out_path,
artifact_path,
secrets,
base,
)
def new_task(
name=None,
project=None,
handler=None,
params=None,
hyper_params=None,
param_file=None,
selector=None,
hyper_param_options=None,
inputs=None,
outputs=None,
in_path=None,
out_path=None,
artifact_path=None,
secrets=None,
base=None,
):
"""Creates a new task
:param name: task name
:param project: task project
:param handler: code entry-point/handler name
:param params: input parameters (dict)
:param hyper_params: dictionary of hyper parameters and list values, each
hyper param holds a list of values, the run will be
executed for every parameter combination (GridSearch)
    :param param_file: a csv file with parameter combinations, the first row holds
                       the parameter names, the following rows hold param values
:param selector: selection criteria for hyper params e.g. "max.accuracy"
:param hyper_param_options: hyper parameter options, see: :py:class:`HyperParamOptions`
    :param inputs: dictionary of input objects + optional paths (if the path is
                   omitted it defaults to in_path/key)
    :param outputs: dictionary of output objects + optional paths (if the path is
                    omitted it defaults to out_path/key)
:param in_path: default input path/url (prefix) for inputs
:param out_path: default output path/url (prefix) for artifacts
:param artifact_path: default artifact output path
:param secrets: extra secrets specs, will be injected into the runtime
e.g. ['file=<filename>', 'env=ENV_KEY1,ENV_KEY2']
:param base: task instance to use as a base instead of a fresh new task instance
"""
if base:
run = deepcopy(base)
else:
run = RunTemplate()
run.metadata.name = name or run.metadata.name
run.metadata.project = project or run.metadata.project
run.spec.handler = handler or run.spec.handler
run.spec.parameters = params or run.spec.parameters
run.spec.inputs = inputs or run.spec.inputs
run.spec.outputs = outputs or run.spec.outputs or []
run.spec.input_path = in_path or run.spec.input_path
run.spec.output_path = artifact_path or out_path or run.spec.output_path
run.spec.secret_sources = secrets or run.spec.secret_sources or []
run.spec.hyperparams = hyper_params or run.spec.hyperparams
run.spec.hyper_param_options = hyper_param_options or run.spec.hyper_param_options
run.spec.hyper_param_options.param_file = (
param_file or run.spec.hyper_param_options.param_file
)
run.spec.hyper_param_options.selector = (
selector or run.spec.hyper_param_options.selector
)
return run
class DataSource(ModelObj):
"""online or offline data source spec"""
_dict_fields = [
"kind",
"name",
"path",
"attributes",
"key_field",
"time_field",
"schedule",
"online",
"workers",
"max_age",
"start_time",
"end_time",
]
kind = None
def __init__(
self,
name: str = None,
path: str = None,
attributes: Dict[str, str] = None,
key_field: str = None,
time_field: str = None,
schedule: str = None,
start_time: Optional[Union[datetime, str]] = None,
end_time: Optional[Union[datetime, str]] = None,
):
self.name = name
self.path = str(path) if path is not None else None
self.attributes = attributes
self.schedule = schedule
self.key_field = key_field
self.time_field = time_field
self.start_time = start_time
self.end_time = end_time
self.online = None
self.max_age = None
self.workers = None
self._secrets = {}
def set_secrets(self, secrets):
self._secrets = secrets
class DataTargetBase(ModelObj):
"""data target spec, specify a destination for the feature set data"""
_dict_fields = [
"name",
"kind",
"path",
"after_step",
"attributes",
"partitioned",
"key_bucketing_number",
"partition_cols",
"time_partitioning_granularity",
"max_events",
"flush_after_seconds",
]
# TODO - remove once "after_state" is fully deprecated
@classmethod
def from_dict(cls, struct=None, fields=None):
return super().from_dict(
struct, fields=fields, deprecated_fields={"after_state": "after_step"}
)
def __init__(
self,
kind: str = None,
name: str = "",
path=None,
attributes: Dict[str, str] = None,
after_step=None,
partitioned: bool = False,
key_bucketing_number: Optional[int] = None,
partition_cols: Optional[List[str]] = None,
time_partitioning_granularity: Optional[str] = None,
max_events: Optional[int] = None,
flush_after_seconds: Optional[int] = None,
after_state=None,
):
if after_state:
warnings.warn(
"The after_state parameter is deprecated. Use after_step instead",
# TODO: In 0.7.0 do changes in examples & demos In 0.9.0 remove
PendingDeprecationWarning,
)
after_step = after_step or after_state
self.name = name
self.kind: str = kind
self.path = path
self.after_step = after_step
self.attributes = attributes or {}
self.last_written = None
self.partitioned = partitioned
self.key_bucketing_number = key_bucketing_number
self.partition_cols = partition_cols
self.time_partitioning_granularity = time_partitioning_granularity
self.max_events = max_events
self.flush_after_seconds = flush_after_seconds
class FeatureSetProducer(ModelObj):
"""information about the task/job which produced the feature set data"""
def __init__(self, kind=None, name=None, uri=None, owner=None, sources=None):
self.kind = kind
self.name = name
self.owner = owner
self.uri = uri
self.sources = sources or {}
class DataTarget(DataTargetBase):
"""data target with extra status information (used in the feature-set/vector status)"""
_dict_fields = [
"name",
"kind",
"path",
"start_time",
"online",
"status",
"updated",
"size",
"last_written",
]
def __init__(
self, kind: str = None, name: str = "", path=None, online=None,
):
super().__init__(kind, name, path)
self.status = ""
self.updated = None
self.size = None
self.online = online
self.max_age = None
self.start_time = None
self.last_written = None
self._producer = None
self.producer = {}
@property
def producer(self) -> FeatureSetProducer:
return self._producer
@producer.setter
def producer(self, producer):
self._producer = self._verify_dict(producer, "producer", FeatureSetProducer)
class VersionedObjMetadata(ModelObj):
def __init__(
self,
name: str = None,
tag: str = None,
uid: str = None,
project: str = None,
labels: Dict[str, str] = None,
annotations: Dict[str, str] = None,
updated=None,
):
self.name = name
self.tag = tag
self.uid = uid
self.project = project
self.labels = labels or {}
self.annotations = annotations or {}
self.updated = updated
| 31.159556 | 106 | 0.603549 |
aceed2479058450dd37830c8b4dbebe6d4077be2 | 4,992 | py | Python | Dataset.py | bmahlbrand/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks | 122d975a67f683d72718a60febbde351764db8a6 | [
"MIT"
] | 6 | 2018-11-02T16:45:36.000Z | 2021-06-30T14:11:59.000Z | Dataset.py | bmahlbrand/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks | 122d975a67f683d72718a60febbde351764db8a6 | [
"MIT"
] | 1 | 2020-04-10T09:49:20.000Z | 2020-04-10T09:49:20.000Z | Dataset.py | bmahlbrand/Learning-to-Generate-Chairs-with-Convolutional-Neural-Networks | 122d975a67f683d72718a60febbde351764db8a6 | [
"MIT"
] | 5 | 2018-11-29T00:01:08.000Z | 2021-11-08T12:21:52.000Z | import os
import re
import numpy as np
from numpy import newaxis
import pandas
from skimage import io
from matplotlib import pyplot as plt
import pandas as pd
import torch
from torch.utils import data
from torchvision import transforms
import torchvision.transforms.functional as F
from utils.fs_utils import get_all_filenames
class Dataset():
def __init__(self, folderPath, is_train=True, output_size=128):
"""
Arguments:
            is_train: whether used for training or validation
folderPath: like "../data", is the folder storing chairs.train.csv and chairs.valid.csv
output_size: the desired output size of images and masks, in the paper, it is 64, 128, or 256.
"""
# get the path of train.csv and valid.csv
if is_train:
csv_path = folderPath + '/chairs.train.class.csv'
print("loading train dataset")
else:
csv_path = folderPath + '/chairs.valid.class.csv'
print("loading validataion dataset")
# read the data
self.data_info = pd.read_csv(csv_path, header=None)
# get the image path
# Note to remove the header!!
self.images = np.asarray(self.data_info.iloc[:, 0])[1:]
self.categories = np.asarray(self.data_info.iloc[:, 1])[1:]
self.masks = [re.sub(r'render', r'mask', image) for image in self.images]
print('loaded ', len(self.images), ' files')
# print(self.files)
# do the transformation
self.transformations = transforms.Compose([transforms.Resize(output_size),\
transforms.ToTensor()])
def __len__(self):
return len(self.images)
def __getitem__(self, index):
# load images
image_filename = self.images[index]
image = torch.from_numpy(io.imread(image_filename).transpose((2, 0, 1)))
#print(image.size())
#image = transforms.ToPILImage(image)
image = F.to_pil_image(image)
image = self.transformations(image)
# load the category of the image
category = int(self.categories[index])
# load masks
mask_filename = self.masks[index]
#print(io.imread(mask_filename).shape)
mask = torch.from_numpy(io.imread(mask_filename)[:,:,newaxis].transpose((2, 0, 1)))
#print(mask.size())
mask = F.to_pil_image(mask)
mask = self.transformations(mask)
# path: like "../data/rendered_chairs/b663f1e4df6c51fe19fb4103277a6b93/renders"
# filename: like "image_001_p020_t011_r096.png"
_, filename = os.path.split(image_filename)
# get the pieces of image filename to parse the parameters
# pieces = filename.split('_')
        # parse the viewpoint parameters from the file name and return them as well
        pieces = filename.split('_')[1:]  # split and drop the leading 'image' token
# print(pieces)
# id = pieces[0]
# remove the first char to get the parameters
phi = int(pieces[1].strip('p'))
theta = int(pieces[2].strip('t'))
#rho = int(pieces[3].strip('r').split('.')[0])
# construct the class vector
c = np.zeros(809)
c[category] = 1
# construct the view vector
v = np.zeros(4)
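        # encode theta and phi as (sin, cos) pairs so the viewpoint is a smooth,
        # periodic representation rather than a raw angle in degrees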
v[0] = np.sin(theta/180 * np.pi)
v[1] = np.cos(theta/180 * np.pi)
v[2] = np.sin(phi/180 * np.pi)
        v[3] = np.cos(phi/180 * np.pi)
        # construct the transform parameter vector
t = np.ones(12)
        # convert them to tensors and cast to torch.FloatTensor
c = torch.from_numpy(c)
v = torch.from_numpy(v)
t = torch.from_numpy(t)
c = c.float()
v = v.float()
t = t.float()
return image, mask, c, v, t
# dataset = Dataset('datasets/lspet_dataset', myTransforms.Compose([myTransforms.TestResized((368, 368))]))
# loader = DataLoader(
# dataset,
# batch_size=10,
# num_workers=0,
# shuffle=False
# )
def computeStatistics(loader):
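    # accumulate the per-channel mean and std over every batch in the loader,
    # then divide by the total number of samples to get dataset-level statistics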
mean = 0.
std = 0.
nb_samples = 0.
for data in loader:
batch_samples = data.size(0)
data = data.view(batch_samples, data.size(1), -1)
mean += data.mean(2).sum(0)
std += data.std(2).sum(0)
nb_samples += batch_samples
mean /= nb_samples
std /= nb_samples
return mean, std
# if __name__ == '__main__':
# print('computing std and mean of LSPet')
# print(computeStatistics(dataset))
if __name__ == '__main__':
    image, mask, c, v, t = Dataset('../data', is_train=False).__getitem__(100)
print(image.size())
print(mask.size())
print(image.numpy().shape)
print(mask.numpy().shape)
#plt.imshow(image.numpy().transpose(1, 2, 0))
plt.imshow(torch.squeeze(mask).numpy())
plt.show()
print("phi: ", phi)
print("theta: ", theta)
print("rho: ", rho)
| 31.796178 | 119 | 0.596354 |
aceed259d984e47b27c8403760a78d6e3299ddc0 | 1,643 | py | Python | example.py | lobocv/anonymoususage | 847bdad0746ad1cc6c57fb9def201beb59fb8300 | [
"MIT"
] | 1 | 2018-08-31T15:29:56.000Z | 2018-08-31T15:29:56.000Z | example.py | lobocv/anonymoususage | 847bdad0746ad1cc6c57fb9def201beb59fb8300 | [
"MIT"
] | null | null | null | example.py | lobocv/anonymoususage | 847bdad0746ad1cc6c57fb9def201beb59fb8300 | [
"MIT"
] | 2 | 2015-11-09T05:58:23.000Z | 2017-11-15T14:41:12.000Z | import uuid
import datetime
import logging
logging.getLogger().setLevel(logging.DEBUG)
from anonymoususage import AnonymousUsageTracker, NO_STATE
unique_identifier = uuid.uuid4().hex
database_path = './test.db'
submit_interval = datetime.timedelta(hours=1)
tracker = AnonymousUsageTracker(uuid=unique_identifier,
filepath=database_path,
check_interval_s=30,
submit_interval_s=60*60)
tracker.setup_hq(host='http://127.0.0.1:5010', api_key='1fd5451sdr83523ks234')
tracker.track_statistic('quests_complete')
tracker['quests_complete'].increment(1)
tracker['quests_complete'].decrement(1)
tracker.track_statistic('monsters_killed')
tracker.track_sequence('round_trip', checkpoints=('The Shire', 'Mordor', 'Gondor'))
tracker.track_state('server', initial_state=NO_STATE)
tracker.track_time('play_time')
def login(server_name, username, password):
# Login code goes here
# Start the play_time timer.
tracker['play_time'].start_timer()
tracker['server'] = server_name
def logoff():
# Logoff code goes here
# Stop the timer
tracker['play_time'].stop_timer()
def hand_in_quests(quests):
# Completing quest code goes here
tracker['quests_complete'] += len(quests)
def kill_monster():
# Completing quest code goes here
tracker['monsters_killed'] += 1
def go_to_town(town):
tracker['round_trip'] = town
login('ServerA', 'calvin', 'mypassword')
go_to_town('The Shire')
go_to_town('Mordor')
kill_monster()
kill_monster()
go_to_town('Gondor')
hand_in_quests(['Kill Two Monsters'])
tracker.close()
| 26.079365 | 83 | 0.711503 |
aceed449a521a1a9017543c25ee6b126179c365b | 461 | py | Python | submissions/abc162/e.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/abc162/e.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/abc162/e.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | import sys
read = sys.stdin.buffer.read
readline = sys.stdin.buffer.readline
readlines = sys.stdin.buffer.readlines
sys.setrecursionlimit(10 ** 7)
n, k = map(int, read().split())
mod = 10 ** 9 + 7
memo = [0] * (k + 1)
ans = 0
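# memo[x] = number of length-n sequences over {1..k} whose gcd is exactly x.
# For each x (largest first), start from (k//x)**n sequences whose elements are all
# multiples of x, then subtract the counts already assigned to multiples of x.
# The answer is sum(x * memo[x]) mod 10**9 + 7.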
for x in range(k, 0, -1):
v = k // x
tmp = pow(v, n, mod)
for i in range(2, v + 1):
tmp -= memo[i * x]
tmp %= mod
memo[x] = tmp
for i, vv in enumerate(memo):
ans += i * vv
ans %= mod
print(ans)
| 20.954545 | 38 | 0.557484 |
aceed4e18dcb00389dc3f9bb0a36daa55ca98a9f | 1,806 | py | Python | src/count_overlaps.py | BorgwardtLab/graphkernels-review | 3dfc2fad64d4159722f06db11b555fc568997fcf | [
"BSD-3-Clause"
] | 3 | 2021-02-27T07:21:30.000Z | 2021-07-30T21:10:05.000Z | src/count_overlaps.py | BorgwardtLab/graphkernels-review | 3dfc2fad64d4159722f06db11b555fc568997fcf | [
"BSD-3-Clause"
] | null | null | null | src/count_overlaps.py | BorgwardtLab/graphkernels-review | 3dfc2fad64d4159722f06db11b555fc568997fcf | [
"BSD-3-Clause"
] | 3 | 2021-01-20T14:03:52.000Z | 2021-08-23T08:36:25.000Z | #!/usr/bin/env python3
#
# count_overlaps.py: collects accuracies and standard deviations of all
# graph kernels from a large CSV file, and calculates how many overlaps
# there are. Here, an overlap indicates that the performance of kernels
# cannot be distinguished on a per-fold basis.
import argparse
import numpy as np
import pandas as pd
def overlaps(m0, s0, m1, s1):
'''
    Check whether the interval [m0 - s0, m0 + s0] overlaps the
    interval [m1 - s1, m1 + s1].
'''
a = m0 - s0
b = m0 + s1
c = m1 - s1
d = m1 + s1
return (b >= c and a <= d) or (d >= a and c <= b)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('FILE', type=str, help='Input file')
args = parser.parse_args()
df = pd.read_csv(args.FILE, header=0, index_col=0)
print('data_set,n_overlaps,n_pairs')
with open(f'../output/Overlap.csv', 'a') as f:
print(f'data_set,n_overlaps,n_pairs', file=f)
for column in df.columns:
data_set, values = df[column].name, df[column].values
algorithm = column
data = []
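        # each cell is formatted as "<mean> +- <std>"; parse it into a (mean, std) tuple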
for value in values:
if value is not np.nan:
m, s = value.split('+-')
m = float(m.strip())
s = float(s.strip())
data.append((m, s))
n_overlaps = 0
n_pairs = 0
for i, (m0, s0) in enumerate(data):
for j, (m1, s1) in enumerate(data[i+1:]):
k = j + i + 1
if overlaps(m0, s0, m1, s1):
n_overlaps += 1
n_pairs += 1
print(f'{data_set},{n_overlaps},{n_pairs}')
with open(f'../output/Overlap.csv', 'a') as f:
print(f'{data_set},{n_overlaps},{n_pairs}', file=f)
| 25.43662 | 71 | 0.560908 |
aceed5d1dbbd2b3c627eb676eabc1d02d8da885e | 4,467 | py | Python | lomit_munit/test.py | cc-ai/floods-gans | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 5 | 2019-05-07T15:14:58.000Z | 2020-11-23T00:21:50.000Z | lomit_munit/test.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 13 | 2019-04-25T01:06:20.000Z | 2022-03-11T23:51:04.000Z | lomit_munit/test.py | cc-ai/floods | 787dc2a3c08483c68a687b4355c0f0f6f2711ab9 | [
"Apache-2.0"
] | 4 | 2019-04-24T18:06:10.000Z | 2020-07-15T18:02:56.000Z | """
Copyright (C) 2018 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
from __future__ import print_function
from utils import get_config, pytorch03_to_pytorch04
from trainer import MUNIT_Trainer, UNIT_Trainer
import argparse
from torch.autograd import Variable
import torchvision.utils as vutils
import sys
import torch
import os
from torchvision import transforms
from PIL import Image
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, help="net configuration")
parser.add_argument('--input', type=str, help="input image path")
parser.add_argument('--output_folder', type=str, help="output image path")
parser.add_argument('--checkpoint', type=str, help="checkpoint of autoencoders")
parser.add_argument('--style', type=str, default='', help="style image path")
parser.add_argument('--a2b', type=int, default=1, help="1 for a2b and others for b2a")
parser.add_argument('--seed', type=int, default=10, help="random seed")
parser.add_argument('--num_style',type=int, default=10, help="number of styles to sample")
parser.add_argument('--synchronized', action='store_true', help="whether use synchronized style code or not")
parser.add_argument('--output_only', action='store_true', help="whether to save only the translated outputs (skip saving the input image)")
parser.add_argument('--output_path', type=str, default='.', help="path for logs, checkpoints, and VGG model weight")
parser.add_argument('--trainer', type=str, default='MUNIT', help="MUNIT|UNIT")
opts = parser.parse_args()
torch.manual_seed(opts.seed)
torch.cuda.manual_seed(opts.seed)
if not os.path.exists(opts.output_folder):
os.makedirs(opts.output_folder)
# Load experiment setting
config = get_config(opts.config)
opts.num_style = 1 if opts.style != '' else opts.num_style
# Setup model and data loader
config['vgg_model_path'] = opts.output_path
if opts.trainer == 'MUNIT':
style_dim = config['gen']['style_dim']
trainer = MUNIT_Trainer(config)
elif opts.trainer == 'UNIT':
trainer = UNIT_Trainer(config)
else:
sys.exit("Only support MUNIT|UNIT")
try:
state_dict = torch.load(opts.checkpoint)
trainer.gen_a.load_state_dict(state_dict['a'])
trainer.gen_b.load_state_dict(state_dict['b'])
except:
state_dict = pytorch03_to_pytorch04(torch.load(opts.checkpoint), opts.trainer)
trainer.gen_a.load_state_dict(state_dict['a'])
trainer.gen_b.load_state_dict(state_dict['b'])
trainer.cuda()
trainer.eval()
encode = trainer.gen_a.encode if opts.a2b else trainer.gen_b.encode # encode function
style_encode = trainer.gen_b.encode if opts.a2b else trainer.gen_a.encode # encode function
decode = trainer.gen_b.decode if opts.a2b else trainer.gen_a.decode # decode function
if 'new_size' in config:
new_size = config['new_size']
else:
if opts.a2b==1:
new_size = config['new_size_a']
else:
new_size = config['new_size_b']
with torch.no_grad():
transform = transforms.Compose([transforms.Resize(new_size),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
image = Variable(transform(Image.open(opts.input).convert('RGB')).unsqueeze(0).cuda())
style_image = Variable(transform(Image.open(opts.style).convert('RGB')).unsqueeze(0).cuda()) if opts.style != '' else None
# Start testing
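    # MUNIT splits an image into a content code and a style code; below, the input's
    # content is recombined with either randomly sampled styles or the style encoded
    # from the reference --style image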
content, _ = encode(image)
if opts.trainer == 'MUNIT':
style_rand = Variable(torch.randn(opts.num_style, style_dim, 1, 1).cuda())
if opts.style != '':
_, style = style_encode(style_image)
else:
style = style_rand
for j in range(opts.num_style):
s = style[j].unsqueeze(0)
outputs = decode(content, s)
outputs = (outputs + 1) / 2.
path = os.path.join(opts.output_folder, 'output{:03d}.jpg'.format(j))
vutils.save_image(outputs.data, path, padding=0, normalize=True)
elif opts.trainer == 'UNIT':
outputs = decode(content)
outputs = (outputs + 1) / 2.
path = os.path.join(opts.output_folder, 'output.jpg')
vutils.save_image(outputs.data, path, padding=0, normalize=True)
else:
pass
if not opts.output_only:
# also save input images
vutils.save_image(image.data, os.path.join(opts.output_folder, 'input.jpg'), padding=0, normalize=True)
| 40.609091 | 126 | 0.697784 |
aceed644e3b1a995aa0a5451b21a48e2fab140d4 | 3,619 | py | Python | band_dashboard/urls.py | KonichiwaKen/band-dashboard | 5c151c81255154d0dec2ee50ca191b5a78cfd9a4 | [
"MIT"
] | null | null | null | band_dashboard/urls.py | KonichiwaKen/band-dashboard | 5c151c81255154d0dec2ee50ca191b5a78cfd9a4 | [
"MIT"
] | null | null | null | band_dashboard/urls.py | KonichiwaKen/band-dashboard | 5c151c81255154d0dec2ee50ca191b5a78cfd9a4 | [
"MIT"
] | null | null | null | """band_dashboard URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include
from django.conf.urls import patterns
from django.conf.urls import url
from rest_framework_nested import routers
from attendance.views import AcceptSubstitutionForm
from attendance.views import AttendanceViewSet
from attendance.views import DeclineSubstitutionForm
from attendance.views import EventTypeViewSet
from attendance.views import EventViewSet
from attendance.views import GetPendingSubstitutionForms
from attendance.views import GetUnassignedMembersView
from attendance.views import SubstitutionFormViewSet
from attendance.views import UnassignedAttendanceView
from authentication.views import AccountViewSet
from authentication.views import CreateAccountsView
from authentication.views import CreatePasswordView
from authentication.views import LoginView
from authentication.views import LogoutView
from band_dashboard.views import IndexView
from members.views import BandMemberViewSet
from members.views import BandViewSet
from members.views import BandAssignmentView
from members.views import UnassignedMembersView
router = routers.SimpleRouter()
router.register(r'accounts', AccountViewSet)
router.register(r'attendance/event', EventViewSet, base_name='event')
router.register(r'attendance/event_type', EventTypeViewSet)
router.register(r'attendance/event_attendance', AttendanceViewSet, base_name='event_attendance')
router.register(r'members', BandMemberViewSet)
router.register(r'members/band', BandViewSet)
router.register(
r'attendance/substitution_form',
SubstitutionFormViewSet,
base_name='substitution_form')
urlpatterns = patterns(
'',
url(r'^api/v1/', include(router.urls)),
url(
r'^api/v1/attendance/unassigned/$',
UnassignedAttendanceView.as_view(),
name='unassigned_attendance'),
url(r'^api/v1/auth/login/$', LoginView.as_view(), name='login'),
url(r'^api/v1/auth/logout/$', LogoutView.as_view(), name='logout'),
url(r'^api/v1/band_assignments/$', BandAssignmentView.as_view(), name='band_assignments'),
url(
r'^api/v1/members/unassigned/$',
UnassignedMembersView.as_view(),
name='unassigned_members'),
url(
r'^api/v1/get_unassigned_members/$',
GetUnassignedMembersView.as_view(),
name='get_unassigned_members'),
url(
r'^api/v1/pending_substitution_forms/$',
GetPendingSubstitutionForms.as_view(),
name='get_pending_substitution_forms'),
url(
r'^api/v1/attendance/accept_substitution_form/$',
AcceptSubstitutionForm.as_view(),
name='accept_substitution_form'),
url(
r'^api/v1/attendance/decline_substitution_form/$',
DeclineSubstitutionForm.as_view(),
name='decline_substitution_form'),
url(r'^api/v1/create_accounts/$', CreateAccountsView.as_view(), name='create_accounts'),
url(r'^api/v1/create_password/$', CreatePasswordView.as_view(), name='create_password'),
url('^.*$', IndexView.as_view(), name='index'),
)
| 41.125 | 96 | 0.754076 |
aceed7218d0b0aaf201334a7c1d9b0464e20716b | 71,254 | py | Python | python/ccxt/async_support/binance.py | Itchibon777/ccxt | 040f39afda443fc3981ae70789a9e395f5fc0029 | [
"MIT"
] | 1 | 2021-11-03T06:16:16.000Z | 2021-11-03T06:16:16.000Z | python/ccxt/async_support/binance.py | Itchibon777/ccxt | 040f39afda443fc3981ae70789a9e395f5fc0029 | [
"MIT"
] | null | null | null | python/ccxt/async_support/binance.py | Itchibon777/ccxt | 040f39afda443fc3981ae70789a9e395f5fc0029 | [
"MIT"
] | 2 | 2021-05-07T09:11:54.000Z | 2021-11-27T16:29:10.000Z | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.async_support.base.exchange import Exchange
import math
import json
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidAddress
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import DDoSProtection
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import InvalidNonce
from ccxt.base.decimal_to_precision import ROUND
class binance(Exchange):
def describe(self):
return self.deep_extend(super(binance, self).describe(), {
'id': 'binance',
'name': 'Binance',
'countries': ['JP', 'MT'], # Japan, Malta
'rateLimit': 500,
'certified': True,
# new metainfo interface
'has': {
'fetchDepositAddress': True,
'CORS': False,
'fetchBidsAsks': True,
'fetchTickers': True,
'fetchTime': True,
'fetchOHLCV': True,
'fetchMyTrades': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': 'emulated',
'withdraw': True,
'fetchFundingFees': True,
'fetchDeposits': True,
'fetchWithdrawals': True,
'fetchTransactions': False,
},
'timeframes': {
'1m': '1m',
'3m': '3m',
'5m': '5m',
'15m': '15m',
'30m': '30m',
'1h': '1h',
'2h': '2h',
'4h': '4h',
'6h': '6h',
'8h': '8h',
'12h': '12h',
'1d': '1d',
'3d': '3d',
'1w': '1w',
'1M': '1M',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/29604020-d5483cdc-87ee-11e7-94c7-d1a8d9169293.jpg',
'api': {
'web': 'https://www.binance.com',
'wapi': 'https://api.binance.com/wapi/v3',
'sapi': 'https://api.binance.com/sapi/v1',
'fapiPublic': 'https://fapi.binance.com/fapi/v1',
'fapiPrivate': 'https://fapi.binance.com/fapi/v1',
'public': 'https://api.binance.com/api/v3',
'private': 'https://api.binance.com/api/v3',
'v3': 'https://api.binance.com/api/v3',
'v1': 'https://api.binance.com/api/v1',
},
'www': 'https://www.binance.com',
'referral': 'https://www.binance.com/?ref=10205187',
'doc': [
'https://binance-docs.github.io/apidocs/spot/en',
],
'api_management': 'https://www.binance.com/en/usercenter/settings/api-management',
'fees': 'https://www.binance.com/en/fee/schedule',
},
'api': {
'web': {
'get': [
'exchange/public/product',
'assetWithdraw/getAllAsset.html',
],
},
# the API structure below will need 3-layer apidefs
'sapi': {
'get': [
# these endpoints require self.apiKey
'margin/asset',
'margin/pair',
'margin/allAssets',
'margin/allPairs',
'margin/priceIndex',
# these endpoints require self.apiKey + self.secret
'asset/assetDividend',
'margin/loan',
'margin/repay',
'margin/account',
'margin/transfer',
'margin/interestHistory',
'margin/forceLiquidationRec',
'margin/order',
'margin/openOrders',
'margin/allOrders',
'margin/myTrades',
'margin/maxBorrowable',
'margin/maxTransferable',
'futures/transfer',
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
'capital/config/getall', # get networks for withdrawing USDT ERC20 vs USDT Omni
],
'post': [
'asset/dust',
'margin/transfer',
'margin/loan',
'margin/repay',
'margin/order',
'userDataStream',
'futures/transfer',
],
'put': [
'userDataStream',
],
'delete': [
'margin/order',
'userDataStream',
],
},
'wapi': {
'post': [
'withdraw',
'sub-account/transfer',
],
'get': [
'depositHistory',
'withdrawHistory',
'depositAddress',
'accountStatus',
'systemStatus',
'apiTradingStatus',
'userAssetDribbletLog',
'tradeFee',
'assetDetail',
'sub-account/list',
'sub-account/transfer/history',
'sub-account/assets',
],
},
'fapiPublic': {
'get': [
'ping',
'time',
'exchangeInfo',
'depth',
'trades',
'historicalTrades',
'aggTrades',
'klines',
'premiumIndex',
'ticker/24hr',
'ticker/price',
'ticker/bookTicker',
],
'put': ['listenKey'],
'post': ['listenKey'],
'delete': ['listenKey'],
},
'fapiPrivate': {
'get': [
'allOrders',
'openOrders',
'order',
'account',
'balance',
'positionMargin/history',
'positionRisk',
'userTrades',
'income',
],
'post': [
'positionMargin',
'marginType',
'order',
'leverage',
],
'delete': [
'order',
],
},
'v3': {
'get': [
'ticker/price',
'ticker/bookTicker',
],
},
'public': {
'get': [
'ping',
'time',
'depth',
'trades',
'aggTrades',
'historicalTrades',
'klines',
'ticker/24hr',
'ticker/price',
'ticker/bookTicker',
'exchangeInfo',
],
'put': ['userDataStream'],
'post': ['userDataStream'],
'delete': ['userDataStream'],
},
'private': {
'get': [
'allOrderList', # oco
'openOrderList', # oco
'orderList', # oco
'order',
'openOrders',
'allOrders',
'account',
'myTrades',
],
'post': [
'order/oco',
'order',
'order/test',
],
'delete': [
'orderList', # oco
'order',
],
},
},
'fees': {
'trading': {
'tierBased': False,
'percentage': True,
'taker': 0.001,
'maker': 0.001,
},
},
'commonCurrencies': {
'BCC': 'BCC', # kept for backward-compatibility https://github.com/ccxt/ccxt/issues/4848
'YOYO': 'YOYOW',
},
# exchange-specific options
'options': {
'fetchTradesMethod': 'publicGetAggTrades',
'fetchTickersMethod': 'publicGetTicker24hr',
'defaultTimeInForce': 'GTC', # 'GTC' = Good To Cancel(default), 'IOC' = Immediate Or Cancel
'defaultLimitOrderType': 'limit', # or 'limit_maker'
'defaultType': 'spot', # 'spot', 'future'
'hasAlreadyAuthenticatedSuccessfully': False,
'warnOnFetchOpenOrdersWithoutSymbol': True,
'recvWindow': 5 * 1000, # 5 sec, binance default
'timeDifference': 0, # the difference between system clock and Binance clock
'adjustForTimeDifference': False, # controls the adjustment logic upon instantiation
'parseOrderToPrecision': False, # force amounts and costs in parseOrder to precision
'newOrderRespType': {
'market': 'FULL', # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
'limit': 'RESULT', # we change it from 'ACK' by default to 'RESULT'
},
},
'exceptions': {
'API key does not exist': AuthenticationError,
'Order would trigger immediately.': InvalidOrder,
'Account has insufficient balance for requested action.': InsufficientFunds,
'Rest API trading is not enabled.': ExchangeNotAvailable,
'-1000': ExchangeNotAvailable, # {"code":-1000,"msg":"An unknown error occured while processing the request."}
'-1013': InvalidOrder, # createOrder -> 'invalid quantity'/'invalid price'/MIN_NOTIONAL
'-1021': InvalidNonce, # 'your time is ahead of server'
'-1022': AuthenticationError, # {"code":-1022,"msg":"Signature for self request is not valid."}
'-1100': InvalidOrder, # createOrder(symbol, 1, asdf) -> 'Illegal characters found in parameter 'price'
'-1104': ExchangeError, # Not all sent parameters were read, read 8 parameters but was sent 9
'-1128': ExchangeError, # {"code":-1128,"msg":"Combination of optional parameters invalid."}
'-2010': ExchangeError, # generic error code for createOrder -> 'Account has insufficient balance for requested action.', {"code":-2010,"msg":"Rest API trading is not enabled."}, etc...
'-2011': OrderNotFound, # cancelOrder(1, 'BTC/USDT') -> 'UNKNOWN_ORDER'
'-2013': OrderNotFound, # fetchOrder(1, 'BTC/USDT') -> 'Order does not exist'
'-2014': AuthenticationError, # {"code":-2014, "msg": "API-key format invalid."}
'-2015': AuthenticationError, # "Invalid API-key, IP, or permissions for action."
},
})
def nonce(self):
return self.milliseconds() - self.options['timeDifference']
async def fetch_time(self, params={}):
type = self.safe_string_2(self.options, 'fetchTime', 'defaultType', 'spot')
method = 'publicGetTime' if (type == 'spot') else 'fapiPublicGetTime'
response = await getattr(self, method)(params)
return self.safe_float(response, 'serverTime')
async def load_time_difference(self):
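        # measure the offset between the local clock and the exchange server clock;
        # nonce() subtracts this offset so request timestamps line up with the server clock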
serverTime = await self.fetch_time()
after = self.milliseconds()
self.options['timeDifference'] = int(after - serverTime)
return self.options['timeDifference']
async def fetch_markets(self, params={}):
defaultType = self.safe_string_2(self.options, 'fetchMarkets', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'publicGetExchangeInfo' if (type == 'spot') else 'fapiPublicGetExchangeInfo'
response = await getattr(self, method)(query)
#
# spot
#
# {
# "timezone":"UTC",
# "serverTime":1575416692969,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"SECOND","intervalNum":10,"limit":100},
# {"rateLimitType":"ORDERS","interval":"DAY","intervalNum":1,"limit":200000}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"ETHBTC",
# "status":"TRADING",
# "baseAsset":"ETH",
# "baseAssetPrecision":8,
# "quoteAsset":"BTC",
# "quotePrecision":8,
# "baseCommissionPrecision":8,
# "quoteCommissionPrecision":8,
# "orderTypes":["LIMIT","LIMIT_MAKER","MARKET","STOP_LOSS_LIMIT","TAKE_PROFIT_LIMIT"],
# "icebergAllowed":true,
# "ocoAllowed":true,
# "quoteOrderQtyMarketAllowed":true,
# "isSpotTradingAllowed":true,
# "isMarginTradingAllowed":true,
# "filters":[
# {"filterType":"PRICE_FILTER","minPrice":"0.00000100","maxPrice":"100000.00000000","tickSize":"0.00000100"},
# {"filterType":"PERCENT_PRICE","multiplierUp":"5","multiplierDown":"0.2","avgPriceMins":5},
# {"filterType":"LOT_SIZE","minQty":"0.00100000","maxQty":"100000.00000000","stepSize":"0.00100000"},
# {"filterType":"MIN_NOTIONAL","minNotional":"0.00010000","applyToMarket":true,"avgPriceMins":5},
# {"filterType":"ICEBERG_PARTS","limit":10},
# {"filterType":"MARKET_LOT_SIZE","minQty":"0.00000000","maxQty":"63100.00000000","stepSize":"0.00000000"},
# {"filterType":"MAX_NUM_ALGO_ORDERS","maxNumAlgoOrders":5}
# ]
# },
# ],
# }
#
# futures(fapi)
#
# {
# "timezone":"UTC",
# "serverTime":1575417244353,
# "rateLimits":[
# {"rateLimitType":"REQUEST_WEIGHT","interval":"MINUTE","intervalNum":1,"limit":1200},
# {"rateLimitType":"ORDERS","interval":"MINUTE","intervalNum":1,"limit":1200}
# ],
# "exchangeFilters":[],
# "symbols":[
# {
# "symbol":"BTCUSDT",
# "status":"TRADING",
# "maintMarginPercent":"2.5000",
# "requiredMarginPercent":"5.0000",
# "baseAsset":"BTC",
# "quoteAsset":"USDT",
# "pricePrecision":2,
# "quantityPrecision":3,
# "baseAssetPrecision":8,
# "quotePrecision":8,
# "filters":[
# {"minPrice":"0.01","maxPrice":"100000","filterType":"PRICE_FILTER","tickSize":"0.01"},
# {"stepSize":"0.001","filterType":"LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"stepSize":"0.001","filterType":"MARKET_LOT_SIZE","maxQty":"1000","minQty":"0.001"},
# {"limit":200,"filterType":"MAX_NUM_ORDERS"},
# {"multiplierDown":"0.8500","multiplierUp":"1.1500","multiplierDecimal":"4","filterType":"PERCENT_PRICE"}
# ],
# "orderTypes":["LIMIT","MARKET","STOP"],
# "timeInForce":["GTC","IOC","FOK","GTX"]
# }
# ]
# }
#
if self.options['adjustForTimeDifference']:
await self.load_time_difference()
markets = self.safe_value(response, 'symbols')
result = []
for i in range(0, len(markets)):
market = markets[i]
future = ('maintMarginPercent' in market)
spot = not future
marketType = 'spot' if spot else 'future'
id = self.safe_string(market, 'symbol')
baseId = market['baseAsset']
quoteId = market['quoteAsset']
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
filters = self.index_by(market['filters'], 'filterType')
precision = {
'base': market['baseAssetPrecision'],
'quote': market['quotePrecision'],
'amount': market['baseAssetPrecision'],
'price': market['quotePrecision'],
}
status = self.safe_string(market, 'status')
active = (status == 'TRADING')
entry = {
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'info': market,
'type': marketType,
'spot': spot,
'future': future,
'active': active,
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision['amount']),
'max': None,
},
'price': {
'min': None,
'max': None,
},
'cost': {
'min': -1 * math.log10(precision['amount']),
'max': None,
},
},
}
if 'PRICE_FILTER' in filters:
filter = filters['PRICE_FILTER']
# PRICE_FILTER reports zero values for maxPrice
# since they updated filter types in November 2018
# https://github.com/ccxt/ccxt/issues/4286
# therefore limits['price']['max'] doesn't have any meaningful value except None
entry['limits']['price'] = {
'min': self.safe_float(filter, 'minPrice'),
'max': None,
}
maxPrice = self.safe_float(filter, 'maxPrice')
if (maxPrice is not None) and (maxPrice > 0):
entry['limits']['price']['max'] = maxPrice
entry['precision']['price'] = self.precision_from_string(filter['tickSize'])
if 'LOT_SIZE' in filters:
filter = self.safe_value(filters, 'LOT_SIZE', {})
stepSize = self.safe_string(filter, 'stepSize')
entry['precision']['amount'] = self.precision_from_string(stepSize)
entry['limits']['amount'] = {
'min': self.safe_float(filter, 'minQty'),
'max': self.safe_float(filter, 'maxQty'),
}
if 'MIN_NOTIONAL' in filters:
entry['limits']['cost']['min'] = self.safe_float(filters['MIN_NOTIONAL'], 'minNotional')
result.append(entry)
return result
def calculate_fee(self, symbol, type, side, amount, price, takerOrMaker='taker', params={}):
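        # fees are computed in the quote currency for sells and in the base currency for buys,
        # using the market's maker/taker rate and the corresponding price/amount precision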
market = self.markets[symbol]
key = 'quote'
rate = market[takerOrMaker]
cost = amount * rate
precision = market['precision']['price']
if side == 'sell':
cost *= price
else:
key = 'base'
precision = market['precision']['amount']
cost = self.decimal_to_precision(cost, ROUND, precision, self.precisionMode)
return {
'type': takerOrMaker,
'currency': market[key],
'rate': rate,
'cost': float(cost),
}
async def fetch_balance(self, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchBalance', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
method = 'privateGetAccount' if (type == 'spot') else 'fapiPrivateGetAccount'
query = self.omit(params, 'type')
response = await getattr(self, method)(query)
#
# spot
#
# {
# makerCommission: 10,
# takerCommission: 10,
# buyerCommission: 0,
# sellerCommission: 0,
# canTrade: True,
# canWithdraw: True,
# canDeposit: True,
# updateTime: 1575357359602,
# accountType: "MARGIN",
# balances: [
# {asset: "BTC", free: "0.00219821", locked: "0.00000000" },
# ]
# }
#
# futures(fapi)
#
# {
# "feeTier":0,
# "canTrade":true,
# "canDeposit":true,
# "canWithdraw":true,
# "updateTime":0,
# "totalInitialMargin":"0.00000000",
# "totalMaintMargin":"0.00000000",
# "totalWalletBalance":"4.54000000",
# "totalUnrealizedProfit":"0.00000000",
# "totalMarginBalance":"4.54000000",
# "totalPositionInitialMargin":"0.00000000",
# "totalOpenOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000",
# "assets":[
# {
# "asset":"USDT",
# "walletBalance":"4.54000000",
# "unrealizedProfit":"0.00000000",
# "marginBalance":"4.54000000",
# "maintMargin":"0.00000000",
# "initialMargin":"0.00000000",
# "positionInitialMargin":"0.00000000",
# "openOrderInitialMargin":"0.00000000",
# "maxWithdrawAmount":"4.54000000"
# }
# ],
# "positions":[
# {
# "symbol":"BTCUSDT",
# "initialMargin":"0.00000",
# "maintMargin":"0.00000",
# "unrealizedProfit":"0.00000000",
# "positionInitialMargin":"0.00000",
# "openOrderInitialMargin":"0.00000"
# }
# ]
# }
#
result = {'info': response}
if type == 'spot':
balances = self.safe_value(response, 'balances', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['free'] = self.safe_float(balance, 'free')
account['used'] = self.safe_float(balance, 'locked')
result[code] = account
else:
balances = self.safe_value(response, 'assets', [])
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'asset')
code = self.safe_currency_code(currencyId)
account = self.account()
account['used'] = self.safe_float(balance, 'initialMargin')
account['total'] = self.safe_float(balance, 'marginBalance')
result[code] = account
return self.parse_balance(result)
async def fetch_order_book(self, symbol, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['limit'] = limit # default 100, max 5000, see https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#order-book
method = 'publicGetDepth' if market['spot'] else 'fapiPublicGetDepth'
response = await getattr(self, method)(self.extend(request, params))
orderbook = self.parse_order_book(response)
orderbook['nonce'] = self.safe_integer(response, 'lastUpdateId')
return orderbook
def parse_ticker(self, ticker, market=None):
timestamp = self.safe_integer(ticker, 'closeTime')
symbol = None
marketId = self.safe_string(ticker, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
last = self.safe_float(ticker, 'lastPrice')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'highPrice'),
'low': self.safe_float(ticker, 'lowPrice'),
'bid': self.safe_float(ticker, 'bidPrice'),
'bidVolume': self.safe_float(ticker, 'bidQty'),
'ask': self.safe_float(ticker, 'askPrice'),
'askVolume': self.safe_float(ticker, 'askQty'),
'vwap': self.safe_float(ticker, 'weightedAvgPrice'),
'open': self.safe_float(ticker, 'openPrice'),
'close': last,
'last': last,
'previousClose': self.safe_float(ticker, 'prevClosePrice'), # previous day close
'change': self.safe_float(ticker, 'priceChange'),
'percentage': self.safe_float(ticker, 'priceChangePercent'),
'average': None,
'baseVolume': self.safe_float(ticker, 'volume'),
'quoteVolume': self.safe_float(ticker, 'quoteVolume'),
'info': ticker,
}
async def fetch_status(self, params={}):
response = await self.wapiGetSystemStatus()
status = self.safe_value(response, 'status')
if status is not None:
status = 'ok' if (status == 0) else 'maintenance'
self.status = self.extend(self.status, {
'status': status,
'updated': self.milliseconds(),
})
return self.status
async def fetch_ticker(self, symbol, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
method = 'publicGetTicker24hr' if market['spot'] else 'fapiPublicGetTicker24hr'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ticker(response, market)
def parse_tickers(self, rawTickers, symbols=None):
tickers = []
for i in range(0, len(rawTickers)):
tickers.append(self.parse_ticker(rawTickers[i]))
return self.filter_by_array(tickers, 'symbol', symbols)
async def fetch_bids_asks(self, symbols=None, params={}):
await self.load_markets()
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'publicGetTickerBookTicker' if (type == 'spot') else 'fapiPublicGetTickerBookTicker'
response = await getattr(self, method)(query)
return self.parse_tickers(response, symbols)
async def fetch_tickers(self, symbols=None, params={}):
await self.load_markets()
method = self.options['fetchTickersMethod']
response = await getattr(self, method)(params)
return self.parse_tickers(response, symbols)
def parse_ohlcv(self, ohlcv, market=None, timeframe='1m', since=None, limit=None):
return [
ohlcv[0],
float(ohlcv[1]),
float(ohlcv[2]),
float(ohlcv[3]),
float(ohlcv[4]),
float(ohlcv[5]),
]
async def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'interval': self.timeframes[timeframe],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit # default == max == 500
method = 'publicGetKlines' if market['spot'] else 'fapiPublicGetKlines'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def parse_trade(self, trade, market=None):
if 'isDustTrade' in trade:
return self.parse_dust_trade(trade, market)
#
# aggregate trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#compressedaggregate-trades-list
#
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
#
# recent public trades and old public trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#recent-trades-list
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#old-trade-lookup-market_data
#
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
#
# private trades
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/rest-api.md#account-trade-list-user_data
#
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True
# }
#
# futures trades
# https://binance-docs.github.io/apidocs/futures/en/#account-trade-list-user_data
#
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
#
timestamp = self.safe_integer_2(trade, 'T', 'time')
price = self.safe_float_2(trade, 'p', 'price')
amount = self.safe_float_2(trade, 'q', 'qty')
id = self.safe_string_2(trade, 'a', 'id')
side = None
orderId = self.safe_string(trade, 'orderId')
if 'm' in trade:
            side = 'sell' if trade['m'] else 'buy'  # this is reversed intentionally
elif 'isBuyerMaker' in trade:
side = 'sell' if trade['isBuyerMaker'] else 'buy'
elif 'side' in trade:
side = self.safe_string_lower(trade, 'side')
else:
if 'isBuyer' in trade:
                side = 'buy' if trade['isBuyer'] else 'sell'  # this is a true side
fee = None
if 'commission' in trade:
fee = {
'cost': self.safe_float(trade, 'commission'),
'currency': self.safe_currency_code(self.safe_string(trade, 'commissionAsset')),
}
takerOrMaker = None
if 'isMaker' in trade:
takerOrMaker = 'maker' if trade['isMaker'] else 'taker'
symbol = None
if market is None:
marketId = self.safe_string(trade, 'symbol')
market = self.safe_value(self.markets_by_id, marketId)
if market is not None:
symbol = market['symbol']
return {
'info': trade,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': id,
'order': orderId,
'type': None,
'takerOrMaker': takerOrMaker,
'side': side,
'price': price,
'amount': amount,
'cost': price * amount,
'fee': fee,
}
async def fetch_trades(self, symbol, since=None, limit=None, params={}):
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
# 'fromId': 123, # ID to get aggregate trades from INCLUSIVE.
# 'startTime': 456, # Timestamp in ms to get aggregate trades from INCLUSIVE.
# 'endTime': 789, # Timestamp in ms to get aggregate trades until INCLUSIVE.
# 'limit': 500, # default = 500, maximum = 1000
}
if self.options['fetchTradesMethod'] == 'publicGetAggTrades':
if since is not None:
request['startTime'] = since
request['endTime'] = self.sum(since, 3600000)
if limit is not None:
request['limit'] = limit # default = 500, maximum = 1000
#
# Caveats:
        #  - the default limit (500) applies only if no other parameters are set; trades up
        #    to the maximum limit may be returned to satisfy other parameters
        #  - if both a limit and a time window are set and the window contains more
        #    trades than the limit, then the last trades from the window are returned
        #  - the 'tradeId' accepted and returned by this method is the "aggregate" trade id,
        #    which is different from the actual trade id
        #  - setting both fromId and a time window results in an error
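        # Illustrative paging call (symbol and id are placeholders, assuming the
        # exchange is configured to use the aggTrades endpoint):
        #     await exchange.fetch_trades('BTC/USDT', params={'fromId': 26129})
        # resumes aggregate trades from that id without setting a time window.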
method = self.safe_value(self.options, 'fetchTradesMethod', 'publicGetTrades')
response = await getattr(self, method)(self.extend(request, params))
#
# aggregate trades
#
# [
# {
# "a": 26129, # Aggregate tradeId
# "p": "0.01633102", # Price
# "q": "4.70443515", # Quantity
# "f": 27781, # First tradeId
# "l": 27781, # Last tradeId
# "T": 1498793709153, # Timestamp
# "m": True, # Was the buyer the maker?
# "M": True # Was the trade the best price match?
# }
# ]
#
# recent public trades and historical public trades
#
# [
# {
# "id": 28457,
# "price": "4.00000100",
# "qty": "12.00000000",
# "time": 1499865549590,
# "isBuyerMaker": True,
# "isBestMatch": True
# }
# ]
#
return self.parse_trades(response, market, since, limit)
def parse_order_status(self, status):
statuses = {
'NEW': 'open',
'PARTIALLY_FILLED': 'open',
'FILLED': 'closed',
'CANCELED': 'canceled',
'PENDING_CANCEL': 'canceling', # currently unused
'REJECTED': 'rejected',
'EXPIRED': 'expired',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
status = self.parse_order_status(self.safe_string(order, 'status'))
symbol = None
marketId = self.safe_string(order, 'symbol')
if marketId in self.markets_by_id:
market = self.markets_by_id[marketId]
if market is not None:
symbol = market['symbol']
timestamp = None
if 'time' in order:
timestamp = self.safe_integer(order, 'time')
elif 'transactTime' in order:
timestamp = self.safe_integer(order, 'transactTime')
price = self.safe_float(order, 'price')
amount = self.safe_float(order, 'origQty')
filled = self.safe_float(order, 'executedQty')
remaining = None
# - Spot/Margin market: cummulativeQuoteQty
# - Futures market: cumQuote.
        # Note this is not the actual cost, since Binance futures uses leverage to calculate margins.
cost = self.safe_float_2(order, 'cummulativeQuoteQty', 'cumQuote')
if filled is not None:
if amount is not None:
remaining = amount - filled
if self.options['parseOrderToPrecision']:
remaining = float(self.amount_to_precision(symbol, remaining))
remaining = max(remaining, 0.0)
if price is not None:
if cost is None:
cost = price * filled
id = self.safe_string(order, 'orderId')
type = self.safe_string_lower(order, 'type')
if type == 'market':
if price == 0.0:
if (cost is not None) and (filled is not None):
if (cost > 0) and (filled > 0):
price = cost / filled
if self.options['parseOrderToPrecision']:
price = float(self.price_to_precision(symbol, price))
side = self.safe_string_lower(order, 'side')
fee = None
trades = None
fills = self.safe_value(order, 'fills')
if fills is not None:
trades = self.parse_trades(fills, market)
numTrades = len(trades)
if numTrades > 0:
cost = trades[0]['cost']
fee = {
'cost': trades[0]['fee']['cost'],
'currency': trades[0]['fee']['currency'],
}
for i in range(1, len(trades)):
cost = self.sum(cost, trades[i]['cost'])
fee['cost'] = self.sum(fee['cost'], trades[i]['fee']['cost'])
average = None
if cost is not None:
if filled:
average = cost / filled
if self.options['parseOrderToPrecision']:
average = float(self.price_to_precision(symbol, average))
if self.options['parseOrderToPrecision']:
cost = float(self.cost_to_precision(symbol, cost))
return {
'info': order,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'cost': cost,
'average': average,
'filled': filled,
'remaining': remaining,
'status': status,
'fee': fee,
'trades': trades,
}
async def create_order(self, symbol, type, side, amount, price=None, params={}):
await self.load_markets()
market = self.market(symbol)
        # the following lines are added to support testing orders
method = 'privatePostOrder' if market['spot'] else 'fapiPrivatePostOrder'
if market['spot']:
test = self.safe_value(params, 'test', False)
if test:
method += 'Test'
params = self.omit(params, 'test')
uppercaseType = type.upper()
validOrderTypes = self.safe_value(market['info'], 'orderTypes')
if not self.in_array(uppercaseType, validOrderTypes):
raise InvalidOrder(self.id + ' ' + type + ' is not a valid order type in ' + market['type'] + ' market ' + symbol)
request = {
'symbol': market['id'],
'quantity': self.amount_to_precision(symbol, amount),
'type': uppercaseType,
'side': side.upper(),
}
if market['spot']:
request['newOrderRespType'] = self.safe_value(self.options['newOrderRespType'], type, 'RESULT') # 'ACK' for order id, 'RESULT' for full order or 'FULL' for order with fills
timeInForceIsRequired = False
priceIsRequired = False
stopPriceIsRequired = False
if uppercaseType == 'LIMIT':
priceIsRequired = True
timeInForceIsRequired = True
elif (uppercaseType == 'STOP_LOSS') or (uppercaseType == 'TAKE_PROFIT'):
stopPriceIsRequired = True
elif (uppercaseType == 'STOP_LOSS_LIMIT') or (uppercaseType == 'TAKE_PROFIT_LIMIT'):
stopPriceIsRequired = True
priceIsRequired = True
timeInForceIsRequired = True
elif uppercaseType == 'LIMIT_MAKER':
priceIsRequired = True
elif uppercaseType == 'STOP':
stopPriceIsRequired = True
priceIsRequired = True
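        # Illustrative call for a stop-limit order (symbol and numbers are placeholders):
        #     create_order('BTC/USDT', 'STOP_LOSS_LIMIT', 'sell', 0.01, 9000.0, {'stopPrice': 9100.0})
        # supplies the required price and stopPrice; timeInForce is filled in from
        # the defaultTimeInForce option below.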
if priceIsRequired:
if price is None:
raise InvalidOrder(self.id + ' createOrder method requires a price argument for a ' + type + ' order')
request['price'] = self.price_to_precision(symbol, price)
if timeInForceIsRequired:
            request['timeInForce'] = self.options['defaultTimeInForce']  # 'GTC' = Good Till Canceled(default), 'IOC' = Immediate Or Cancel
if stopPriceIsRequired:
stopPrice = self.safe_float(params, 'stopPrice')
if stopPrice is None:
raise InvalidOrder(self.id + ' createOrder method requires a stopPrice extra param for a ' + type + ' order')
else:
params = self.omit(params, 'stopPrice')
request['stopPrice'] = self.price_to_precision(symbol, stopPrice)
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
async def fetch_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
method = 'privateGetOrder' if market['spot'] else 'fapiPrivateGetOrder'
request = {
'symbol': market['id'],
}
origClientOrderId = self.safe_value(params, 'origClientOrderId')
if origClientOrderId is not None:
request['origClientOrderId'] = origClientOrderId
else:
request['orderId'] = int(id)
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response, market)
async def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOrders requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
method = 'privateGetAllOrders' if market['spot'] else 'fapiPrivateGetAllOrders'
response = await getattr(self, method)(self.extend(request, params))
#
# Spot:
# [
# {
# "symbol": "LTCBTC",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "0.0",
# "cummulativeQuoteQty": "0.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "icebergQty": "0.0",
# "time": 1499827319559,
# "updateTime": 1499827319559,
# "isWorking": True
# }
# ]
#
# Futures:
# [
# {
# "symbol": "BTCUSDT",
# "orderId": 1,
# "clientOrderId": "myOrder1",
# "price": "0.1",
# "origQty": "1.0",
# "executedQty": "1.0",
# "cumQuote": "10.0",
# "status": "NEW",
# "timeInForce": "GTC",
# "type": "LIMIT",
# "side": "BUY",
# "stopPrice": "0.0",
# "updateTime": 1499827319559
# }
# ]
#
return self.parse_orders(response, market, since, limit)
async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
await self.load_markets()
market = None
query = None
type = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
type = market['type']
query = params
elif self.options['warnOnFetchOpenOrdersWithoutSymbol']:
symbols = self.symbols
numSymbols = len(symbols)
fetchOpenOrdersRateLimit = int(numSymbols / 2)
            raise ExchangeError(self.id + ' fetchOpenOrders WARNING: fetching open orders without specifying a symbol is rate-limited to one call per ' + str(fetchOpenOrdersRateLimit) + ' seconds. Do not call this method frequently to avoid ban. Set ' + self.id + '.options["warnOnFetchOpenOrdersWithoutSymbol"] = False to suppress this warning message.')
else:
defaultType = self.safe_string_2(self.options, 'fetchOpenOrders', 'defaultType', 'spot')
type = self.safe_string(params, 'type', defaultType)
query = self.omit(params, 'type')
method = 'privateGetOpenOrders' if (type == 'spot') else 'fapiPrivateGetOpenOrders'
response = await getattr(self, method)(self.extend(request, query))
return self.parse_orders(response, market, since, limit)
async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
orders = await self.fetch_orders(symbol, since, limit, params)
return self.filter_by(orders, 'status', 'closed')
async def cancel_order(self, id, symbol=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' cancelOrder requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'orderId': int(id),
# 'origClientOrderId': id,
}
method = 'privateDeleteOrder' if market['spot'] else 'fapiPrivateDeleteOrder'
response = await getattr(self, method)(self.extend(request, params))
return self.parse_order(response)
async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchMyTrades requires a symbol argument')
await self.load_markets()
market = self.market(symbol)
method = 'privateGetMyTrades' if market['spot'] else 'fapiPrivateGetUserTrades'
request = {
'symbol': market['id'],
}
if since is not None:
request['startTime'] = since
if limit is not None:
request['limit'] = limit
response = await getattr(self, method)(self.extend(request, params))
#
# spot trade
# [
# {
# "symbol": "BNBBTC",
# "id": 28457,
# "orderId": 100234,
# "price": "4.00000100",
# "qty": "12.00000000",
# "commission": "10.10000000",
# "commissionAsset": "BNB",
# "time": 1499865549590,
# "isBuyer": True,
# "isMaker": False,
# "isBestMatch": True,
# }
# ]
#
# futures trade
#
# [
# {
# "accountId": 20,
# "buyer": False,
# "commission": "-0.07819010",
# "commissionAsset": "USDT",
# "counterPartyId": 653,
# "id": 698759,
# "maker": False,
# "orderId": 25851813,
# "price": "7819.01",
# "qty": "0.002",
# "quoteQty": "0.01563",
# "realizedPnl": "-0.91539999",
# "side": "SELL",
# "symbol": "BTCUSDT",
# "time": 1569514978020
# }
# ]
return self.parse_trades(response, market, since, limit)
async def fetch_my_dust_trades(self, symbol=None, since=None, limit=None, params={}):
#
        # Binance provides an opportunity to trade insignificant (i.e. non-tradable and non-withdrawable)
        # token leftovers (of any asset) into `BNB` coin, which in turn can be used to pay trading fees.
        # The corresponding trade history is called the `Dust Log` and can be requested via the following endpoint:
# https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#dustlog-user_data
#
await self.load_markets()
response = await self.wapiGetUserAssetDribbletLog(params)
# {success: True,
# results: {total: 1,
# rows: [{ transfered_total: "1.06468458",
# service_charge_total: "0.02172826",
# tran_id: 2701371634,
# logs: [{ tranId: 2701371634,
# serviceChargeAmount: "0.00012819",
# uid: "35103861",
# amount: "0.8012",
# operateTime: "2018-10-07 17:56:07",
# transferedAmount: "0.00628141",
# fromAsset: "ADA" }],
# operate_time: "2018-10-07 17:56:06" }]}}
results = self.safe_value(response, 'results', {})
rows = self.safe_value(results, 'rows', [])
data = []
for i in range(0, len(rows)):
logs = rows[i]['logs']
for j in range(0, len(logs)):
logs[j]['isDustTrade'] = True
data.append(logs[j])
trades = self.parse_trades(data, None, since, limit)
return self.filter_by_since_limit(trades, since, limit)
def parse_dust_trade(self, trade, market=None):
# { tranId: 2701371634,
# serviceChargeAmount: "0.00012819",
# uid: "35103861",
# amount: "0.8012",
# operateTime: "2018-10-07 17:56:07",
# transferedAmount: "0.00628141",
# fromAsset: "ADA" },
orderId = self.safe_string(trade, 'tranId')
timestamp = self.parse8601(self.safe_string(trade, 'operateTime'))
tradedCurrency = self.safe_currency_code(self.safe_string(trade, 'fromAsset'))
earnedCurrency = self.currency('BNB')['code']
applicantSymbol = earnedCurrency + '/' + tradedCurrency
tradedCurrencyIsQuote = False
if applicantSymbol in self.markets:
tradedCurrencyIsQuote = True
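        # For example (assuming 'BNB/ADA' is not a listed market while 'ADA/BNB' is),
        # a dust conversion from ADA is parsed below as a 'sell' on the 'ADA/BNB'
        # symbol; if the reversed pair were listed, it would instead be parsed as a
        # 'buy' on that pair.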
#
# Warning
# Binance dust trade `fee` is already excluded from the `BNB` earning reported in the `Dust Log`.
# So the parser should either set the `fee.cost` to `0` or add it on top of the earned
# BNB `amount`(or `cost` depending on the trade `side`). The second of the above options
# is much more illustrative and therefore preferable.
#
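        # Worked example with the sample numbers above (illustrative): the log reports
        # transferedAmount 0.00628141 BNB and serviceChargeAmount 0.00012819 BNB, so
        # the gross figure restored below is 0.00628141 + 0.00012819 = 0.00640960 BNB.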
fee = {
'currency': earnedCurrency,
'cost': self.safe_float(trade, 'serviceChargeAmount'),
}
symbol = None
amount = None
cost = None
side = None
if tradedCurrencyIsQuote:
symbol = applicantSymbol
amount = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
cost = self.safe_float(trade, 'amount')
side = 'buy'
else:
symbol = tradedCurrency + '/' + earnedCurrency
amount = self.safe_float(trade, 'amount')
cost = self.sum(self.safe_float(trade, 'transferedAmount'), fee['cost'])
side = 'sell'
price = None
if cost is not None:
if amount:
price = cost / amount
id = None
type = None
takerOrMaker = None
return {
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'order': orderId,
'type': type,
'takerOrMaker': takerOrMaker,
'side': side,
'amount': amount,
'price': price,
'cost': cost,
'fee': fee,
'info': trade,
}
async def fetch_deposits(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTime'] = since
response = await self.wapiGetDepositHistory(self.extend(request, params))
#
# { success: True,
# depositList: [{insertTime: 1517425007000,
# amount: 0.3,
# address: "0x0123456789abcdef",
# addressTag: "",
# txId: "0x0123456789abcdef",
# asset: "ETH",
# status: 1 }]}
#
return self.parse_transactions(response['depositList'], currency, since, limit)
async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
await self.load_markets()
currency = None
request = {}
if code is not None:
currency = self.currency(code)
request['asset'] = currency['id']
if since is not None:
request['startTime'] = since
response = await self.wapiGetWithdrawHistory(self.extend(request, params))
#
# {withdrawList: [{ amount: 14,
# address: "0x0123456789abcdef...",
# successTime: 1514489710000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ETH",
# applyTime: 1514488724000,
# status: 6 },
# { amount: 7600,
# address: "0x0123456789abcdef...",
# successTime: 1515323226000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ICN",
# applyTime: 1515322539000,
# status: 6 } ],
# success: True }
#
return self.parse_transactions(response['withdrawList'], currency, since, limit)
def parse_transaction_status_by_type(self, status, type=None):
if type is None:
return status
statuses = {
'deposit': {
'0': 'pending',
'1': 'ok',
},
'withdrawal': {
'0': 'pending', # Email Sent
'1': 'canceled', # Cancelled(different from 1 = ok in deposits)
'2': 'pending', # Awaiting Approval
'3': 'failed', # Rejected
'4': 'pending', # Processing
'5': 'failed', # Failure
'6': 'ok', # Completed
},
}
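        # For example, ('1', 'deposit') maps to 'ok', ('1', 'withdrawal') to 'canceled',
        # and ('6', 'withdrawal') to 'ok'; unknown status codes fall through unchanged.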
return statuses[type][status] if (status in statuses[type]) else status
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
# {insertTime: 1517425007000,
# amount: 0.3,
# address: "0x0123456789abcdef",
# addressTag: "",
# txId: "0x0123456789abcdef",
# asset: "ETH",
# status: 1 }
#
# fetchWithdrawals
#
# { amount: 14,
# address: "0x0123456789abcdef...",
# successTime: 1514489710000,
# transactionFee: 0.01,
# addressTag: "",
# txId: "0x0123456789abcdef...",
# id: "0123456789abcdef...",
# asset: "ETH",
# applyTime: 1514488724000,
# status: 6 }
#
id = self.safe_string(transaction, 'id')
address = self.safe_string(transaction, 'address')
tag = self.safe_string(transaction, 'addressTag') # set but unused
if tag is not None:
if len(tag) < 1:
tag = None
txid = self.safe_value(transaction, 'txId')
currencyId = self.safe_string(transaction, 'asset')
code = self.safe_currency_code(currencyId, currency)
timestamp = None
insertTime = self.safe_integer(transaction, 'insertTime')
applyTime = self.safe_integer(transaction, 'applyTime')
type = self.safe_string(transaction, 'type')
if type is None:
if (insertTime is not None) and (applyTime is None):
type = 'deposit'
timestamp = insertTime
elif (insertTime is None) and (applyTime is not None):
type = 'withdrawal'
timestamp = applyTime
status = self.parse_transaction_status_by_type(self.safe_string(transaction, 'status'), type)
amount = self.safe_float(transaction, 'amount')
feeCost = self.safe_float(transaction, 'transactionFee')
fee = None
if feeCost is not None:
fee = {'currency': code, 'cost': feeCost}
return {
'info': transaction,
'id': id,
'txid': txid,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': address,
'tag': tag,
'type': type,
'amount': amount,
'currency': code,
'status': status,
'updated': None,
'fee': fee,
}
async def fetch_deposit_address(self, code, params={}):
await self.load_markets()
currency = self.currency(code)
request = {
'asset': currency['id'],
}
response = await self.wapiGetDepositAddress(self.extend(request, params))
success = self.safe_value(response, 'success')
if (success is None) or not success:
raise InvalidAddress(self.id + ' fetchDepositAddress returned an empty response – create the deposit address in the user settings first.')
address = self.safe_string(response, 'address')
tag = self.safe_string(response, 'addressTag')
self.check_address(address)
return {
'currency': code,
'address': self.check_address(address),
'tag': tag,
'info': response,
}
async def fetch_funding_fees(self, codes=None, params={}):
response = await self.wapiGetAssetDetail(params)
#
# {
# "success": True,
# "assetDetail": {
# "CTR": {
# "minWithdrawAmount": "70.00000000", #min withdraw amount
# "depositStatus": False,//deposit status
# "withdrawFee": 35, # withdraw fee
# "withdrawStatus": True, #withdraw status
# "depositTip": "Delisted, Deposit Suspended" #reason
# },
# "SKY": {
# "minWithdrawAmount": "0.02000000",
# "depositStatus": True,
# "withdrawFee": 0.01,
# "withdrawStatus": True
# }
# }
# }
#
detail = self.safe_value(response, 'assetDetail', {})
ids = list(detail.keys())
withdrawFees = {}
for i in range(0, len(ids)):
id = ids[i]
code = self.safe_currency_code(id)
withdrawFees[code] = self.safe_float(detail[id], 'withdrawFee')
return {
'withdraw': withdrawFees,
'deposit': {},
'info': response,
}
async def withdraw(self, code, amount, address, tag=None, params={}):
self.check_address(address)
await self.load_markets()
currency = self.currency(code)
        # name is optional, can be overridden via params
name = address[0:20]
request = {
'asset': currency['id'],
'address': address,
'amount': float(amount),
            'name': name,  # name is optional, can be overridden via params
# https://binance-docs.github.io/apidocs/spot/en/#withdraw-sapi
# issue sapiGetCapitalConfigGetall() to get networks for withdrawing USDT ERC20 vs USDT Omni
# 'network': 'ETH', # 'BTC', 'TRX', etc, optional
}
if tag is not None:
request['addressTag'] = tag
response = await self.wapiPostWithdraw(self.extend(request, params))
return {
'info': response,
'id': self.safe_string(response, 'id'),
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = self.urls['api'][api]
url += '/' + path
if api == 'wapi':
url += '.html'
userDataStream = ((path == 'userDataStream') or (path == 'listenKey'))
if path == 'historicalTrades':
headers = {
'X-MBX-APIKEY': self.apiKey,
}
elif userDataStream:
# v1 special case for userDataStream
body = self.urlencode(params)
headers = {
'X-MBX-APIKEY': self.apiKey,
'Content-Type': 'application/x-www-form-urlencoded',
}
if (api == 'private') or (api == 'sapi') or (api == 'wapi' and path != 'systemStatus') or (api == 'fapiPrivate'):
self.check_required_credentials()
query = self.urlencode(self.extend({
'timestamp': self.nonce(),
'recvWindow': self.options['recvWindow'],
}, params))
signature = self.hmac(self.encode(query), self.encode(self.secret))
query += '&' + 'signature=' + signature
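            # The signed query then looks roughly like (values are placeholders):
            # 'timestamp=1499827319559&recvWindow=5000&symbol=BTCUSDT&signature=<hex HMAC of the query string keyed with the API secret>'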
headers = {
'X-MBX-APIKEY': self.apiKey,
}
if (method == 'GET') or (method == 'DELETE') or (api == 'wapi'):
url += '?' + query
else:
body = query
headers['Content-Type'] = 'application/x-www-form-urlencoded'
else:
# userDataStream endpoints are public, but POST, PUT, DELETE
# therefore they don't accept URL query arguments
# https://github.com/ccxt/ccxt/issues/5224
if not userDataStream:
if params:
url += '?' + self.urlencode(params)
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 418) or (code == 429):
raise DDoSProtection(self.id + ' ' + str(code) + ' ' + reason + ' ' + body)
# error response in a form: {"code": -1013, "msg": "Invalid quantity."}
        # the following block contains legacy checks against message patterns in the "msg" property
        # we will switch to "code" checks eventually, once we know all of them
if code >= 400:
if body.find('Price * QTY is zero or less') >= 0:
raise InvalidOrder(self.id + ' order cost = amount * price is zero or less ' + body)
if body.find('LOT_SIZE') >= 0:
raise InvalidOrder(self.id + ' order amount should be evenly divisible by lot size ' + body)
if body.find('PRICE_FILTER') >= 0:
raise InvalidOrder(self.id + ' order price is invalid, i.e. exceeds allowed price precision, exceeds min price or max price limits or is invalid float value in general, use self.price_to_precision(symbol, amount) ' + body)
if len(body) > 0:
if body[0] == '{':
# check success value for wapi endpoints
# response in format {'msg': 'The coin does not exist.', 'success': True/false}
success = self.safe_value(response, 'success', True)
if not success:
message = self.safe_string(response, 'msg')
parsedMessage = None
if message is not None:
try:
parsedMessage = json.loads(message)
except Exception as e:
# do nothing
parsedMessage = None
if parsedMessage is not None:
response = parsedMessage
message = self.safe_string(response, 'msg')
if message is not None:
self.throw_exactly_matched_exception(self.exceptions, message, self.id + ' ' + message)
# checks against error codes
error = self.safe_string(response, 'code')
if error is not None:
# a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
# despite that their message is very confusing, it is raised by Binance
# on a temporary ban(the API key is valid, but disabled for a while)
if (error == '-2015') and self.options['hasAlreadyAuthenticatedSuccessfully']:
raise DDoSProtection(self.id + ' temporary banned: ' + body)
feedback = self.id + ' ' + body
self.throw_exactly_matched_exception(self.exceptions, error, feedback)
raise ExchangeError(feedback)
if not success:
raise ExchangeError(self.id + ' ' + body)
async def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
response = await self.fetch2(path, api, method, params, headers, body)
# a workaround for {"code":-2015,"msg":"Invalid API-key, IP, or permissions for action."}
if (api == 'private') or (api == 'wapi'):
self.options['hasAlreadyAuthenticatedSuccessfully'] = True
return response
| 44.53375 | 355 | 0.481292 |
aceed729d9add33f50ff8793e4a5a48a378a620d | 15,777 | py | Python | dagobah/core/task.py | rclough/dagobah | 5480cbaae4d5d17d10d497ba657dffd7677953b0 | [
"WTFPL"
] | null | null | null | dagobah/core/task.py | rclough/dagobah | 5480cbaae4d5d17d10d497ba657dffd7677953b0 | [
"WTFPL"
] | null | null | null | dagobah/core/task.py | rclough/dagobah | 5480cbaae4d5d17d10d497ba657dffd7677953b0 | [
"WTFPL"
] | null | null | null | import os
from datetime import datetime
import threading
import subprocess
import json
import paramiko
import logging
from .components import StrictJSONEncoder
from .dagobah_error import DagobahError
logger = logging.getLogger('dagobah')
class Task(object):
""" Handles execution and reporting for an individual process.
Emitted events:
task_failed: On failure of an individual task. Returns the
current serialization of the task with run logs.
"""
def __init__(self, parent_job, command, name,
soft_timeout=0, hard_timeout=0, hostname=None):
logger.debug('Starting Task instance constructor with name {0}'.
format(name))
self.parent_job = parent_job
self.backend = self.parent_job.backend
self.event_handler = self.parent_job.event_handler
self.command = command
self.name = name
self.hostname = hostname
self.delegator = parent_job.delegator
self.delegator.commit_job(self.parent_job)
self.remote_channel = None
self.process = None
self.stdout = ""
self.stderr = ""
self.stdout_file = None
self.stderr_file = None
self.timer = None
self.started_at = None
self.completed_at = None
self.successful = None
self.terminate_sent = False
self.kill_sent = False
self.remote_failure = False
self.set_soft_timeout(soft_timeout)
self.set_hard_timeout(hard_timeout)
def set_soft_timeout(self, timeout):
logger.debug('Task {0} setting soft timeout'.format(self.name))
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError('timeouts must be non-negative numbers')
self.soft_timeout = timeout
self.delegator.commit_job(self.parent_job)
def set_hard_timeout(self, timeout):
logger.debug('Task {0} setting hard timeout'.format(self.name))
if not isinstance(timeout, (int, float)) or timeout < 0:
raise ValueError('timeouts must be non-negative numbers')
self.hard_timeout = timeout
self.delegator.commit_job(self.parent_job)
def set_hostname(self, hostname):
logger.debug('Task {0} setting hostname'.format(self.name))
self.hostname = hostname
self.delegator.commit_job(self.parent_job)
def reset(self):
""" Reset this Task to a clean state prior to execution. """
logger.debug('Resetting task {0}'.format(self.name))
self.stdout_file = os.tmpfile()
self.stderr_file = os.tmpfile()
self.stdout = ""
self.stderr = ""
self.started_at = None
self.completed_at = None
self.successful = None
self.terminate_sent = False
self.kill_sent = False
self.remote_failure = False
def start(self):
""" Begin execution of this task. """
logger.info('Starting task {0}'.format(self.name))
self.reset()
if self.hostname:
host = self.parent_job.parent.get_host(self.hostname)
if host:
self.remote_ssh(host)
else:
self.remote_failure = True
else:
self.process = subprocess.Popen(self.command,
shell=True,
env=os.environ.copy(),
stdout=self.stdout_file,
stderr=self.stderr_file)
self.started_at = datetime.utcnow()
self._start_check_timer()
def remote_ssh(self, host):
""" Execute a command on SSH. Takes a paramiko host dict """
logger.info('Starting remote execution of task {0} on host {1}'.
format(self.name, host['hostname']))
try:
self.remote_client = paramiko.SSHClient()
self.remote_client.load_system_host_keys()
self.remote_client.set_missing_host_key_policy(
paramiko.AutoAddPolicy())
self.remote_client.connect(host['hostname'], username=host['user'],
key_filename=host['identityfile'][0],
timeout=82800)
transport = self.remote_client.get_transport()
transport.set_keepalive(10)
self.remote_channel = transport.open_session()
self.remote_channel.get_pty()
self.remote_channel.exec_command(self.command)
except Exception as e:
logger.warn('Exception encountered in remote task execution')
self.remote_failure = True
self.stderr += 'Exception when trying to SSH related to: '
            self.stderr += '{0}: {1}\n'.format(type(e).__name__, str(e))
self.stderr += 'Was looking for host "{0}"\n'.format(str(host))
self.stderr += 'Found in config:\n'
self.stderr += 'host: "{0}"\n'.format(str(host))
self.stderr += 'hostname: "{0}"\n'.format(str(host.get('hostname')))
self.stderr += 'user: "{0}"\n'.format(str(host.get('user')))
self.stderr += 'identityfile: "{0}"\n'.format(str(host.get('identityfile')))
self.remote_client.close()
def check_complete(self):
""" Runs completion flow for this task if it's finished. """
logger.debug('Running check_complete for task {0}'.format(self.name))
# Tasks not completed
if self.remote_not_complete() or self.local_not_complete():
self._start_check_timer()
return
return_code = self.completed_task()
# Handle task errors
if self.terminate_sent:
self.stderr += '\nDAGOBAH SENT SIGTERM TO THIS PROCESS\n'
if self.kill_sent:
self.stderr += '\nDAGOBAH SENT SIGKILL TO THIS PROCESS\n'
if self.remote_failure:
return_code = -1
self.stderr += '\nAn error occurred with the remote machine.\n'
self.stdout_file = None
self.stderr_file = None
self._task_complete(success=True if return_code == 0 else False,
return_code=return_code,
stdout=self.stdout,
stderr=self.stderr,
start_time=self.started_at,
complete_time=datetime.utcnow())
def remote_not_complete(self):
"""
        Returns True if this task is running on a remote machine and has not
        yet completed, False if it is either not remote or already completed
"""
if self.remote_channel and not self.remote_channel.exit_status_ready():
self._timeout_check()
# Get some stdout/std error
if self.remote_channel.recv_ready():
self.stdout += self.remote_channel.recv(1024)
if self.remote_channel.recv_stderr_ready():
self.stderr += self.remote_channel.recv_stderr(1024)
return True
return False
def local_not_complete(self):
""" Returns True if task is local and not completed"""
if self.process and self.process.poll() is None:
self._timeout_check()
return True
return False
def completed_task(self):
""" Handle wrapping up a completed task, local or remote"""
        # If it's remote and finished running
if self.remote_channel and self.remote_channel.exit_status_ready():
# Collect all remaining stdout/stderr
while self.remote_channel.recv_ready():
self.stdout += self.remote_channel.recv(1024)
while self.remote_channel.recv_stderr_ready():
self.stderr += self.remote_channel.recv_stderr(1024)
return self.remote_channel.recv_exit_status()
# Otherwise check for finished local command
elif self.process:
self.stdout, self.stderr = (self._read_temp_file(self.stdout_file),
self._read_temp_file(self.stderr_file))
for temp_file in [self.stdout_file, self.stderr_file]:
temp_file.close()
return self.process.returncode
def terminate(self):
""" Send SIGTERM to the task's process. """
logger.info('Sending SIGTERM to task {0}'.format(self.name))
if hasattr(self, 'remote_client') and self.remote_client is not None:
self.terminate_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError('task does not have a running process')
self.terminate_sent = True
self.process.terminate()
def kill(self):
""" Send SIGKILL to the task's process. """
logger.info('Sending SIGKILL to task {0}'.format(self.name))
if hasattr(self, 'remote_client') and self.remote_client is not None:
self.kill_sent = True
self.remote_client.close()
return
if not self.process:
raise DagobahError('task does not have a running process')
self.kill_sent = True
self.process.kill()
def head(self, stream='stdout', num_lines=10):
""" Head a specified stream (stdout or stderr) by num_lines. """
target = self._map_string_to_file(stream)
if not target: # no current temp file
last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
self.name)
if not last_run:
return None
return self._head_string(last_run['tasks'][self.name][stream],
num_lines)
else:
return self._head_temp_file(target, num_lines)
def tail(self, stream='stdout', num_lines=10):
""" Tail a specified stream (stdout or stderr) by num_lines. """
target = self._map_string_to_file(stream)
if not target: # no current temp file
last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
self.name)
if not last_run:
return None
return self._tail_string(last_run['tasks'][self.name][stream],
num_lines)
else:
return self._tail_temp_file(target, num_lines)
def get_stdout(self):
""" Returns the entire stdout output of this process. """
return self._read_temp_file(self.stdout_file)
def get_stderr(self):
""" Returns the entire stderr output of this process. """
return self._read_temp_file(self.stderr_file)
def _timeout_check(self):
logger.debug('Running timeout check for task {0}'.format(self.name))
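        # For example, with soft_timeout=60 and hard_timeout=120 a task gets SIGTERM
        # once it has been running for 60 seconds and SIGKILL at 120 seconds; a
        # timeout of 0 disables the corresponding check.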
if (self.soft_timeout != 0 and
(datetime.utcnow() - self.started_at).seconds >= self.soft_timeout
and not self.terminate_sent):
self.terminate()
if (self.hard_timeout != 0 and
(datetime.utcnow() - self.started_at).seconds >= self.hard_timeout
and not self.kill_sent):
self.kill()
def get_run_log_history(self):
return self.backend.get_run_log_history(self.parent_job.job_id,
self.name)
def get_run_log(self, log_id):
return self.backend.get_run_log(self.parent_job.job_id, self.name,
log_id)
def _map_string_to_file(self, stream):
if stream not in ['stdout', 'stderr']:
raise DagobahError('stream must be stdout or stderr')
return self.stdout_file if stream == 'stdout' else self.stderr_file
def _start_check_timer(self):
""" Periodically checks to see if the task has completed. """
if self.timer:
self.timer.cancel()
self.timer = threading.Timer(2.5, self.check_complete)
self.timer.daemon = True
self.timer.start()
def _read_temp_file(self, temp_file):
""" Reads a temporary file for Popen stdout and stderr. """
temp_file.seek(0)
result = temp_file.read()
return result
def _head_string(self, in_str, num_lines):
""" Returns a list of the first num_lines lines from a string. """
return in_str.split('\n')[:num_lines]
def _tail_string(self, in_str, num_lines):
""" Returns a list of the last num_lines lines from a string. """
return in_str.split('\n')[-1 * num_lines:]
def _head_temp_file(self, temp_file, num_lines):
""" Returns a list of the first num_lines lines from a temp file. """
if not isinstance(num_lines, int):
raise DagobahError('num_lines must be an integer')
temp_file.seek(0)
result, curr_line = [], 0
for line in temp_file:
curr_line += 1
result.append(line.strip())
if curr_line >= num_lines:
break
return result
def _tail_temp_file(self, temp_file, num_lines, seek_offset=10000):
""" Returns a list of the last num_lines lines from a temp file.
This works by first moving seek_offset chars back from the end of
the file, then attempting to tail the file from there. It is
possible that fewer than num_lines will be returned, even if the
file has more total lines than num_lines.
"""
if not isinstance(num_lines, int):
raise DagobahError('num_lines must be an integer')
temp_file.seek(0, os.SEEK_END)
size = temp_file.tell()
temp_file.seek(-1 * min(size, seek_offset), os.SEEK_END)
result = []
while True:
this_line = temp_file.readline()
if this_line == '':
break
result.append(this_line.strip())
if len(result) > num_lines:
result.pop(0)
return result
def _task_complete(self, **kwargs):
""" Performs cleanup tasks and notifies Job that the Task finished. """
logger.debug('Running _task_complete for task {0}'.format(self.name))
with self.parent_job.completion_lock:
self.completed_at = datetime.utcnow()
self.successful = kwargs.get('success', None)
self.parent_job._complete_task(self.name, **kwargs)
def _serialize(self, include_run_logs=False, strict_json=False):
""" Serialize a representation of this Task to a Python dict. """
result = {'command': self.command,
'name': self.name,
'started_at': self.started_at,
'completed_at': self.completed_at,
'success': self.successful,
'soft_timeout': self.soft_timeout,
'hard_timeout': self.hard_timeout,
'hostname': self.hostname}
if include_run_logs:
last_run = self.backend.get_latest_run_log(self.parent_job.job_id,
self.name)
if last_run:
run_log = last_run.get('tasks', {}).get(self.name, {})
if run_log:
result['run_log'] = run_log
if strict_json:
result = json.loads(json.dumps(result, cls=StrictJSONEncoder))
return result
def clone(self):
cloned_task = Task(self.parent_job, self.command, self.name,
soft_timeout=self.soft_timeout,
hard_timeout=self.hard_timeout,
hostname=self.hostname)
return cloned_task
| 39.640704 | 88 | 0.589466 |
aceed7fc953e093d438cd65ea175aa6f85329243 | 933 | py | Python | tests/app/config_paths_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | null | null | null | tests/app/config_paths_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | 2 | 2021-05-11T23:42:51.000Z | 2022-01-22T16:55:01.000Z | tests/app/config_paths_test.py | Zsailer/voila | f523a7e5bacfe9f5757b5d50c64289774f84b96f | [
"BSD-3-Clause"
] | 1 | 2020-06-20T09:55:16.000Z | 2020-06-20T09:55:16.000Z | # test all objects that should be configurable
import pytest
import os
BASE_DIR = os.path.dirname(__file__)
@pytest.fixture
def voila_config_file_paths_arg():
path = os.path.join(BASE_DIR, '..', 'configs', 'general')
return '--VoilaTest.config_file_paths=[%r]' % path
def test_config_app(voila_app):
assert voila_app.voila_configuration.template == 'test_template'
assert voila_app.voila_configuration.enable_nbextensions is True
def test_config_kernel_manager(voila_app):
assert voila_app.kernel_manager.cull_interval == 10
def test_config_contents_manager(voila_app):
assert voila_app.contents_manager.use_atomic_writing is False
@pytest.mark.gen_test
def test_template(http_client, base_url):
response = yield http_client.fetch(base_url)
assert response.code == 200
assert 'test_template.css' in response.body.decode('utf-8')
assert 'Hi Voila' in response.body.decode('utf-8')
| 26.657143 | 68 | 0.767417 |
aceed92e27050fcf7c3dbf2139ae0d8f6420f44a | 1,155 | py | Python | test/comparison/test_array_formula.py | LuaDist-testing/xlsxwriter | 2cb37d0c19413699d7e47013858b44e1ff6b34bb | [
"MIT"
] | 2 | 2016-04-10T21:08:57.000Z | 2020-12-17T19:10:50.000Z | test/comparison/test_array_formula.py | LuaDist-testing/xlsxwriter | 2cb37d0c19413699d7e47013858b44e1ff6b34bb | [
"MIT"
] | 1 | 2022-01-13T13:02:30.000Z | 2022-01-13T13:02:30.000Z | test/comparison/test_array_formula.py | isabella232/xlsxwriter | 21542b5d672f97ae69e0d36164bd7f67b1c532cd | [
"MIT"
] | 6 | 2016-01-21T02:28:37.000Z | 2019-08-30T10:40:35.000Z | ###############################################################################
#
# Test cases for xlsxwriter.lua.
#
# Copyright (c), 2014, John McNamara, jmcnamara@cpan.org
#
import base_test_class
class TestCompareXLSXFiles(base_test_class.XLSXBaseTest):
"""
Test file created with xlsxwriter.lua against a file created by Excel.
These tests check array formulas.
"""
def test_array_formula01(self):
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
self.run_lua_test('test_array_formula01')
def test_array_formula02(self):
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
self.run_lua_test('test_array_formula02')
def test_array_formula03(self):
self.ignore_files = ['xl/calcChain.xml',
'[Content_Types].xml',
'xl/_rels/workbook.xml.rels']
self.run_lua_test('test_array_formula03', 'array_formula01.xlsx')
| 33 | 79 | 0.552381 |
aceed99a5d9369d429c1290dfef98ca94cabbb65 | 33,335 | py | Python | lib/sqlalchemy/sql/traversals.py | jmg-duarte/sqlalchemy | 47c91d06b56b0a0cf366d3c1f8b6d71a82149e43 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/traversals.py | jmg-duarte/sqlalchemy | 47c91d06b56b0a0cf366d3c1f8b6d71a82149e43 | [
"MIT"
] | null | null | null | lib/sqlalchemy/sql/traversals.py | jmg-duarte/sqlalchemy | 47c91d06b56b0a0cf366d3c1f8b6d71a82149e43 | [
"MIT"
] | 1 | 2022-02-28T20:16:29.000Z | 2022-02-28T20:16:29.000Z | # sql/traversals.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
# mypy: allow-untyped-defs, allow-untyped-calls
from __future__ import annotations
from collections import deque
import collections.abc as collections_abc
import itertools
from itertools import zip_longest
import operator
import typing
from typing import Any
from typing import Callable
from typing import Deque
from typing import Dict
from typing import Iterable
from typing import Set
from typing import Tuple
from typing import Type
from typing import TypeVar
from . import operators
from .cache_key import HasCacheKey
from .visitors import _TraverseInternalsType
from .visitors import anon_map
from .visitors import ExternallyTraversible
from .visitors import HasTraversalDispatch
from .visitors import HasTraverseInternals
from .. import util
from ..util import langhelpers
SKIP_TRAVERSE = util.symbol("skip_traverse")
COMPARE_FAILED = False
COMPARE_SUCCEEDED = True
def compare(obj1, obj2, **kw):
strategy: TraversalComparatorStrategy
if kw.get("use_proxies", False):
strategy = ColIdentityComparatorStrategy()
else:
strategy = TraversalComparatorStrategy()
return strategy.compare(obj1, obj2, **kw)
def _preconfigure_traversals(target_hierarchy):
for cls in util.walk_subclasses(target_hierarchy):
if hasattr(cls, "_generate_cache_attrs") and hasattr(
cls, "_traverse_internals"
):
cls._generate_cache_attrs() # type: ignore
_copy_internals.generate_dispatch(
cls, # type: ignore
cls._traverse_internals, # type: ignore
"_generated_copy_internals_traversal",
)
_get_children.generate_dispatch(
cls, # type: ignore
cls._traverse_internals, # type: ignore
"_generated_get_children_traversal",
)
SelfHasShallowCopy = TypeVar("SelfHasShallowCopy", bound="HasShallowCopy")
class HasShallowCopy(HasTraverseInternals):
"""attribute-wide operations that are useful for classes that use
__slots__ and therefore can't operate on their attributes in a dictionary.
"""
__slots__ = ()
if typing.TYPE_CHECKING:
def _generated_shallow_copy_traversal(
self: SelfHasShallowCopy, other: SelfHasShallowCopy
) -> None:
...
def _generated_shallow_from_dict_traversal(
self, d: Dict[str, Any]
) -> None:
...
def _generated_shallow_to_dict_traversal(self) -> Dict[str, Any]:
...
@classmethod
def _generate_shallow_copy(
cls: Type[SelfHasShallowCopy],
internal_dispatch: _TraverseInternalsType,
method_name: str,
) -> Callable[[SelfHasShallowCopy, SelfHasShallowCopy], None]:
code = "\n".join(
f" other.{attrname} = self.{attrname}"
for attrname, _ in internal_dispatch
)
meth_text = f"def {method_name}(self, other):\n{code}\n"
return langhelpers._exec_code_in_env(meth_text, {}, method_name)
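    # For illustration (hypothetical attribute names): with
    # internal_dispatch = [("name", ...), ("type", ...)] the generated source is
    #
    #     def _generated_shallow_copy_traversal(self, other):
    #         other.name = self.name
    #         other.type = self.type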
@classmethod
def _generate_shallow_to_dict(
cls: Type[SelfHasShallowCopy],
internal_dispatch: _TraverseInternalsType,
method_name: str,
) -> Callable[[SelfHasShallowCopy], Dict[str, Any]]:
code = ",\n".join(
f" '{attrname}': self.{attrname}"
for attrname, _ in internal_dispatch
)
meth_text = f"def {method_name}(self):\n return {{{code}}}\n"
return langhelpers._exec_code_in_env(meth_text, {}, method_name)
@classmethod
def _generate_shallow_from_dict(
cls: Type[SelfHasShallowCopy],
internal_dispatch: _TraverseInternalsType,
method_name: str,
) -> Callable[[SelfHasShallowCopy, Dict[str, Any]], None]:
code = "\n".join(
f" self.{attrname} = d['{attrname}']"
for attrname, _ in internal_dispatch
)
meth_text = f"def {method_name}(self, d):\n{code}\n"
return langhelpers._exec_code_in_env(meth_text, {}, method_name)
def _shallow_from_dict(self, d: Dict[str, Any]) -> None:
cls = self.__class__
shallow_from_dict: Callable[[HasShallowCopy, Dict[str, Any]], None]
try:
shallow_from_dict = cls.__dict__[
"_generated_shallow_from_dict_traversal"
]
except KeyError:
shallow_from_dict = self._generate_shallow_from_dict(
cls._traverse_internals,
"_generated_shallow_from_dict_traversal",
)
cls._generated_shallow_from_dict_traversal = shallow_from_dict # type: ignore # noqa: E501
shallow_from_dict(self, d)
def _shallow_to_dict(self) -> Dict[str, Any]:
cls = self.__class__
shallow_to_dict: Callable[[HasShallowCopy], Dict[str, Any]]
try:
shallow_to_dict = cls.__dict__[
"_generated_shallow_to_dict_traversal"
]
except KeyError:
shallow_to_dict = self._generate_shallow_to_dict(
cls._traverse_internals, "_generated_shallow_to_dict_traversal"
)
cls._generated_shallow_to_dict_traversal = shallow_to_dict # type: ignore # noqa: E501
return shallow_to_dict(self)
def _shallow_copy_to(
self: SelfHasShallowCopy, other: SelfHasShallowCopy
) -> None:
cls = self.__class__
shallow_copy: Callable[[SelfHasShallowCopy, SelfHasShallowCopy], None]
try:
shallow_copy = cls.__dict__["_generated_shallow_copy_traversal"]
except KeyError:
shallow_copy = self._generate_shallow_copy(
cls._traverse_internals, "_generated_shallow_copy_traversal"
)
cls._generated_shallow_copy_traversal = shallow_copy # type: ignore # noqa: E501
shallow_copy(self, other)
def _clone(self: SelfHasShallowCopy, **kw: Any) -> SelfHasShallowCopy:
"""Create a shallow copy"""
c = self.__class__.__new__(self.__class__)
self._shallow_copy_to(c)
return c
SelfGenerativeOnTraversal = TypeVar(
"SelfGenerativeOnTraversal", bound="GenerativeOnTraversal"
)
class GenerativeOnTraversal(HasShallowCopy):
"""Supplies Generative behavior but making use of traversals to shallow
copy.
.. seealso::
:class:`sqlalchemy.sql.base.Generative`
"""
__slots__ = ()
def _generate(
self: SelfGenerativeOnTraversal,
) -> SelfGenerativeOnTraversal:
cls = self.__class__
s = cls.__new__(cls)
self._shallow_copy_to(s)
return s
def _clone(element, **kw):
return element._clone()
class HasCopyInternals(HasTraverseInternals):
__slots__ = ()
def _clone(self, **kw):
raise NotImplementedError()
def _copy_internals(
self, *, omit_attrs: Iterable[str] = (), **kw: Any
) -> None:
"""Reassign internal elements to be clones of themselves.
Called during a copy-and-traverse operation on newly
shallow-copied elements to create a deep copy.
The given clone function should be used, which may be applying
additional transformations to the element (i.e. replacement
traversal, cloned traversal, annotations).
"""
try:
traverse_internals = self._traverse_internals
except AttributeError:
# user-defined classes may not have a _traverse_internals
return
for attrname, obj, meth in _copy_internals.run_generated_dispatch(
self, traverse_internals, "_generated_copy_internals_traversal"
):
if attrname in omit_attrs:
continue
if obj is not None:
result = meth(attrname, self, obj, **kw)
if result is not None:
setattr(self, attrname, result)
class _CopyInternalsTraversal(HasTraversalDispatch):
"""Generate a _copy_internals internal traversal dispatch for classes
with a _traverse_internals collection."""
def visit_clauseelement(
self, attrname, parent, element, clone=_clone, **kw
):
return clone(element, **kw)
def visit_clauseelement_list(
self, attrname, parent, element, clone=_clone, **kw
):
return [clone(clause, **kw) for clause in element]
def visit_clauseelement_tuple(
self, attrname, parent, element, clone=_clone, **kw
):
return tuple([clone(clause, **kw) for clause in element])
def visit_executable_options(
self, attrname, parent, element, clone=_clone, **kw
):
return tuple([clone(clause, **kw) for clause in element])
def visit_clauseelement_unordered_set(
self, attrname, parent, element, clone=_clone, **kw
):
return {clone(clause, **kw) for clause in element}
def visit_clauseelement_tuples(
self, attrname, parent, element, clone=_clone, **kw
):
return [
tuple(clone(tup_elem, **kw) for tup_elem in elem)
for elem in element
]
def visit_string_clauseelement_dict(
self, attrname, parent, element, clone=_clone, **kw
):
return dict(
(key, clone(value, **kw)) for key, value in element.items()
)
def visit_setup_join_tuple(
self, attrname, parent, element, clone=_clone, **kw
):
return tuple(
(
clone(target, **kw) if target is not None else None,
clone(onclause, **kw) if onclause is not None else None,
clone(from_, **kw) if from_ is not None else None,
flags,
)
for (target, onclause, from_, flags) in element
)
def visit_memoized_select_entities(self, attrname, parent, element, **kw):
return self.visit_clauseelement_tuple(attrname, parent, element, **kw)
def visit_dml_ordered_values(
self, attrname, parent, element, clone=_clone, **kw
):
# sequence of 2-tuples
return [
(
clone(key, **kw)
if hasattr(key, "__clause_element__")
else key,
clone(value, **kw),
)
for key, value in element
]
def visit_dml_values(self, attrname, parent, element, clone=_clone, **kw):
return {
(
clone(key, **kw) if hasattr(key, "__clause_element__") else key
): clone(value, **kw)
for key, value in element.items()
}
def visit_dml_multi_values(
self, attrname, parent, element, clone=_clone, **kw
):
# sequence of sequences, each sequence contains a list/dict/tuple
def copy(elem):
if isinstance(elem, (list, tuple)):
return [
clone(value, **kw)
if hasattr(value, "__clause_element__")
else value
for value in elem
]
elif isinstance(elem, dict):
return {
(
clone(key, **kw)
if hasattr(key, "__clause_element__")
else key
): (
clone(value, **kw)
if hasattr(value, "__clause_element__")
else value
)
for key, value in elem.items()
}
else:
# TODO: use abc classes
assert False
return [
[copy(sub_element) for sub_element in sequence]
for sequence in element
]
def visit_propagate_attrs(
self, attrname, parent, element, clone=_clone, **kw
):
return element
_copy_internals = _CopyInternalsTraversal()
def _flatten_clauseelement(element):
while hasattr(element, "__clause_element__") and not getattr(
element, "is_clause_element", False
):
element = element.__clause_element__()
return element
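# For example (illustrative), an object exposing __clause_element__(), such as an
# ORM-mapped attribute, is unwrapped layer by layer here until an actual clause
# element is reached.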
class _GetChildrenTraversal(HasTraversalDispatch):
"""Generate a _children_traversal internal traversal dispatch for classes
with a _traverse_internals collection."""
def visit_has_cache_key(self, element, **kw):
# the GetChildren traversal refers explicitly to ClauseElement
# structures. Within these, a plain HasCacheKey is not a
# ClauseElement, so don't include these.
return ()
def visit_clauseelement(self, element, **kw):
return (element,)
def visit_clauseelement_list(self, element, **kw):
return element
def visit_clauseelement_tuple(self, element, **kw):
return element
def visit_clauseelement_tuples(self, element, **kw):
return itertools.chain.from_iterable(element)
def visit_fromclause_canonical_column_collection(self, element, **kw):
return ()
def visit_string_clauseelement_dict(self, element, **kw):
return element.values()
def visit_fromclause_ordered_set(self, element, **kw):
return element
def visit_clauseelement_unordered_set(self, element, **kw):
return element
def visit_setup_join_tuple(self, element, **kw):
for (target, onclause, from_, flags) in element:
if from_ is not None:
yield from_
if not isinstance(target, str):
yield _flatten_clauseelement(target)
if onclause is not None and not isinstance(onclause, str):
yield _flatten_clauseelement(onclause)
def visit_memoized_select_entities(self, element, **kw):
return self.visit_clauseelement_tuple(element, **kw)
def visit_dml_ordered_values(self, element, **kw):
for k, v in element:
if hasattr(k, "__clause_element__"):
yield k
yield v
def visit_dml_values(self, element, **kw):
expr_values = {k for k in element if hasattr(k, "__clause_element__")}
str_values = expr_values.symmetric_difference(element)
for k in sorted(str_values):
yield element[k]
for k in expr_values:
yield k
yield element[k]
def visit_dml_multi_values(self, element, **kw):
return ()
def visit_propagate_attrs(self, element, **kw):
return ()
_get_children = _GetChildrenTraversal()
@util.preload_module("sqlalchemy.sql.elements")
def _resolve_name_for_compare(element, name, anon_map, **kw):
if isinstance(name, util.preloaded.sql_elements._anonymous_label):
name = name.apply_map(anon_map)
return name
class TraversalComparatorStrategy(HasTraversalDispatch, util.MemoizedSlots):
__slots__ = "stack", "cache", "anon_map"
def __init__(self):
self.stack: Deque[
Tuple[ExternallyTraversible, ExternallyTraversible]
] = deque()
self.cache = set()
def _memoized_attr_anon_map(self):
return (anon_map(), anon_map())
def compare(self, obj1, obj2, **kw):
stack = self.stack
cache = self.cache
compare_annotations = kw.get("compare_annotations", False)
stack.append((obj1, obj2))
while stack:
left, right = stack.popleft()
if left is right:
continue
elif left is None or right is None:
# we know they are different so no match
return False
elif (left, right) in cache:
continue
cache.add((left, right))
visit_name = left.__visit_name__
if visit_name != right.__visit_name__:
return False
meth = getattr(self, "compare_%s" % visit_name, None)
if meth:
attributes_compared = meth(left, right, **kw)
if attributes_compared is COMPARE_FAILED:
return False
elif attributes_compared is SKIP_TRAVERSE:
continue
# attributes_compared is returned as a list of attribute
# names that were "handled" by the comparison method above.
# remaining attribute names in the _traverse_internals
# will be compared.
else:
attributes_compared = ()
for (
(left_attrname, left_visit_sym),
(right_attrname, right_visit_sym),
) in zip_longest(
left._traverse_internals,
right._traverse_internals,
fillvalue=(None, None),
):
if not compare_annotations and (
(left_attrname == "_annotations")
or (right_attrname == "_annotations")
):
continue
if (
left_attrname != right_attrname
or left_visit_sym is not right_visit_sym
):
return False
elif left_attrname in attributes_compared:
continue
dispatch = self.dispatch(left_visit_sym)
assert dispatch, (
f"{self.__class__} has no dispatch for "
f"'{self._dispatch_lookup[left_visit_sym]}'"
)
left_child = operator.attrgetter(left_attrname)(left)
right_child = operator.attrgetter(right_attrname)(right)
if left_child is None:
if right_child is not None:
return False
else:
continue
comparison = dispatch(
left_attrname, left, left_child, right, right_child, **kw
)
if comparison is COMPARE_FAILED:
return False
return True
def compare_inner(self, obj1, obj2, **kw):
comparator = self.__class__()
return comparator.compare(obj1, obj2, **kw)
def visit_has_cache_key(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left._gen_cache_key(self.anon_map[0], []) != right._gen_cache_key(
self.anon_map[1], []
):
return COMPARE_FAILED
def visit_propagate_attrs(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self.compare_inner(
left.get("plugin_subject", None), right.get("plugin_subject", None)
)
def visit_has_cache_key_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
if l._gen_cache_key(self.anon_map[0], []) != r._gen_cache_key(
self.anon_map[1], []
):
return COMPARE_FAILED
def visit_executable_options(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
if (
l._gen_cache_key(self.anon_map[0], [])
if l._is_has_cache_key
else l
) != (
r._gen_cache_key(self.anon_map[1], [])
if r._is_has_cache_key
else r
):
return COMPARE_FAILED
def visit_clauseelement(
self, attrname, left_parent, left, right_parent, right, **kw
):
self.stack.append((left, right))
def visit_fromclause_canonical_column_collection(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lcol, rcol in zip_longest(left, right, fillvalue=None):
self.stack.append((lcol, rcol))
def visit_fromclause_derived_column_collection(
self, attrname, left_parent, left, right_parent, right, **kw
):
pass
def visit_string_clauseelement_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lstr, rstr in zip_longest(
sorted(left), sorted(right), fillvalue=None
):
if lstr != rstr:
return COMPARE_FAILED
self.stack.append((left[lstr], right[rstr]))
def visit_clauseelement_tuples(
self, attrname, left_parent, left, right_parent, right, **kw
):
for ltup, rtup in zip_longest(left, right, fillvalue=None):
if ltup is None or rtup is None:
return COMPARE_FAILED
for l, r in zip_longest(ltup, rtup, fillvalue=None):
self.stack.append((l, r))
def visit_clauseelement_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def visit_clauseelement_tuple(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
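# _compare_unordered_sequences() below matches the two sequences greedily:
# each element of seq1 claims the first not-yet-claimed element of seq2 that
# compares equal to it (an O(n * m) scan), and the sequences are considered
# equal only when every element on both sides has been paired.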
def _compare_unordered_sequences(self, seq1, seq2, **kw):
if seq1 is None:
return seq2 is None
completed: Set[object] = set()
for clause in seq1:
for other_clause in set(seq2).difference(completed):
if self.compare_inner(clause, other_clause, **kw):
completed.add(other_clause)
break
return len(completed) == len(seq1) == len(seq2)
def visit_clauseelement_unordered_set(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self._compare_unordered_sequences(left, right, **kw)
def visit_fromclause_ordered_set(
self, attrname, left_parent, left, right_parent, right, **kw
):
for l, r in zip_longest(left, right, fillvalue=None):
self.stack.append((l, r))
def visit_string(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_string_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_string_multi_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lk, rk in zip_longest(
sorted(left.keys()), sorted(right.keys()), fillvalue=(None, None)
):
if lk != rk:
return COMPARE_FAILED
lv, rv = left[lk], right[rk]
lhc = isinstance(left, HasCacheKey)
rhc = isinstance(right, HasCacheKey)
if lhc and rhc:
if lv._gen_cache_key(
self.anon_map[0], []
) != rv._gen_cache_key(self.anon_map[1], []):
return COMPARE_FAILED
elif lhc != rhc:
return COMPARE_FAILED
elif lv != rv:
return COMPARE_FAILED
def visit_multi(
self, attrname, left_parent, left, right_parent, right, **kw
):
lhc = isinstance(left, HasCacheKey)
rhc = isinstance(right, HasCacheKey)
if lhc and rhc:
if left._gen_cache_key(
self.anon_map[0], []
) != right._gen_cache_key(self.anon_map[1], []):
return COMPARE_FAILED
elif lhc != rhc:
return COMPARE_FAILED
else:
return left == right
def visit_anon_name(
self, attrname, left_parent, left, right_parent, right, **kw
):
return _resolve_name_for_compare(
left_parent, left, self.anon_map[0], **kw
) == _resolve_name_for_compare(
right_parent, right, self.anon_map[1], **kw
)
def visit_boolean(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_operator(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left is right
def visit_type(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left._compare_type_affinity(right)
def visit_plain_dict(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_dialect_options(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_annotations_key(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left and right:
return (
left_parent._annotations_cache_key
== right_parent._annotations_cache_key
)
else:
return left == right
def visit_with_context_options(
self, attrname, left_parent, left, right_parent, right, **kw
):
return tuple((fn.__code__, c_key) for fn, c_key in left) == tuple(
(fn.__code__, c_key) for fn, c_key in right
)
def visit_plain_obj(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_named_ddl_element(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left is None:
if right is not None:
return COMPARE_FAILED
return left.name == right.name
def visit_prefix_sequence(
self, attrname, left_parent, left, right_parent, right, **kw
):
for (l_clause, l_str), (r_clause, r_str) in zip_longest(
left, right, fillvalue=(None, None)
):
if l_str != r_str:
return COMPARE_FAILED
else:
self.stack.append((l_clause, r_clause))
def visit_setup_join_tuple(
self, attrname, left_parent, left, right_parent, right, **kw
):
# TODO: look at attrname for "legacy_join" and use different structure
for (
(l_target, l_onclause, l_from, l_flags),
(r_target, r_onclause, r_from, r_flags),
) in zip_longest(left, right, fillvalue=(None, None, None, None)):
if l_flags != r_flags:
return COMPARE_FAILED
self.stack.append((l_target, r_target))
self.stack.append((l_onclause, r_onclause))
self.stack.append((l_from, r_from))
def visit_memoized_select_entities(
self, attrname, left_parent, left, right_parent, right, **kw
):
return self.visit_clauseelement_tuple(
attrname, left_parent, left, right_parent, right, **kw
)
def visit_table_hint_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
left_keys = sorted(left, key=lambda elem: (elem[0].fullname, elem[1]))
right_keys = sorted(
right, key=lambda elem: (elem[0].fullname, elem[1])
)
for (ltable, ldialect), (rtable, rdialect) in zip_longest(
left_keys, right_keys, fillvalue=(None, None)
):
if ldialect != rdialect:
return COMPARE_FAILED
elif left[(ltable, ldialect)] != right[(rtable, rdialect)]:
return COMPARE_FAILED
else:
self.stack.append((ltable, rtable))
def visit_statement_hint_list(
self, attrname, left_parent, left, right_parent, right, **kw
):
return left == right
def visit_unknown_structure(
self, attrname, left_parent, left, right_parent, right, **kw
):
raise NotImplementedError()
def visit_dml_ordered_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
# sequence of tuple pairs
for (lk, lv), (rk, rv) in zip_longest(
left, right, fillvalue=(None, None)
):
if not self._compare_dml_values_or_ce(lk, rk, **kw):
return COMPARE_FAILED
def _compare_dml_values_or_ce(self, lv, rv, **kw):
lvce = hasattr(lv, "__clause_element__")
rvce = hasattr(rv, "__clause_element__")
if lvce != rvce:
return False
elif lvce and not self.compare_inner(lv, rv, **kw):
return False
elif not lvce and lv != rv:
return False
elif not self.compare_inner(lv, rv, **kw):
return False
return True
def visit_dml_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
if left is None or right is None or len(left) != len(right):
return COMPARE_FAILED
if isinstance(left, collections_abc.Sequence):
for lv, rv in zip(left, right):
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
elif isinstance(right, collections_abc.Sequence):
return COMPARE_FAILED
else:
# dictionaries guaranteed to support insert ordering in
# py37 so that we can compare the keys in order. without
# this, we can't compare SQL expression keys because we don't
# know which key is which
for (lk, lv), (rk, rv) in zip(left.items(), right.items()):
if not self._compare_dml_values_or_ce(lk, rk, **kw):
return COMPARE_FAILED
if not self._compare_dml_values_or_ce(lv, rv, **kw):
return COMPARE_FAILED
def visit_dml_multi_values(
self, attrname, left_parent, left, right_parent, right, **kw
):
for lseq, rseq in zip_longest(left, right, fillvalue=None):
if lseq is None or rseq is None:
return COMPARE_FAILED
for ld, rd in zip_longest(lseq, rseq, fillvalue=None):
if (
self.visit_dml_values(
attrname, left_parent, ld, right_parent, rd, **kw
)
is COMPARE_FAILED
):
return COMPARE_FAILED
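# compare_expression_clauselist() below treats clause lists joined by an
# associative operator (for example AND / OR) as unordered, so "a AND b AND c"
# can match "c AND a AND b"; for non-associative operators only the operator
# is checked here and the clauses are left to the generic traversal.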
def compare_expression_clauselist(self, left, right, **kw):
if left.operator is right.operator:
if operators.is_associative(left.operator):
if self._compare_unordered_sequences(
left.clauses, right.clauses, **kw
):
return ["operator", "clauses"]
else:
return COMPARE_FAILED
else:
return ["operator"]
else:
return COMPARE_FAILED
def compare_clauselist(self, left, right, **kw):
return self.compare_expression_clauselist(left, right, **kw)
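# compare_binary() below accepts commutative expressions in either operand
# order (such as ==): when the operands match directly or swapped, the
# operator, negate, left and right attributes are all reported as handled;
# for non-commutative operators only operator and negate are handled here and
# the operands are compared by the generic traversal.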
def compare_binary(self, left, right, **kw):
if left.operator == right.operator:
if operators.is_commutative(left.operator):
if (
self.compare_inner(left.left, right.left, **kw)
and self.compare_inner(left.right, right.right, **kw)
) or (
self.compare_inner(left.left, right.right, **kw)
and self.compare_inner(left.right, right.left, **kw)
):
return ["operator", "negate", "left", "right"]
else:
return COMPARE_FAILED
else:
return ["operator", "negate"]
else:
return COMPARE_FAILED
def compare_bindparam(self, left, right, **kw):
compare_keys = kw.pop("compare_keys", True)
compare_values = kw.pop("compare_values", True)
if compare_values:
omit = []
else:
# this means, "skip these, we already compared"
omit = ["callable", "value"]
if not compare_keys:
omit.append("key")
return omit
class ColIdentityComparatorStrategy(TraversalComparatorStrategy):
def compare_column_element(
self, left, right, use_proxies=True, equivalents=(), **kw
):
"""Compare ColumnElements using proxies and equivalent collections.
This is a comparison strategy specific to the ORM.
"""
to_compare = (right,)
if equivalents and right in equivalents:
to_compare = equivalents[right].union(to_compare)
for oth in to_compare:
if use_proxies and left.shares_lineage(oth):
return SKIP_TRAVERSE
elif hash(left) == hash(right):
return SKIP_TRAVERSE
else:
return COMPARE_FAILED
def compare_column(self, left, right, **kw):
return self.compare_column_element(left, right, **kw)
def compare_label(self, left, right, **kw):
return self.compare_column_element(left, right, **kw)
def compare_table(self, left, right, **kw):
# tables compare on identity, since it's not really feasible to
# compare them column by column with the above rules
return SKIP_TRAVERSE if left is right else COMPARE_FAILED
| 33.00495 | 104 | 0.59184 |
aceed9aa7ee8e28a42161abb717c4579fdc08da9 | 3,071 | py | Python | homeassistant/components/tile/__init__.py | larsvinc/core | 9eb854ef0a6ddb5ecdc4dbe639bedbadab80c90d | [
"Apache-2.0"
] | 3 | 2021-04-27T16:37:48.000Z | 2022-02-23T02:47:33.000Z | homeassistant/components/tile/__init__.py | larsvinc/core | 9eb854ef0a6ddb5ecdc4dbe639bedbadab80c90d | [
"Apache-2.0"
] | 29 | 2021-02-19T07:21:11.000Z | 2022-03-04T06:05:06.000Z | homeassistant/components/tile/__init__.py | larsvinc/core | 9eb854ef0a6ddb5ecdc4dbe639bedbadab80c90d | [
"Apache-2.0"
] | 1 | 2021-08-07T10:08:32.000Z | 2021-08-07T10:08:32.000Z | """The Tile component."""
import asyncio
from datetime import timedelta
from functools import partial
from pytile import async_login
from pytile.errors import SessionExpiredError, TileError
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util.async_ import gather_with_concurrency
from .const import DATA_COORDINATOR, DATA_TILE, DOMAIN, LOGGER
PLATFORMS = ["device_tracker"]
DEVICE_TYPES = ["PHONE", "TILE"]
DEFAULT_INIT_TASK_LIMIT = 2
DEFAULT_UPDATE_INTERVAL = timedelta(minutes=2)
CONF_SHOW_INACTIVE = "show_inactive"
async def async_setup(hass, config):
"""Set up the Tile component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}, DATA_TILE: {}}
return True
async def async_setup_entry(hass, entry):
"""Set up Tile as config entry."""
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] = {}
hass.data[DOMAIN][DATA_TILE][entry.entry_id] = {}
websession = aiohttp_client.async_get_clientsession(hass)
try:
client = await async_login(
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
session=websession,
)
hass.data[DOMAIN][DATA_TILE][entry.entry_id] = await client.async_get_tiles()
except TileError as err:
raise ConfigEntryNotReady("Error during integration setup") from err
async def async_update_tile(tile):
"""Update the Tile."""
try:
return await tile.async_update()
except SessionExpiredError:
LOGGER.info("Tile session expired; creating a new one")
await client.async_init()
except TileError as err:
raise UpdateFailed(f"Error while retrieving data: {err}") from err
coordinator_init_tasks = []
for tile_uuid, tile in hass.data[DOMAIN][DATA_TILE][entry.entry_id].items():
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
tile_uuid
] = DataUpdateCoordinator(
hass,
LOGGER,
name=tile.name,
update_interval=DEFAULT_UPDATE_INTERVAL,
update_method=partial(async_update_tile, tile),
)
coordinator_init_tasks.append(coordinator.async_refresh())
await gather_with_concurrency(DEFAULT_INIT_TASK_LIMIT, *coordinator_init_tasks)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
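# Summary of the setup above: one DataUpdateCoordinator is created per Tile
# UUID and stored under hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id],
# the initial refreshes run concurrently but capped at DEFAULT_INIT_TASK_LIMIT
# in-flight tasks, and the device_tracker platform is then set up from the
# same config entry.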
async def async_unload_entry(hass, entry):
"""Unload a Tile config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
| 31.989583 | 88 | 0.691306 |
aceeda3ac2d8ae41fcbc22cad4068adea91ae7e1 | 67,464 | py | Python | python/ccxt/huobi.py | PolozovT/ccxt | 5413a2e7306fa4c0cd992e407dc46fb519104fda | [
"MIT"
] | null | null | null | python/ccxt/huobi.py | PolozovT/ccxt | 5413a2e7306fa4c0cd992e407dc46fb519104fda | [
"MIT"
] | null | null | null | python/ccxt/huobi.py | PolozovT/ccxt | 5413a2e7306fa4c0cd992e407dc46fb519104fda | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import hashlib
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import PermissionDenied
from ccxt.base.errors import ArgumentsRequired
from ccxt.base.errors import BadRequest
from ccxt.base.errors import BadSymbol
from ccxt.base.errors import InsufficientFunds
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import OrderNotFound
from ccxt.base.errors import NetworkError
from ccxt.base.errors import ExchangeNotAvailable
from ccxt.base.errors import OnMaintenance
from ccxt.base.errors import RequestTimeout
from ccxt.base.decimal_to_precision import TRUNCATE
from ccxt.base.precise import Precise
class huobi(Exchange):
def describe(self):
return self.deep_extend(super(huobi, self).describe(), {
'id': 'huobi',
'name': 'Huobi',
'countries': ['CN'],
'rateLimit': 2000,
'userAgent': self.userAgents['chrome39'],
'version': 'v1',
'accounts': None,
'accountsById': None,
'hostname': 'api.huobi.pro', # api.testnet.huobi.pro
'pro': True,
'has': {
'cancelAllOrders': True,
'cancelOrder': True,
'cancelOrders': True,
'CORS': False,
'createOrder': True,
'fetchBalance': True,
'fetchClosedOrders': True,
'fetchCurrencies': True,
'fetchDepositAddress': True,
'fetchDeposits': True,
'fetchMarkets': True,
'fetchMyTrades': True,
'fetchOHLCV': True,
'fetchOpenOrders': True,
'fetchOrder': True,
'fetchOrderBook': True,
'fetchOrders': True,
'fetchOrderTrades': True,
'fetchTicker': True,
'fetchTickers': True,
'fetchTrades': True,
'fetchTradingLimits': True,
'fetchWithdrawals': True,
'withdraw': True,
},
'timeframes': {
'1m': '1min',
'5m': '5min',
'15m': '15min',
'30m': '30min',
'1h': '60min',
'4h': '4hour',
'1d': '1day',
'1w': '1week',
'1M': '1mon',
'1y': '1year',
},
'urls': {
'test': {
'market': 'https://api.testnet.huobi.pro',
'public': 'https://api.testnet.huobi.pro',
'private': 'https://api.testnet.huobi.pro',
},
'logo': 'https://user-images.githubusercontent.com/1294454/76137448-22748a80-604e-11ea-8069-6e389271911d.jpg',
'api': {
'market': 'https://{hostname}',
'public': 'https://{hostname}',
'private': 'https://{hostname}',
'v2Public': 'https://{hostname}',
'v2Private': 'https://{hostname}',
},
'www': 'https://www.huobi.com',
'referral': {
'url': 'https://www.huobi.com/en-us/topic/double-reward/?invite_code=6rmm2223',
'discount': 0.15,
},
'doc': [
'https://huobiapi.github.io/docs/spot/v1/cn/',
'https://huobiapi.github.io/docs/dm/v1/cn/',
'https://huobiapi.github.io/docs/coin_margined_swap/v1/cn/',
'https://huobiapi.github.io/docs/usdt_swap/v1/cn/',
'https://huobiapi.github.io/docs/option/v1/cn/',
],
'fees': 'https://www.huobi.com/about/fee/',
},
'api': {
'v2Public': {
'get': [
'reference/currencies', # reference information for currencies and their chains
'market-status', # get the current market status
],
},
'v2Private': {
'get': [
'account/ledger',
'account/withdraw/quota',
'account/withdraw/address', # query withdrawal addresses (parent user only)
'account/deposit/address',
'account/repayment', # query repayment transaction records
'reference/transact-fee-rate',
'account/asset-valuation', # get the asset valuation of an account
'point/account', # point card balance query
'sub-user/user-list', # get the list of sub-users
'sub-user/user-state', # get the user state of a specific sub-user
'sub-user/account-list', # get the account list of a specific sub-user
'sub-user/deposit-address', # sub-user deposit address query
'sub-user/query-deposit', # sub-user deposit record query
'user/api-key', # API key information query for parent and sub-users
'user/uid', # get the user UID for parent and sub-users
'algo-orders/opening', # query open algo orders that have not yet been triggered
'algo-orders/history', # query algo order history
'algo-orders/specific', # query a specific algo order
'c2c/offers', # query lending/borrowing orders
'c2c/offer', # query a specific lending/borrowing order and its transactions
'c2c/transactions', # query lending/borrowing transaction records
'c2c/repayment', # query repayment transaction records
'c2c/account', # query account balance
'etp/reference', # basic reference information
'etp/transactions', # get leveraged ETP subscription/redemption records
'etp/transaction', # get a specific leveraged ETP subscription/redemption record
'etp/rebalance', # get leveraged ETP rebalance records
'etp/limit', # get the ETP holding limit
],
'post': [
'account/transfer',
'account/repayment', # repay borrowed coins (applies to both cross and isolated margin)
'point/transfer', # point card transfer
'sub-user/management', # freeze/unfreeze a sub-user
'sub-user/creation', # sub-user creation
'sub-user/tradable-market', # set sub-user trading permissions
'sub-user/transferability', # set sub-user asset transfer-out permission
'sub-user/api-key-generation', # sub-user API key creation
'sub-user/api-key-modification', # modify a sub-user API key
'sub-user/api-key-deletion', # delete a sub-user API key
'sub-user/deduct-mode', # set the sub-user fee deduction mode
'algo-orders', # place an algo order
'algo-orders/cancel-all-after', # automatically cancel orders
'algo-orders/cancellation', # cancel algo orders (before they trigger)
'c2c/offer', # place a lending/borrowing order
'c2c/cancellation', # cancel a lending/borrowing order
'c2c/cancel-all', # cancel all lending/borrowing orders
'c2c/repayment', # repay borrowed coins
'c2c/transfer', # asset transfer
'etp/creation', # leveraged ETP swap in
'etp/redemption', # leveraged ETP swap out
'etp/{transactId}/cancel', # cancel a single leveraged ETP order
'etp/batch-cancel', # batch cancel leveraged ETP orders
],
},
'market': {
'get': [
'history/kline', # get candlestick (kline) data
'detail/merged', # get the aggregated ticker
'depth', # get market depth data
'trade', # get trade detail data
'history/trade', # get recent trade records in batch
'detail', # get market detail, 24h trading volume data
'tickers',
'etp', # get the real-time net asset value of leveraged ETPs
],
},
'public': {
'get': [
'common/symbols', # query all trading pairs supported by the system
'common/currencys', # query all currencies supported by the system
'common/timestamp', # query the current system time
'common/exchange', # order limits
'settings/currencys', # ?language=en-US
],
},
'private': {
'get': [
'account/accounts', # query all accounts of the current user (i.e. the account-id)
'account/accounts/{id}/balance', # query the balance of the specified account
'account/accounts/{sub-uid}',
'account/history',
'cross-margin/loan-info',
'margin/loan-info', # query loan interest rates and quota
'fee/fee-rate/get',
'order/openOrders',
'order/orders',
'order/orders/{id}', # query the details of a specific order
'order/orders/{id}/matchresults', # query the match results of a specific order
'order/orders/getClientOrder',
'order/history', # query current and historical orders
'order/matchresults', # query current and historical match results
'dw/withdraw-virtual/addresses', # query virtual currency withdrawal addresses (Deprecated)
'query/deposit-withdraw',
'margin/loan-info',
'margin/loan-orders', # loan orders
'margin/accounts/balance', # loan account details
'cross-margin/loan-orders', # query coin-borrowing orders
'cross-margin/accounts/balance', # coin-borrowing account details
'points/actions',
'points/orders',
'subuser/aggregate-balance',
'stable-coin/exchange_rate',
'stable-coin/quote',
],
'post': [
'account/transfer', # asset transfer (generic endpoint for transfers between parent and sub-users)
'futures/transfer',
'order/batch-orders',
'order/orders/place', # create and execute a new order in one step (recommended)
'order/orders/submitCancelClientOrder',
'order/orders/batchCancelOpenOrders',
'order/orders', # create a new order request (create only, without executing it)
'order/orders/{id}/place', # execute an order (only executes a previously created order)
'order/orders/{id}/submitcancel', # request cancellation of an order
'order/orders/batchcancel', # cancel orders in batch
'dw/balance/transfer', # asset transfer
'dw/withdraw/api/create', # request a virtual currency withdrawal
'dw/withdraw-virtual/create', # request a virtual currency withdrawal
'dw/withdraw-virtual/{id}/place', # confirm a virtual currency withdrawal request (Deprecated)
'dw/withdraw-virtual/{id}/cancel', # request cancellation of a virtual currency withdrawal
'dw/transfer-in/margin', # transfer from the spot account into the margin account
'dw/transfer-out/margin', # transfer from the margin account out to the spot account
'margin/orders', # request a margin loan
'margin/orders/{id}/repay', # repay a margin loan
'cross-margin/transfer-in', # asset transfer in
'cross-margin/transfer-out', # asset transfer out
'cross-margin/orders', # request a cross-margin loan
'cross-margin/orders/{id}/repay', # repay a cross-margin loan
'stable-coin/exchange',
'subuser/transfer',
],
},
},
'fees': {
'trading': {
'feeSide': 'get',
'tierBased': False,
'percentage': True,
'maker': 0.002,
'taker': 0.002,
},
},
'exceptions': {
'broad': {
'contract is restricted of closing positions on API. Please contact customer service': OnMaintenance,
'maintain': OnMaintenance,
},
'exact': {
# err-code
'bad-request': BadRequest,
'base-date-limit-error': BadRequest, # {"status":"error","err-code":"base-date-limit-error","err-msg":"date less than system limit","data":null}
'api-not-support-temp-addr': PermissionDenied, # {"status":"error","err-code":"api-not-support-temp-addr","err-msg":"API withdrawal does not support temporary addresses","data":null}
'timeout': RequestTimeout, # {"ts":1571653730865,"status":"error","err-code":"timeout","err-msg":"Request Timeout"}
'gateway-internal-error': ExchangeNotAvailable, # {"status":"error","err-code":"gateway-internal-error","err-msg":"Failed to load data. Try again later.","data":null}
'account-frozen-balance-insufficient-error': InsufficientFunds, # {"status":"error","err-code":"account-frozen-balance-insufficient-error","err-msg":"trade account balance is not enough, left: `0.0027`","data":null}
'invalid-amount': InvalidOrder, # eg "Paramemter `amount` is invalid."
'order-limitorder-amount-min-error': InvalidOrder, # limit order amount error, min: `0.001`
'order-limitorder-amount-max-error': InvalidOrder, # market order amount error, max: `1000000`
'order-marketorder-amount-min-error': InvalidOrder, # market order amount error, min: `0.01`
'order-limitorder-price-min-error': InvalidOrder, # limit order price error
'order-limitorder-price-max-error': InvalidOrder, # limit order price error
'order-holding-limit-failed': InvalidOrder, # {"status":"error","err-code":"order-holding-limit-failed","err-msg":"Order failed, exceeded the holding limit of self currency","data":null}
'order-orderprice-precision-error': InvalidOrder, # {"status":"error","err-code":"order-orderprice-precision-error","err-msg":"order price precision error, scale: `4`","data":null}
'order-etp-nav-price-max-error': InvalidOrder, # {"status":"error","err-code":"order-etp-nav-price-max-error","err-msg":"Order price cannot be higher than 5% of NAV","data":null}
'order-orderstate-error': OrderNotFound, # canceling an already canceled order
'order-queryorder-invalid': OrderNotFound, # querying a non-existent order
'order-update-error': ExchangeNotAvailable, # undocumented error
'api-signature-check-failed': AuthenticationError,
'api-signature-not-valid': AuthenticationError, # {"status":"error","err-code":"api-signature-not-valid","err-msg":"Signature not valid: Incorrect Access key [Access key错误]","data":null}
'base-record-invalid': OrderNotFound, # https://github.com/ccxt/ccxt/issues/5750
'base-symbol-trade-disabled': BadSymbol, # {"status":"error","err-code":"base-symbol-trade-disabled","err-msg":"Trading is disabled for self symbol","data":null}
'base-symbol-error': BadSymbol, # {"status":"error","err-code":"base-symbol-error","err-msg":"The symbol is invalid","data":null}
'system-maintenance': OnMaintenance, # {"status": "error", "err-code": "system-maintenance", "err-msg": "System is in maintenance!", "data": null}
# err-msg
'invalid symbol': BadSymbol, # {"ts":1568813334794,"status":"error","err-code":"invalid-parameter","err-msg":"invalid symbol"}
'symbol trade not open now': BadSymbol, # {"ts":1576210479343,"status":"error","err-code":"invalid-parameter","err-msg":"symbol trade not open now"}
},
},
'options': {
# https://github.com/ccxt/ccxt/issues/5376
'fetchOrdersByStatesMethod': 'private_get_order_orders', # 'private_get_order_history' # https://github.com/ccxt/ccxt/pull/5392
'fetchOpenOrdersMethod': 'fetch_open_orders_v1', # 'fetch_open_orders_v2' # https://github.com/ccxt/ccxt/issues/5388
'createMarketBuyOrderRequiresPrice': True,
'fetchMarketsMethod': 'publicGetCommonSymbols',
'fetchBalanceMethod': 'privateGetAccountAccountsIdBalance',
'createOrderMethod': 'privatePostOrderOrdersPlace',
'language': 'en-US',
'broker': {
'id': 'AA03022abc',
},
},
'commonCurrencies': {
# https://github.com/ccxt/ccxt/issues/6081
# https://github.com/ccxt/ccxt/issues/3365
# https://github.com/ccxt/ccxt/issues/2873
'GET': 'Themis', # conflict with GET(Guaranteed Entrance Token, GET Protocol)
'GTC': 'Game.com', # conflict with Gitcoin and Gastrocoin
'HIT': 'HitChain',
'HOT': 'Hydro Protocol', # conflict with HOT(Holo) https://github.com/ccxt/ccxt/issues/4929
# https://github.com/ccxt/ccxt/issues/7399
# https://coinmarketcap.com/currencies/pnetwork/
# https://coinmarketcap.com/currencies/penta/markets/
# https://en.cryptonomist.ch/blog/eidoo/the-edo-to-pnt-upgrade-what-you-need-to-know-updated/
'PNT': 'Penta',
'SBTC': 'Super Bitcoin',
'BIFI': 'Bitcoin File', # conflict with Beefy.Finance https://github.com/ccxt/ccxt/issues/8706
},
})
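# fetch_trading_limits() below issues one publicGetCommonExchange request per
# symbol (it is not a batch call), so loading limits for the full market list
# can be slow; pass an explicit list of symbols to restrict the lookups.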
def fetch_trading_limits(self, symbols=None, params={}):
# self method should not be called directly, use loadTradingLimits() instead
# by default it will try load withdrawal fees of all currencies(with separate requests)
# however if you define symbols = ['ETH/BTC', 'LTC/BTC'] in args it will only load those
self.load_markets()
if symbols is None:
symbols = self.symbols
result = {}
for i in range(0, len(symbols)):
symbol = symbols[i]
result[symbol] = self.fetch_trading_limits_by_id(self.market_id(symbol), params)
return result
def fetch_trading_limits_by_id(self, id, params={}):
request = {
'symbol': id,
}
response = self.publicGetCommonExchange(self.extend(request, params))
#
# {status: "ok",
# data: { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }}
#
return self.parse_trading_limits(self.safe_value(response, 'data', {}))
def parse_trading_limits(self, limits, symbol=None, params={}):
#
# { symbol: "aidocbtc",
# 'buy-limit-must-less-than': 1.1,
# 'sell-limit-must-greater-than': 0.9,
# 'limit-order-must-greater-than': 1,
# 'limit-order-must-less-than': 5000000,
# 'market-buy-order-must-greater-than': 0.0001,
# 'market-buy-order-must-less-than': 100,
# 'market-sell-order-must-greater-than': 1,
# 'market-sell-order-must-less-than': 500000,
# 'circuit-break-when-greater-than': 10000,
# 'circuit-break-when-less-than': 10,
# 'market-sell-order-rate-must-less-than': 0.1,
# 'market-buy-order-rate-must-less-than': 0.1 }
#
return {
'info': limits,
'limits': {
'amount': {
'min': self.safe_number(limits, 'limit-order-must-greater-than'),
'max': self.safe_number(limits, 'limit-order-must-less-than'),
},
},
}
def cost_to_precision(self, symbol, cost):
return self.decimal_to_precision(cost, TRUNCATE, self.markets[symbol]['precision']['cost'], self.precisionMode)
def fetch_markets(self, params={}):
method = self.options['fetchMarketsMethod']
response = getattr(self, method)(params)
markets = self.safe_value(response, 'data')
numMarkets = len(markets)
if numMarkets < 1:
raise NetworkError(self.id + ' publicGetCommonSymbols returned empty response: ' + self.json(markets))
result = []
for i in range(0, len(markets)):
market = markets[i]
baseId = self.safe_string(market, 'base-currency')
quoteId = self.safe_string(market, 'quote-currency')
id = baseId + quoteId
base = self.safe_currency_code(baseId)
quote = self.safe_currency_code(quoteId)
symbol = base + '/' + quote
precision = {
'amount': self.safe_integer(market, 'amount-precision'),
'price': self.safe_integer(market, 'price-precision'),
'cost': self.safe_integer(market, 'value-precision'),
}
maker = 0 if (base == 'OMG') else 0.2 / 100
taker = 0 if (base == 'OMG') else 0.2 / 100
minAmount = self.safe_number(market, 'min-order-amt', math.pow(10, -precision['amount']))
maxAmount = self.safe_number(market, 'max-order-amt')
minCost = self.safe_number(market, 'min-order-value', 0)
state = self.safe_string(market, 'state')
active = (state == 'online')
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': active,
'precision': precision,
'taker': taker,
'maker': maker,
'limits': {
'amount': {
'min': minAmount,
'max': maxAmount,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': None,
},
'cost': {
'min': minCost,
'max': None,
},
},
'info': market,
})
return result
def parse_ticker(self, ticker, market=None):
#
# fetchTicker
#
# {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
#
# fetchTickers
# {
# symbol: "bhdht",
# open: 2.3938,
# high: 2.4151,
# low: 2.3323,
# close: 2.3909,
# amount: 628.992,
# vol: 1493.71841095,
# count: 2088,
# bid: 2.3643,
# bidSize: 0.7136,
# ask: 2.4061,
# askSize: 0.4156
# }
#
symbol = None
if market is not None:
symbol = market['symbol']
timestamp = self.safe_integer(ticker, 'ts')
bid = None
bidVolume = None
ask = None
askVolume = None
if 'bid' in ticker:
if isinstance(ticker['bid'], list):
bid = self.safe_number(ticker['bid'], 0)
bidVolume = self.safe_number(ticker['bid'], 1)
else:
bid = self.safe_number(ticker, 'bid')
bidVolume = self.safe_value(ticker, 'bidSize')
if 'ask' in ticker:
if isinstance(ticker['ask'], list):
ask = self.safe_number(ticker['ask'], 0)
askVolume = self.safe_number(ticker['ask'], 1)
else:
ask = self.safe_number(ticker, 'ask')
askVolume = self.safe_value(ticker, 'askSize')
open = self.safe_number(ticker, 'open')
close = self.safe_number(ticker, 'close')
change = None
percentage = None
average = None
if (open is not None) and (close is not None):
change = close - open
average = self.sum(open, close) / 2
if (close is not None) and (close > 0):
percentage = (change / open) * 100
baseVolume = self.safe_number(ticker, 'amount')
quoteVolume = self.safe_number(ticker, 'vol')
vwap = self.vwap(baseVolume, quoteVolume)
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_number(ticker, 'high'),
'low': self.safe_number(ticker, 'low'),
'bid': bid,
'bidVolume': bidVolume,
'ask': ask,
'askVolume': askVolume,
'vwap': vwap,
'open': open,
'close': close,
'last': close,
'previousClose': None,
'change': change,
'percentage': percentage,
'average': average,
'baseVolume': baseVolume,
'quoteVolume': quoteVolume,
'info': ticker,
}
def fetch_order_book(self, symbol, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': 'step0',
}
response = self.marketGetDepth(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.depth.step0",
# "ts": 1583474832790,
# "tick": {
# "bids": [
# [9100.290000000000000000, 0.200000000000000000],
# [9099.820000000000000000, 0.200000000000000000],
# [9099.610000000000000000, 0.205000000000000000],
# ],
# "asks": [
# [9100.640000000000000000, 0.005904000000000000],
# [9101.010000000000000000, 0.287311000000000000],
# [9101.030000000000000000, 0.012121000000000000],
# ],
# "ts":1583474832008,
# "version":104999698780
# }
# }
#
if 'tick' in response:
if not response['tick']:
raise BadSymbol(self.id + ' fetchOrderBook() returned empty response: ' + self.json(response))
tick = self.safe_value(response, 'tick')
timestamp = self.safe_integer(tick, 'ts', self.safe_integer(response, 'ts'))
result = self.parse_order_book(tick, symbol, timestamp)
result['nonce'] = self.safe_integer(tick, 'version')
return result
raise ExchangeError(self.id + ' fetchOrderBook() returned unrecognized response: ' + self.json(response))
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
response = self.marketGetDetailMerged(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.detail.merged",
# "ts": 1583494336669,
# "tick": {
# "amount": 26228.672978342216,
# "open": 9078.95,
# "close": 9146.86,
# "high": 9155.41,
# "id": 209988544334,
# "count": 265846,
# "low": 8988.0,
# "version": 209988544334,
# "ask": [9146.87, 0.156134],
# "vol": 2.3822168242201668E8,
# "bid": [9146.86, 0.080758],
# }
# }
#
ticker = self.parse_ticker(response['tick'], market)
timestamp = self.safe_integer(response, 'ts')
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
return ticker
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
response = self.marketGetTickers(params)
tickers = self.safe_value(response, 'data')
timestamp = self.safe_integer(response, 'ts')
result = {}
for i in range(0, len(tickers)):
marketId = self.safe_string(tickers[i], 'symbol')
market = self.safe_market(marketId)
symbol = market['symbol']
ticker = self.parse_ticker(tickers[i], market)
ticker['timestamp'] = timestamp
ticker['datetime'] = self.iso8601(timestamp)
result[symbol] = ticker
return self.filter_by_array(result, 'symbol', symbols)
def parse_trade(self, trade, market=None):
#
# fetchTrades(public)
#
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
#
# fetchMyTrades(private)
#
# {
# 'symbol': 'swftcbtc',
# 'fee-currency': 'swftc',
# 'filled-fees': '0',
# 'source': 'spot-api',
# 'id': 83789509854000,
# 'type': 'buy-limit',
# 'order-id': 83711103204909,
# 'filled-points': '0.005826843283532154',
# 'fee-deduct-currency': 'ht',
# 'filled-amount': '45941.53',
# 'price': '0.0000001401',
# 'created-at': 1597933260729,
# 'match-id': 100087455560,
# 'role': 'maker',
# 'trade-id': 100050305348
# },
#
marketId = self.safe_string(trade, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer_2(trade, 'ts', 'created-at')
order = self.safe_string(trade, 'order-id')
side = self.safe_string(trade, 'direction')
type = self.safe_string(trade, 'type')
if type is not None:
typeParts = type.split('-')
side = typeParts[0]
type = typeParts[1]
takerOrMaker = self.safe_string(trade, 'role')
priceString = self.safe_string(trade, 'price')
amountString = self.safe_string_2(trade, 'filled-amount', 'amount')
price = self.parse_number(priceString)
amount = self.parse_number(amountString)
cost = self.parse_number(Precise.string_mul(priceString, amountString))
fee = None
feeCost = self.safe_number(trade, 'filled-fees')
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-currency'))
filledPoints = self.safe_number(trade, 'filled-points')
if filledPoints is not None:
if (feeCost is None) or (feeCost == 0.0):
feeCost = filledPoints
feeCurrency = self.safe_currency_code(self.safe_string(trade, 'fee-deduct-currency'))
if feeCost is not None:
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
tradeId = self.safe_string_2(trade, 'trade-id', 'tradeId')
id = self.safe_string(trade, 'id', tradeId)
return {
'id': id,
'info': trade,
'order': order,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'type': type,
'side': side,
'takerOrMaker': takerOrMaker,
'price': price,
'amount': amount,
'cost': cost,
'fee': fee,
}
def fetch_order_trades(self, id, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrderOrdersIdMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], None, since, limit)
def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = None
request = {}
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
if limit is not None:
request['size'] = limit # 1-100 orders, default is 100
if since is not None:
request['start-date'] = self.ymd(since) # a date within 61 days from today
request['end-date'] = self.ymd(self.sum(since, 86400000))
response = self.privateGetOrderMatchresults(self.extend(request, params))
return self.parse_trades(response['data'], market, since, limit)
def fetch_trades(self, symbol, since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
}
if limit is not None:
request['size'] = limit
response = self.marketGetHistoryTrade(self.extend(request, params))
#
# {
# "status": "ok",
# "ch": "market.btcusdt.trade.detail",
# "ts": 1583497692365,
# "data": [
# {
# "id": 105005170342,
# "ts": 1583497692182,
# "data": [
# {
# "amount": 0.010411000000000000,
# "trade-id": 102090736910,
# "ts": 1583497692182,
# "id": 10500517034273194594947,
# "price": 9096.050000000000000000,
# "direction": "sell"
# }
# ]
# },
# # ...
# ]
# }
#
data = self.safe_value(response, 'data')
result = []
for i in range(0, len(data)):
trades = self.safe_value(data[i], 'data', [])
for j in range(0, len(trades)):
trade = self.parse_trade(trades[j], market)
result.append(trade)
result = self.sort_by(result, 'timestamp')
return self.filter_by_symbol_since_limit(result, symbol, since, limit)
def parse_ohlcv(self, ohlcv, market=None):
#
# {
# "amount":1.2082,
# "open":0.025096,
# "close":0.025095,
# "high":0.025096,
# "id":1591515300,
# "count":6,
# "low":0.025095,
# "vol":0.0303205097
# }
#
return [
self.safe_timestamp(ohlcv, 'id'),
self.safe_number(ohlcv, 'open'),
self.safe_number(ohlcv, 'high'),
self.safe_number(ohlcv, 'low'),
self.safe_number(ohlcv, 'close'),
self.safe_number(ohlcv, 'amount'),
]
def fetch_ohlcv(self, symbol, timeframe='1m', since=None, limit=1000, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'period': self.timeframes[timeframe],
}
if limit is not None:
request['size'] = limit
response = self.marketGetHistoryKline(self.extend(request, params))
#
# {
# "status":"ok",
# "ch":"market.ethbtc.kline.1min",
# "ts":1591515374371,
# "data":[
# {"amount":0.0,"open":0.025095,"close":0.025095,"high":0.025095,"id":1591515360,"count":0,"low":0.025095,"vol":0.0},
# {"amount":1.2082,"open":0.025096,"close":0.025095,"high":0.025096,"id":1591515300,"count":6,"low":0.025095,"vol":0.0303205097},
# {"amount":0.0648,"open":0.025096,"close":0.025096,"high":0.025096,"id":1591515240,"count":2,"low":0.025096,"vol":0.0016262208},
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_ohlcvs(data, market, timeframe, since, limit)
def fetch_accounts(self, params={}):
self.load_markets()
response = self.privateGetAccountAccounts(params)
return response['data']
def fetch_currencies(self, params={}):
request = {
'language': self.options['language'],
}
response = self.publicGetSettingsCurrencys(self.extend(request, params))
currencies = self.safe_value(response, 'data')
result = {}
for i in range(0, len(currencies)):
currency = currencies[i]
#
# { name: "ctxc",
# 'display-name': "CTXC",
# 'withdraw-precision': 8,
# 'currency-type': "eth",
# 'currency-partition': "pro",
# 'support-sites': null,
# 'otc-enable': 0,
# 'deposit-min-amount': "2",
# 'withdraw-min-amount': "4",
# 'show-precision': "8",
# weight: "2988",
# visible: True,
# 'deposit-desc': "Please don’t deposit any other digital assets except CTXC t…",
#                  'withdraw-desc': "Minimum withdrawal amount: 4 CTXC. !>_<! For security reason…",
# 'deposit-enabled': True,
# 'withdraw-enabled': True,
# 'currency-addr-with-tag': False,
# 'fast-confirms': 15,
# 'safe-confirms': 30 }
#
id = self.safe_value(currency, 'name')
precision = self.safe_integer(currency, 'withdraw-precision')
code = self.safe_currency_code(id)
active = currency['visible'] and currency['deposit-enabled'] and currency['withdraw-enabled']
name = self.safe_string(currency, 'display-name')
result[code] = {
'id': id,
'code': code,
'type': 'crypto',
# 'payin': currency['deposit-enabled'],
# 'payout': currency['withdraw-enabled'],
# 'transfer': None,
'name': name,
'active': active,
'fee': None, # todo need to fetch from fee endpoint
'precision': precision,
'limits': {
'amount': {
'min': math.pow(10, -precision),
'max': math.pow(10, precision),
},
'deposit': {
'min': self.safe_number(currency, 'deposit-min-amount'),
'max': math.pow(10, precision),
},
'withdraw': {
'min': self.safe_number(currency, 'withdraw-min-amount'),
'max': math.pow(10, precision),
},
},
'info': currency,
}
return result
def fetch_balance(self, params={}):
self.load_markets()
self.load_accounts()
method = self.options['fetchBalanceMethod']
request = {
'id': self.accounts[0]['id'],
}
response = getattr(self, method)(self.extend(request, params))
balances = self.safe_value(response['data'], 'list', [])
result = {'info': response}
for i in range(0, len(balances)):
balance = balances[i]
currencyId = self.safe_string(balance, 'currency')
code = self.safe_currency_code(currencyId)
account = None
if code in result:
account = result[code]
else:
account = self.account()
if balance['type'] == 'trade':
account['free'] = self.safe_string(balance, 'balance')
if balance['type'] == 'frozen':
account['used'] = self.safe_string(balance, 'balance')
result[code] = account
return self.parse_balance(result)
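# fetch_orders_by_states() below is the shared backend for fetchOrders,
# fetchClosedOrders and the v1 flavour of fetchOpenOrders: it passes a
# comma-separated list of order states (e.g. 'filled,partial-canceled,canceled')
# to the endpoint selected by options['fetchOrdersByStatesMethod'].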
def fetch_orders_by_states(self, states, symbol=None, since=None, limit=None, params={}):
self.load_markets()
request = {
'states': states,
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
method = self.safe_string(self.options, 'fetchOrdersByStatesMethod', 'private_get_order_orders')
response = getattr(self, method)(self.extend(request, params))
#
# {status: "ok",
# data: [{ id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000",
# 'field-cash-amount': "0.001530630000000000",
# 'field-fees': "0.000003061260000000",
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 } ]}
#
return self.parse_orders(response['data'], market, since, limit)
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
request = {
'id': id,
}
response = self.privateGetOrderOrdersId(self.extend(request, params))
order = self.safe_value(response, 'data')
return self.parse_order(order)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_states('pre-submitted,submitted,partial-filled,filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
method = self.safe_string(self.options, 'fetchOpenOrdersMethod', 'fetch_open_orders_v1')
return getattr(self, method)(symbol, since, limit, params)
def fetch_open_orders_v1(self, symbol=None, since=None, limit=None, params={}):
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrdersV1() requires a symbol argument')
return self.fetch_orders_by_states('pre-submitted,submitted,partial-filled', symbol, since, limit, params)
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
return self.fetch_orders_by_states('filled,partial-canceled,canceled', symbol, since, limit, params)
def fetch_open_orders_v2(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
if symbol is None:
raise ArgumentsRequired(self.id + ' fetchOpenOrders() requires a symbol argument')
market = self.market(symbol)
accountId = self.safe_string(params, 'account-id')
if accountId is None:
# pick the first account
self.load_accounts()
for i in range(0, len(self.accounts)):
account = self.accounts[i]
if account['type'] == 'spot':
accountId = self.safe_string(account, 'id')
if accountId is not None:
break
request = {
'symbol': market['id'],
'account-id': accountId,
}
if limit is not None:
request['size'] = limit
omitted = self.omit(params, 'account-id')
response = self.privateGetOrderOpenOrders(self.extend(request, omitted))
#
# {
# "status":"ok",
# "data":[
# {
# "symbol":"ethusdt",
# "source":"api",
# "amount":"0.010000000000000000",
# "account-id":1528640,
# "created-at":1561597491963,
# "price":"400.000000000000000000",
# "filled-amount":"0.0",
# "filled-cash-amount":"0.0",
# "filled-fees":"0.0",
# "id":38477101630,
# "state":"submitted",
# "type":"sell-limit"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_orders(data, market, since, limit)
def parse_order_status(self, status):
statuses = {
'partial-filled': 'open',
'partial-canceled': 'canceled',
'filled': 'closed',
'canceled': 'canceled',
'submitted': 'open',
}
return self.safe_string(statuses, status, status)
def parse_order(self, order, market=None):
#
# { id: 13997833014,
# symbol: "ethbtc",
# 'account-id': 3398321,
# amount: "0.045000000000000000",
# price: "0.034014000000000000",
# 'created-at': 1545836976871,
# type: "sell-limit",
# 'field-amount': "0.045000000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.001530630000000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000003061260000000", # they have fixed it for filled-fees
# 'finished-at': 1545837948214,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
# { id: 20395337822,
# symbol: "ethbtc",
# 'account-id': 5685075,
# amount: "0.001000000000000000",
# price: "0.0",
# 'created-at': 1545831584023,
# type: "buy-market",
# 'field-amount': "0.029100000000000000", # they have fixed it for filled-amount
# 'field-cash-amount': "0.000999788700000000", # they have fixed it for filled-cash-amount
# 'field-fees': "0.000058200000000000", # they have fixed it for filled-fees
# 'finished-at': 1545831584181,
# source: "spot-api",
# state: "filled",
# 'canceled-at': 0 }
#
id = self.safe_string(order, 'id')
side = None
type = None
status = None
if 'type' in order:
orderType = order['type'].split('-')
side = orderType[0]
type = orderType[1]
status = self.parse_order_status(self.safe_string(order, 'state'))
marketId = self.safe_string(order, 'symbol')
symbol = self.safe_symbol(marketId, market)
timestamp = self.safe_integer(order, 'created-at')
amount = self.safe_number(order, 'amount')
filled = self.safe_number_2(order, 'filled-amount', 'field-amount') # typo in their API, filled amount
price = self.safe_number(order, 'price')
if price == 0.0:
price = None
cost = self.safe_number_2(order, 'filled-cash-amount', 'field-cash-amount') # same typo
feeCost = self.safe_number_2(order, 'filled-fees', 'field-fees') # typo in their API, filled fees
fee = None
if feeCost is not None:
feeCurrency = None
if market is not None:
feeCurrency = market['quote'] if (side == 'sell') else market['base']
fee = {
'cost': feeCost,
'currency': feeCurrency,
}
return self.safe_order({
'info': order,
'id': id,
'clientOrderId': None,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'symbol': symbol,
'type': type,
'timeInForce': None,
'postOnly': None,
'side': side,
'price': price,
'stopPrice': None,
'average': None,
'cost': cost,
'amount': amount,
'filled': filled,
'remaining': None,
'status': status,
'fee': fee,
'trades': None,
})
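# Note on create_order() below: for market buy orders the exchange expects the
# total cost (amount * price) in the 'amount' field, so by default a price is
# still required for market buys; setting
# options['createMarketBuyOrderRequiresPrice'] = False lets the cost be passed
# directly as the amount argument.  A client-order-id is generated from the
# configured broker id when none is supplied.
# Illustrative calls (rough sketch only, symbols and numbers are made up):
#     exchange.create_order('ETH/BTC', 'limit', 'buy', 0.045, 0.034)
#     exchange.create_order('ETH/USDT', 'market', 'buy', 0.01, 400)  # price only used to derive cost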
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
self.load_accounts()
market = self.market(symbol)
request = {
'account-id': self.accounts[0]['id'],
'symbol': market['id'],
'type': side + '-' + type,
}
clientOrderId = self.safe_string_2(params, 'clientOrderId', 'client-order-id') # must be 64 chars max and unique within 24 hours
if clientOrderId is None:
broker = self.safe_value(self.options, 'broker', {})
brokerId = self.safe_string(broker, 'id')
request['client-order-id'] = brokerId + self.uuid()
else:
request['client-order-id'] = clientOrderId
params = self.omit(params, ['clientOrderId', 'client-order-id'])
if (type == 'market') and (side == 'buy'):
if self.options['createMarketBuyOrderRequiresPrice']:
if price is None:
raise InvalidOrder(self.id + " market buy order requires price argument to calculate cost(total amount of quote currency to spend for buying, amount * price). To switch off self warning exception and specify cost in the amount argument, set .options['createMarketBuyOrderRequiresPrice'] = False. Make sure you know what you're doing.")
else:
# despite that cost = amount * price is in quote currency and should have quote precision
# the exchange API requires the cost supplied in 'amount' to be of base precision
# more about it here:
# https://github.com/ccxt/ccxt/pull/4395
# https://github.com/ccxt/ccxt/issues/7611
# we use amountToPrecision here because the exchange requires cost in base precision
request['amount'] = self.cost_to_precision(symbol, float(amount) * float(price))
else:
request['amount'] = self.cost_to_precision(symbol, amount)
else:
request['amount'] = self.amount_to_precision(symbol, amount)
if type == 'limit' or type == 'ioc' or type == 'limit-maker':
request['price'] = self.price_to_precision(symbol, price)
method = self.options['createOrderMethod']
response = getattr(self, method)(self.extend(request, params))
timestamp = self.milliseconds()
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'lastTradeTimestamp': None,
'status': None,
'symbol': symbol,
'type': type,
'side': side,
'price': price,
'amount': amount,
'filled': None,
'remaining': None,
'cost': None,
'trades': None,
'fee': None,
'clientOrderId': None,
'average': None,
}
def cancel_order(self, id, symbol=None, params={}):
response = self.privatePostOrderOrdersIdSubmitcancel({'id': id})
#
# response = {
# 'status': 'ok',
# 'data': '10138899000',
# }
#
return self.extend(self.parse_order(response), {
'id': id,
'status': 'canceled',
})
def cancel_orders(self, ids, symbol=None, params={}):
self.load_markets()
clientOrderIds = self.safe_value_2(params, 'clientOrderIds', 'client-order-ids')
params = self.omit(params, ['clientOrderIds', 'client-order-ids'])
request = {}
if clientOrderIds is None:
request['order-ids'] = ids
else:
request['client-order-ids'] = clientOrderIds
response = self.privatePostOrderOrdersBatchcancel(self.extend(request, params))
#
# {
# "status": "ok",
# "data": {
# "success": [
# "5983466"
# ],
# "failed": [
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "first"
# },
# {
# "err-msg": "Incorrect order state",
# "order-state": 7,
# "order-id": "",
# "err-code": "order-orderstate-error",
# "client-order-id": "second"
# },
# {
# "err-msg": "The record is not found.",
# "order-id": "",
# "err-code": "base-not-found",
# "client-order-id": "third"
# }
# ]
# }
# }
#
return response
def cancel_all_orders(self, symbol=None, params={}):
self.load_markets()
request = {
# 'account-id' string False NA The account id used for self cancel Refer to GET /v1/account/accounts
# 'symbol': market['id'], # a list of comma-separated symbols, all symbols by default
# 'types' 'string', buy-market, sell-market, buy-limit, sell-limit, buy-ioc, sell-ioc, buy-stop-limit, sell-stop-limit, buy-limit-fok, sell-limit-fok, buy-stop-limit-fok, sell-stop-limit-fok
# 'side': 'buy', # or 'sell'
# 'size': 100, # the number of orders to cancel 1-100
}
market = None
if symbol is not None:
market = self.market(symbol)
request['symbol'] = market['id']
response = self.privatePostOrderOrdersBatchCancelOpenOrders(self.extend(request, params))
#
# {
# code: 200,
# data: {
# "success-count": 2,
# "failed-count": 0,
# "next-id": 5454600
# }
# }
#
return response
def currency_to_precision(self, currency, fee):
return self.decimal_to_precision(fee, 0, self.currencies[currency]['precision'])
def parse_deposit_address(self, depositAddress, currency=None):
#
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
#
address = self.safe_string(depositAddress, 'address')
tag = self.safe_string(depositAddress, 'addressTag')
currencyId = self.safe_string(depositAddress, 'currency')
code = self.safe_currency_code(currencyId)
self.check_address(address)
return {
'currency': code,
'address': address,
'tag': tag,
'info': depositAddress,
}
def fetch_deposit_address(self, code, params={}):
self.load_markets()
currency = self.currency(code)
request = {
'currency': currency['id'],
}
response = self.v2PrivateGetAccountDepositAddress(self.extend(request, params))
#
# {
# code: 200,
# data: [
# {
# currency: "eth",
# address: "0xf7292eb9ba7bc50358e27f0e025a4d225a64127b",
# addressTag: "",
# chain: "eth"
# }
# ]
# }
#
data = self.safe_value(response, 'data', [])
return self.parse_deposit_address(self.safe_value(data, 0, {}), currency)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'deposit',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
if limit is None or limit > 100:
limit = 100
self.load_markets()
currency = None
if code is not None:
currency = self.currency(code)
request = {
'type': 'withdraw',
'from': 0, # From 'id' ... if you want to get results after a particular transaction id, pass the id in params.from
}
if currency is not None:
request['currency'] = currency['id']
if limit is not None:
request['size'] = limit # max 100
response = self.privateGetQueryDepositWithdraw(self.extend(request, params))
# return response
return self.parse_transactions(response['data'], currency, since, limit)
def parse_transaction(self, transaction, currency=None):
#
# fetchDeposits
#
# {
# 'id': 8211029,
# 'type': 'deposit',
# 'currency': 'eth',
# 'chain': 'eth',
# 'tx-hash': 'bd315....',
# 'amount': 0.81162421,
# 'address': '4b8b....',
# 'address-tag': '',
# 'fee': 0,
# 'state': 'safe',
# 'created-at': 1542180380965,
# 'updated-at': 1542180788077
# }
#
# fetchWithdrawals
#
# {
# 'id': 6908275,
# 'type': 'withdraw',
# 'currency': 'btc',
# 'chain': 'btc',
# 'tx-hash': 'c1a1a....',
# 'amount': 0.80257005,
# 'address': '1QR....',
# 'address-tag': '',
# 'fee': 0.0005,
# 'state': 'confirmed',
# 'created-at': 1552107295685,
# 'updated-at': 1552108032859
# }
#
timestamp = self.safe_integer(transaction, 'created-at')
updated = self.safe_integer(transaction, 'updated-at')
code = self.safe_currency_code(self.safe_string(transaction, 'currency'))
type = self.safe_string(transaction, 'type')
if type == 'withdraw':
type = 'withdrawal'
status = self.parse_transaction_status(self.safe_string(transaction, 'state'))
tag = self.safe_string(transaction, 'address-tag')
feeCost = self.safe_number(transaction, 'fee')
if feeCost is not None:
feeCost = abs(feeCost)
return {
'info': transaction,
'id': self.safe_string(transaction, 'id'),
'txid': self.safe_string(transaction, 'tx-hash'),
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'address': self.safe_string(transaction, 'address'),
'tag': tag,
'type': type,
'amount': self.safe_number(transaction, 'amount'),
'currency': code,
'status': status,
'updated': updated,
'fee': {
'currency': code,
'cost': feeCost,
'rate': None,
},
}
def parse_transaction_status(self, status):
statuses = {
# deposit statuses
'unknown': 'failed',
'confirming': 'pending',
'confirmed': 'ok',
'safe': 'ok',
'orphan': 'failed',
# withdrawal statuses
'submitted': 'pending',
'canceled': 'canceled',
'reexamine': 'pending',
'reject': 'failed',
'pass': 'pending',
'wallet-reject': 'failed',
# 'confirmed': 'ok', # present in deposit statuses
'confirm-error': 'failed',
'repealed': 'failed',
'wallet-transfer': 'pending',
'pre-transfer': 'pending',
}
return self.safe_string(statuses, status, status)
def withdraw(self, code, amount, address, tag=None, params={}):
self.load_markets()
self.check_address(address)
currency = self.currency(code)
request = {
'address': address, # only supports existing addresses in your withdraw address list
'amount': amount,
'currency': currency['id'].lower(),
}
if tag is not None:
request['addr-tag'] = tag # only for XRP?
response = self.privatePostDwWithdrawApiCreate(self.extend(request, params))
id = self.safe_string(response, 'data')
return {
'info': response,
'id': id,
}
def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
url = '/'
if api == 'market':
url += api
elif (api == 'public') or (api == 'private'):
url += self.version
elif (api == 'v2Public') or (api == 'v2Private'):
url += 'v2'
url += '/' + self.implode_params(path, params)
query = self.omit(params, self.extract_params(path))
if api == 'private' or api == 'v2Private':
self.check_required_credentials()
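            # request signing, as implemented below: sort the parameters, URL-encode
            # them, join METHOD, hostname, path and the encoded query with newlines,
            # sign that payload with HMAC-SHA256 using the API secret, and append the
            # base64 result as the 'Signature' query parameter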
timestamp = self.ymdhms(self.milliseconds(), 'T')
request = {
'SignatureMethod': 'HmacSHA256',
'SignatureVersion': '2',
'AccessKeyId': self.apiKey,
'Timestamp': timestamp,
}
if method != 'POST':
request = self.extend(request, query)
request = self.keysort(request)
auth = self.urlencode(request)
# unfortunately, PHP demands double quotes for the escaped newline symbol
# eslint-disable-next-line quotes
payload = "\n".join([method, self.hostname, url, auth])
signature = self.hmac(self.encode(payload), self.encode(self.secret), hashlib.sha256, 'base64')
auth += '&' + self.urlencode({'Signature': signature})
url += '?' + auth
if method == 'POST':
body = self.json(query)
headers = {
'Content-Type': 'application/json',
}
else:
headers = {
'Content-Type': 'application/x-www-form-urlencoded',
}
else:
if params:
url += '?' + self.urlencode(params)
url = self.implode_params(self.urls['api'][api], {
'hostname': self.hostname,
}) + url
return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, httpCode, reason, url, method, headers, body, response, requestHeaders, requestBody):
if response is None:
return # fallback to default error handler
if 'status' in response:
#
# {"status":"error","err-code":"order-limitorder-amount-min-error","err-msg":"limit order amount error, min: `0.001`","data":null}
#
status = self.safe_string(response, 'status')
if status == 'error':
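                # map the exchange's err-code / err-msg onto the typed exceptions
                # registered in self.exceptions; raise a generic ExchangeError when
                # nothing matches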
code = self.safe_string(response, 'err-code')
feedback = self.id + ' ' + body
self.throw_broadly_matched_exception(self.exceptions['broad'], body, feedback)
self.throw_exactly_matched_exception(self.exceptions['exact'], code, feedback)
message = self.safe_string(response, 'err-msg')
self.throw_exactly_matched_exception(self.exceptions['exact'], message, feedback)
raise ExchangeError(feedback)
| 44.737401 | 355 | 0.482228 |
aceeda4bb5eb6994c12b05a4a6f4af19b3482510 | 661 | py | Python | manage.py | majeedkarimi/resume_Myour | d632df4898155e956a73734c53d4086716d54ff0 | [
"MIT"
] | null | null | null | manage.py | majeedkarimi/resume_Myour | d632df4898155e956a73734c53d4086716d54ff0 | [
"MIT"
] | null | null | null | manage.py | majeedkarimi/resume_Myour | d632df4898155e956a73734c53d4086716d54ff0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
"""Run administrative tasks."""
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Myour.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| 28.73913 | 73 | 0.677761 |
aceedbb3eeca697000bcca181520a77ce3db9618 | 314 | py | Python | accesses/admin.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | null | null | null | accesses/admin.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | 4 | 2021-03-30T14:10:30.000Z | 2021-09-22T19:29:56.000Z | accesses/admin.py | emil-magnusson/py-on-api | 50967ea9d6a189c2c1cb75bd3e2b8ab817077634 | [
"MIT"
] | null | null | null | from django.contrib import admin
from .models import Accesses, Services, AccesState, PremisesType, CoNetworkAgreement
admin.site.register(Accesses)
admin.site.register(Services)
admin.site.register(AccesState)
admin.site.register(PremisesType)
admin.site.register(CoNetworkAgreement)
# Register your models here.
| 31.4 | 84 | 0.83758 |
aceedd33c6d859c3b0dd17ca5a7650ae8e228559 | 30 | py | Python | test/integrity/__init__.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 17 | 2019-02-14T09:26:03.000Z | 2022-03-11T09:23:52.000Z | test/integrity/__init__.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 91 | 2019-05-06T18:59:02.000Z | 2022-01-11T06:22:32.000Z | test/integrity/__init__.py | LibertyAces/BitSwanPump | 02301bfd4e807836403ce6a22030ad47058541d6 | [
"BSD-3-Clause"
] | 10 | 2019-04-23T08:48:58.000Z | 2022-02-13T14:24:28.000Z | from .test_integrity import *
| 15 | 29 | 0.8 |
aceedd6a3a51f2507ca6c4c764d301ebd5d2f0eb | 526 | py | Python | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/iraq-33643 | 639cb4af4c268c67d82f5297813e801365c5b489 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/iraq-33643 | 639cb4af4c268c67d82f5297813e801365c5b489 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | backend/home/migrations/0001_load_initial_data.py | crowdbotics-apps/iraq-33643 | 639cb4af4c268c67d82f5297813e801365c5b489 | [
"FTL",
"AML",
"RSA-MD"
] | null | null | null | from django.db import migrations
def create_site(apps, schema_editor):
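    # keep the default Site record (pk=1) in sync with the app name and custom domain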
Site = apps.get_model("sites", "Site")
custom_domain = "iraq-33643.botics.co"
site_params = {
"name": "Iraq",
}
if custom_domain:
site_params["domain"] = custom_domain
Site.objects.update_or_create(defaults=site_params, id=1)
class Migration(migrations.Migration):
dependencies = [
("sites", "0002_alter_domain_unique"),
]
operations = [
migrations.RunPython(create_site),
]
| 20.230769 | 61 | 0.65019 |
aceedeff8f5785f68ea9d735494b6b31b450903b | 3,742 | py | Python | soaplib/core/model/exception.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null | soaplib/core/model/exception.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null | soaplib/core/model/exception.py | divaliu1408/overfit | 083dcfaa758391092933e19544462cd831e73ef0 | [
"Apache-2.0"
] | null | null | null |
#
# soaplib - Copyright (C) Soaplib contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
from soaplib.core import namespaces
from lxml import etree
from soaplib.core.model import Base
_ns_xsd = namespaces.ns_xsd
_pref_soap_env = namespaces.const_prefmap[namespaces.ns_soap_env]
class Fault(Exception, Base):
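    """SOAP Fault exception: serializes to and from the soapenv:Fault element and
    registers a matching '<TypeName>Fault' complex type in the generated schema."""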
__type_name__ = "Fault"
def __init__(self, faultcode='Server', faultstring="",
faultactor="", detail=None):
if faultcode.startswith('%s:' % _pref_soap_env):
self.faultcode = faultcode
else:
self.faultcode = '%s:%s' % (_pref_soap_env, faultcode)
self.faultstring = faultstring or self.get_type_name()
self.faultactor = faultactor
self.detail = detail
def __repr__(self):
return "%s: %r" % (self.faultcode, self.faultstring)
@classmethod
def to_parent_element(cls, value, tns, parent_elt, name=None):
assert name is None
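        # a Fault is always serialized under the fixed soapenv:Fault element name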
element = etree.SubElement(parent_elt,
"{%s}Fault" % namespaces.ns_soap_env)
etree.SubElement(element, 'faultcode').text = value.faultcode
etree.SubElement(element, 'faultstring').text = value.faultstring
etree.SubElement(element, 'faultactor').text = value.faultactor
        if value.detail is not None:
etree.SubElement(element, 'detail').append(value.detail)
def add_to_parent_element(self, tns, parent):
self.__class__.to_parent_element(self, tns, parent, name=None)
@classmethod
def from_xml(cls, element):
code = element.find('faultcode').text
string = element.find('faultstring').text
factor = element.find('faultactor').text
detail = element.find('detail')
return cls(faultcode=code, faultstring=string,
faultactor=factor, detail=detail)
@classmethod
def add_to_schema(cls, schema_dict):
app = schema_dict.app
complex_type = etree.Element('{%s}complexType' % _ns_xsd)
complex_type.set('name', '%sFault' % cls.get_type_name())
extends = getattr(cls, '__extends__', None)
if extends is not None:
complex_content = etree.SubElement(complex_type,
'{%s}complexContent' % _ns_xsd)
extension = etree.SubElement(complex_content, "{%s}extension"
% namespaces.ns_xsd)
extension.set('base', extends.get_type_name_ns(app))
sequence_parent = extension
else:
sequence_parent = complex_type
seq = etree.SubElement(sequence_parent, '{%s}sequence' % _ns_xsd)
schema_dict.add_complex_type(cls, complex_type)
top_level_element = etree.Element('{%s}element' % _ns_xsd)
top_level_element.set('name', cls.get_type_name())
top_level_element.set('{%s}type' % _ns_xsd,
'%sFault' % cls.get_type_name_ns(app))
schema_dict.add_element(cls, top_level_element)
| 38.979167 | 80 | 0.652058 |
aceee10a0ab9861e8c5abdbb073a79377fec6861 | 1,292 | py | Python | {{ cookiecutter.project_name }}/hello_world/app.py | arpena/cookiecutter-aws-sam-python | 9a31ea83687a8b2da09f473eb5dc1694706ae2ae | [
"MIT-0"
] | 189 | 2018-04-10T10:16:05.000Z | 2022-03-24T20:59:28.000Z | {{ cookiecutter.project_name }}/hello_world/app.py | arpena/cookiecutter-aws-sam-python | 9a31ea83687a8b2da09f473eb5dc1694706ae2ae | [
"MIT-0"
] | 42 | 2018-04-26T12:54:58.000Z | 2022-02-04T11:45:17.000Z | {{ cookiecutter.project_name }}/hello_world/app.py | arpena/cookiecutter-aws-sam-python | 9a31ea83687a8b2da09f473eb5dc1694706ae2ae | [
"MIT-0"
] | 78 | 2018-04-10T15:07:47.000Z | 2022-02-05T02:04:29.000Z | import json
import os
import boto3
from aws_lambda_powertools import Logger, Metrics, Tracer
from aws_lambda_powertools.logging import correlation_paths
from aws_lambda_powertools.utilities.typing import LambdaContext
from aws_lambda_powertools.event_handler.api_gateway import ApiGatewayResolver
# https://awslabs.github.io/aws-lambda-powertools-python/#features
tracer = Tracer()
logger = Logger()
metrics = Metrics()
app = ApiGatewayResolver()
# Global variables are reused across execution contexts (if available)
# session = boto3.Session()
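# Routing: ApiGatewayResolver dispatches each incoming API Gateway event to the
# matching @app.get handler below; lambda_handler simply delegates to app.resolve().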
@app.get("/hello")
def hello():
query_string_name = app.current_event.get_query_string_value(name="name", default_value="universe")
return {"message": f"hello {query_string_name}"}
@app.get("/hello/<name>")
def hello_you(name):
# query_strings_as_dict = app.current_event.query_string_parameters
# json_payload = app.current_event.json_body
return {"message": f"hello {name}"}
@metrics.log_metrics(capture_cold_start_metric=True)
@logger.inject_lambda_context(correlation_id_path=correlation_paths.API_GATEWAY_REST)
@tracer.capture_lambda_handler
def lambda_handler(event, context: LambdaContext):
try:
return app.resolve(event, context)
except Exception as e:
logger.exception(e)
raise
| 32.3 | 103 | 0.782508 |
aceee190c830881841b3d82438cdab5287b4c831 | 475 | py | Python | msticpy/sectools/__init__.py | ianhelle/msticpy | 16f08d0b0453423174a0895e94cf31b6a9c48b13 | [
"MIT"
] | 1 | 2022-02-03T16:59:44.000Z | 2022-02-03T16:59:44.000Z | msticpy/sectools/__init__.py | ianhelle/msticpy | 16f08d0b0453423174a0895e94cf31b6a9c48b13 | [
"MIT"
] | null | null | null | msticpy/sectools/__init__.py | ianhelle/msticpy | 16f08d0b0453423174a0895e94cf31b6a9c48b13 | [
"MIT"
] | 2 | 2019-03-23T14:21:16.000Z | 2020-10-26T08:46:16.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""MSTIC Security Tools."""
# flake8: noqa: F403
from . import base64unpack as b64
from .iocextract import *
from .vtlookup import *
from .eventcluster import *
| 36.538462 | 76 | 0.509474 |
aceee1d3933a193868b3baa08aba78d539005c55 | 3,304 | py | Python | train.py | sebasvega95/neural-style-transfer | 3229a7399d6dde57a2ee9a691d1de0946861cfac | [
"MIT"
] | 4 | 2018-06-25T23:35:51.000Z | 2019-04-11T03:37:28.000Z | train.py | sebasvega95/neural-style-transfer | 3229a7399d6dde57a2ee9a691d1de0946861cfac | [
"MIT"
] | 4 | 2018-06-26T13:14:11.000Z | 2018-07-04T23:17:42.000Z | train.py | sebasvega95/neural-style-transfer | 3229a7399d6dde57a2ee9a691d1de0946861cfac | [
"MIT"
] | 3 | 2018-06-26T17:46:36.000Z | 2018-10-11T13:48:11.000Z | import argparse
import os
import pathlib
import shutil
import tensorflow as tf
from loss import get_total_loss
from model import create_resnet
import precompute
import utils
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--style', type=str, help='Path to style image', required=True)
parser.add_argument('--train', type=str, help='Path to training (content) images', required=True)
parser.add_argument('--weights', type=str, help='Path where to save the model\'s weights', required=True)
return parser.parse_args()
def check_args(args):
if not utils.path_exists(args.style):
print('Style image not found in', args.style)
exit(-1)
if not utils.path_exists(args.train):
print('Train path', args.style, 'not found')
exit(-1)
pathlib.Path(args.weights).parent.mkdir(parents=True, exist_ok=True)
args = parse_args()
check_args(args)
epochs = 2
batch_size = 4
img_height = 256
img_width = 256
num_images = utils.get_num_images(args.train)
if num_images <= 0:
    print('No images found in', args.train)
    exit(-1)
tf.reset_default_graph()
style_image = utils.preprocess_image_from_path(args.style)
print('Precomputing style gram matrices')
style_grams = precompute.style_grams(style_image)
print('Creating model')
transformation_model = create_resnet(input_shape=(None, None, 3), name='transformation_net')
with tf.Session() as sess:
print('Defining loss')
total_loss = get_total_loss(
transformation_model,
style_grams,
content_weight=2,
style_weight=1e2,
total_variation_weight=1e-5,
batch_size=batch_size,
name='loss')
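    # the total loss combines the content term, the style term (matched against the
    # precomputed Gram matrices) and a total-variation smoothness penalty, weighted
    # by the factors above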
with tf.name_scope('optimizer'):
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='transformation_net')
extra_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
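        # UPDATE_OPS collects ops (typically batch-norm moving-average updates) that
        # must run alongside each training step; the control dependency below ensures
        # they are executed with every train_op run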
optimizer = tf.train.AdamOptimizer()
with tf.control_dependencies(extra_ops):
train_op = optimizer.minimize(total_loss, var_list=var_list)
opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='optimizer')
model_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='transformation_net')
init = tf.variables_initializer(var_list=opt_vars + model_vars)
sess.run(init)
print('Training')
shutil.rmtree('logs', ignore_errors=True)
summaries = tf.summary.merge_all()
img_gen = utils.image_generator(args.train, batch_size=batch_size, target_shape=(img_width, img_height))
writer = tf.summary.FileWriter('logs', sess.graph)
global_step = 0
for epoch in range(epochs):
step = 0
while step * batch_size < num_images:
images = next(img_gen)
_, step_loss, summary = sess.run(
[train_op, total_loss, summaries],
feed_dict={
transformation_model.input: images,
tf.keras.backend.learning_phase(): 1
})
if step % 500 == 0:
print('Epoch {}, step {}: loss: {}'.format(epoch, step, step_loss))
writer.add_summary(summary, global_step)
step += 1
global_step += 1
transformation_model.save_weights(args.weights)
| 33.373737 | 109 | 0.68069 |
aceee33d81bd952e8035a7e3d6e4c634a6fe0600 | 3,062 | py | Python | tests/test_subset.py | benjcunningham/subsets | 806a1934650e5cfb3306e6528a6a56c148e399c4 | [
"MIT"
] | null | null | null | tests/test_subset.py | benjcunningham/subsets | 806a1934650e5cfb3306e6528a6a56c148e399c4 | [
"MIT"
] | 9 | 2018-11-08T03:37:34.000Z | 2018-11-22T22:20:27.000Z | tests/test_subset.py | benjcunningham/subsets | 806a1934650e5cfb3306e6528a6a56c148e399c4 | [
"MIT"
] | null | null | null | """Test Subset class
"""
import unittest
import pydub
import srt
import pandas as pd
from subsets import Subset
class TestSubset(unittest.TestCase):
"""Tests of Subset objects"""
def setUp(self):
self.args = {
"subs": "tests/test_files/nge.srt",
"audio": "tests/test_files/nge.aac"
}
def test_read_subs(self):
"""Read subtitle file into object"""
sub = Subset(**self.args)
sub.read_subs(self.args["subs"])
self.assertTrue(hasattr(sub, "subs"))
self.assertIsInstance(sub.subs, list)
self.assertIsInstance(sub.subs[0], srt.Subtitle)
def test_read_audio(self):
"""Read audio file into object"""
sub = Subset(**self.args)
sub.read_audio(self.args["audio"])
self.assertTrue(hasattr(sub, "audio"))
self.assertIsInstance(sub.audio, pydub.AudioSegment)
def test_split(self):
"""Split audio based on integer bounds"""
sub = Subset(**self.args)
bound = (0, 100)
split = sub._split(bound)
self.assertIsInstance(split, pydub.AudioSegment)
self.assertEqual(len(split), 100)
def test_split_audio(self):
"""Split audio based on subtitles"""
sub = Subset(**self.args)
sub.split_audio()
self.assertTrue(hasattr(sub, "splits"))
self.assertIsInstance(sub.splits, list)
self.assertIsInstance(sub.splits[0], pydub.AudioSegment)
self.assertEqual(len(sub.splits), len(sub.subs))
def test_init(self):
"""Initialize object with subs and audio"""
sub = Subset(**self.args)
self.assertTrue(hasattr(sub, "subs"))
self.assertTrue(hasattr(sub, "audio"))
self.assertTrue(hasattr(sub, "splits"))
self.assertIsInstance(sub.subs, list)
self.assertIsInstance(sub.subs[0], srt.Subtitle)
self.assertIsInstance(sub.audio, pydub.AudioSegment)
self.assertIsInstance(sub.splits, list)
self.assertIsInstance(sub.splits[0], pydub.AudioSegment)
def test_init_kwargs(self):
"""Initialize object with kwargs"""
subs_kwargs = {"encoding": "utf-8"}
audio_kwargs = {"format": "aac"}
sub = Subset(**self.args,
subs_kwargs=subs_kwargs,
audio_kwargs=audio_kwargs)
self.assertTrue(hasattr(sub, "subs"))
self.assertTrue(hasattr(sub, "audio"))
self.assertTrue(hasattr(sub, "splits"))
self.assertIsInstance(sub.subs, list)
self.assertIsInstance(sub.subs[0], srt.Subtitle)
self.assertIsInstance(sub.audio, pydub.AudioSegment)
self.assertIsInstance(sub.splits, list)
self.assertIsInstance(sub.splits[0], pydub.AudioSegment)
def test_to_table(self):
"""Convert subs to data frame"""
sub = Subset(**self.args)
table = sub.to_table()
self.assertIsInstance(table, pd.DataFrame)
self.assertEqual(table.shape, (3, 5))
if __name__ == "__main__":
unittest.main()
| 27.339286 | 64 | 0.619856 |
aceee3d5a36a29784cf1de34fb9c0dbf1fedced7 | 58,663 | py | Python | src/masternode_details.py | gewelio/gewel-masternode-tool | 3d18a68789f6428f9f631689d19c69eb8d384fa4 | [
"MIT"
] | null | null | null | src/masternode_details.py | gewelio/gewel-masternode-tool | 3d18a68789f6428f9f631689d19c69eb8d384fa4 | [
"MIT"
] | null | null | null | src/masternode_details.py | gewelio/gewel-masternode-tool | 3d18a68789f6428f9f631689d19c69eb8d384fa4 | [
"MIT"
] | null | null | null | import os
import sys
from enum import Enum
from functools import partial
from typing import Callable
import bitcoin
from PyQt5 import QtCore
from PyQt5.QtCore import QSize, pyqtSlot, Qt
from PyQt5.QtGui import QPixmap, QTextDocument
from PyQt5.QtWidgets import QDialog, QWidget, QLineEdit, QMessageBox, QAction, QApplication, QActionGroup
import gewel_utils
import hw_intf
from app_config import MasternodeConfig, GMN_ROLE_OWNER, GMN_ROLE_OPERATOR, GMN_ROLE_VOTING, InputKeyType
from bip44_wallet import Bip44Wallet, BreakFetchTransactionsException
from common import CancelException
from find_coll_tx_dlg import ListCollateralTxsDlg
from thread_fun_dlg import CtrlObject
from ui import ui_masternode_details
from wnd_utils import WndUtils
class WdgMasternodeDetails(QWidget, ui_masternode_details.Ui_WdgMasternodeDetails):
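    """Widget that displays and edits a single masternode configuration: name, IP/port,
    collateral, deterministic-masternode (GMN) keys and the user's owner/operator/voting
    roles."""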
name_modified = QtCore.pyqtSignal(str)
data_changed = QtCore.pyqtSignal(object)
role_modified = QtCore.pyqtSignal()
label_width_changed = QtCore.pyqtSignal(int)
def __init__(self, main_dlg, app_config, geweld_intf):
QWidget.__init__(self, main_dlg)
ui_masternode_details.Ui_WdgMasternodeDetails.__init__(self)
self.main_dlg = main_dlg
self.app_config = app_config
self.geweld_intf = geweld_intf
self.masternode: MasternodeConfig = None
self.updating_ui = False
self.edit_mode = False
self.setupUi()
def setupUi(self):
ui_masternode_details.Ui_WdgMasternodeDetails.setupUi(self, self)
self.main_dlg.setIcon(self.btnShowMnPrivateKey, 'eye@16px.png')
self.main_dlg.setIcon(self.btnShowOwnerPrivateKey, 'eye@16px.png')
self.main_dlg.setIcon(self.btnShowOperatorPrivateKey, 'eye@16px.png')
self.main_dlg.setIcon(self.btnShowVotingPrivateKey, 'eye@16px.png')
self.main_dlg.setIcon(self.btnCopyMnKey, 'content-copy@16px.png')
self.main_dlg.setIcon(self.btnCopyOwnerKey, 'content-copy@16px.png')
self.main_dlg.setIcon(self.btnCopyOperatorKey, 'content-copy@16px.png')
self.main_dlg.setIcon(self.btnCopyVotingKey, 'content-copy@16px.png')
self.main_dlg.setIcon(self.btnShowCollateralPathAddress, 'eye@16px.png')
self.act_view_as_mn_private_key = QAction('View as private key', self)
self.act_view_as_mn_private_key.setData('privkey')
self.act_view_as_mn_private_key.triggered.connect(self.on_masternode_view_key_type_changed)
self.act_view_as_mn_public_address = QAction('View as Gewel address', self)
self.act_view_as_mn_public_address.setData('address')
self.act_view_as_mn_public_address.triggered.connect(self.on_masternode_view_key_type_changed)
self.act_view_as_mn_public_key = QAction('View as public key', self)
self.act_view_as_mn_public_key.setData('pubkey')
self.act_view_as_mn_public_key.triggered.connect(self.on_masternode_view_key_type_changed)
self.act_view_as_mn_public_key_hash = QAction('View as public key hash', self)
self.act_view_as_mn_public_key_hash.setData('pubkeyhash')
self.act_view_as_mn_public_key_hash.triggered.connect(self.on_masternode_view_key_type_changed)
self.ag_mn_key = QActionGroup(self)
self.act_view_as_mn_private_key.setCheckable(True)
self.act_view_as_mn_public_address.setCheckable(True)
self.act_view_as_mn_public_key.setCheckable(True)
self.act_view_as_mn_public_key_hash.setCheckable(True)
self.act_view_as_mn_private_key.setActionGroup(self.ag_mn_key)
self.act_view_as_mn_public_address.setActionGroup(self.ag_mn_key)
self.act_view_as_mn_public_key.setActionGroup(self.ag_mn_key)
self.act_view_as_mn_public_key_hash.setActionGroup(self.ag_mn_key)
self.btnShowMnPrivateKey.addActions((self.act_view_as_mn_private_key, self.act_view_as_mn_public_address,
self.act_view_as_mn_public_key, self.act_view_as_mn_public_key_hash))
self.act_view_as_owner_private_key = QAction('View as private key', self)
self.act_view_as_owner_private_key.setData('privkey')
self.act_view_as_owner_private_key.triggered.connect(self.on_owner_view_key_type_changed)
self.act_view_as_owner_public_address = QAction('View as Gewel address', self)
self.act_view_as_owner_public_address.setData('address')
self.act_view_as_owner_public_address.triggered.connect(self.on_owner_view_key_type_changed)
self.act_view_as_owner_public_key = QAction('View as public key', self)
self.act_view_as_owner_public_key.setData('pubkey')
self.act_view_as_owner_public_key.triggered.connect(self.on_owner_view_key_type_changed)
self.act_view_as_owner_public_key_hash = QAction('View as public key hash', self)
self.act_view_as_owner_public_key_hash.setData('pubkeyhash')
self.act_view_as_owner_public_key_hash.triggered.connect(self.on_owner_view_key_type_changed)
self.ag_owner_key = QActionGroup(self)
self.act_view_as_owner_private_key.setCheckable(True)
self.act_view_as_owner_public_address.setCheckable(True)
self.act_view_as_owner_public_key.setCheckable(True)
self.act_view_as_owner_public_key_hash.setCheckable(True)
self.act_view_as_owner_private_key.setActionGroup(self.ag_owner_key)
self.act_view_as_owner_public_address.setActionGroup(self.ag_owner_key)
self.act_view_as_owner_public_key.setActionGroup(self.ag_owner_key)
self.act_view_as_owner_public_key_hash.setActionGroup(self.ag_owner_key)
self.btnShowOwnerPrivateKey.addActions(
(self.act_view_as_owner_private_key, self.act_view_as_owner_public_address,
self.act_view_as_owner_public_key, self.act_view_as_owner_public_key_hash))
self.act_view_as_voting_private_key = QAction('View as private key', self)
self.act_view_as_voting_private_key.setData('privkey')
self.act_view_as_voting_private_key.triggered.connect(self.on_voting_view_key_type_changed)
self.act_view_as_voting_public_address = QAction('View as Gewel address', self)
self.act_view_as_voting_public_address.setData('address')
self.act_view_as_voting_public_address.triggered.connect(self.on_voting_view_key_type_changed)
self.act_view_as_voting_public_key = QAction('View as public key', self)
self.act_view_as_voting_public_key.setData('pubkey')
self.act_view_as_voting_public_key.triggered.connect(self.on_voting_view_key_type_changed)
self.act_view_as_voting_public_key_hash = QAction('View as public key hash', self)
self.act_view_as_voting_public_key_hash.setData('pubkeyhash')
self.act_view_as_voting_public_key_hash.triggered.connect(self.on_voting_view_key_type_changed)
self.ag_voting_key = QActionGroup(self)
self.act_view_as_voting_private_key.setCheckable(True)
self.act_view_as_voting_public_address.setCheckable(True)
self.act_view_as_voting_public_key.setCheckable(True)
self.act_view_as_voting_public_key_hash.setCheckable(True)
self.act_view_as_voting_private_key.setActionGroup(self.ag_voting_key)
self.act_view_as_voting_public_address.setActionGroup(self.ag_voting_key)
self.act_view_as_voting_public_key.setActionGroup(self.ag_voting_key)
self.act_view_as_voting_public_key_hash.setActionGroup(self.ag_voting_key)
self.btnShowVotingPrivateKey.addActions((self.act_view_as_voting_private_key,
self.act_view_as_voting_public_address,
self.act_view_as_voting_public_key,
self.act_view_as_voting_public_key_hash))
self.act_view_as_operator_private_key = QAction('View as private key', self)
self.act_view_as_operator_private_key.setData('privkey')
self.act_view_as_operator_private_key.triggered.connect(self.on_operator_view_key_type_changed)
self.act_view_as_operator_public_key = QAction('View as public key', self)
self.act_view_as_operator_public_key.setData('pubkey')
self.act_view_as_operator_public_key.triggered.connect(self.on_operator_view_key_type_changed)
self.ag_operator_key = QActionGroup(self)
self.act_view_as_operator_private_key.setCheckable(True)
self.act_view_as_operator_public_key.setCheckable(True)
self.act_view_as_operator_private_key.setActionGroup(self.ag_operator_key)
self.act_view_as_operator_public_key.setActionGroup(self.ag_operator_key)
self.btnShowOperatorPrivateKey.addActions((self.act_view_as_operator_private_key,
self.act_view_as_operator_public_key))
self.update_ui_controls_state()
def showEvent(self, QShowEvent):
self.update_key_controls_state() # qt 0.9.2: control styles aren't updated properly without reapplying
# them here
self.lblOwnerKey.fontMetrics()
def update_ui_controls_state(self):
"""Update visibility and enabled/disabled state of the UI controls.
"""
if self.masternode:
is_deterministic = self.masternode.is_deterministic
else:
is_deterministic = False
if self.masternode:
self.lblTitle.setVisible(True)
self.lblAction.setVisible(self.edit_mode is True)
if is_deterministic:
lbl = '<span>Deterministic masternode</span>'
lbl_action = '<a href="change-to-non-gmn">Alter configuration to non-deterministic</a>'
color = '#2eb82e'
else:
lbl = '<span>Non-deterministic masternode</span>'
lbl_action = '<a href="change-to-gmn">Alter configuration to deterministic</a>'
color = 'navy'
self.lblTitle.setText(lbl)
self.lblTitle.setStyleSheet(
f'QLabel{{background-color:{color};color:white;padding:3px 5px 3px 5px; border-radius:3px}}')
self.lblAction.setText(lbl_action)
else:
self.lblTitle.setVisible(False)
self.lblAction.setVisible(False)
self.lblGMNTxHash.setVisible(self.masternode is not None and is_deterministic)
self.edtGMNTxHash.setVisible(self.masternode is not None and is_deterministic)
self.btnFindGMNTxHash.setVisible(self.masternode is not None and self.edit_mode and is_deterministic)
self.lblCollateral.setVisible(self.masternode is not None and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0) or not is_deterministic))
self.btnLocateCollateral.setVisible(self.masternode is not None and self.edit_mode and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.btnBip32PathToAddress.setVisible(self.masternode is not None and self.edit_mode and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.btnShowCollateralPathAddress.setVisible(self.masternode is not None and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.edtCollateralAddress.setVisible(self.masternode is not None and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.lblCollateralPath.setVisible(self.masternode is not None and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.edtCollateralPath.setVisible(self.masternode is not None and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
or not is_deterministic))
self.lblOwnerKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0))
self.edtOwnerKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0))
self.btnShowOwnerPrivateKey.setVisible(self.masternode is not None and is_deterministic and
self.edit_mode is False and
(self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0))
self.btnCopyOwnerKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0))
self.lblOperatorKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR > 0))
self.edtOperatorKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR > 0))
self.btnShowOperatorPrivateKey.setVisible(self.masternode is not None and is_deterministic and
self.edit_mode is False and
(self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR > 0))
self.btnCopyOperatorKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR > 0))
self.lblVotingKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_VOTING > 0))
self.edtVotingKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_VOTING > 0))
self.btnShowVotingPrivateKey.setVisible(self.masternode is not None and is_deterministic and
self.edit_mode is False and
(self.masternode.gmn_user_roles & GMN_ROLE_VOTING > 0))
self.btnCopyVotingKey.setVisible(self.masternode is not None and is_deterministic and
(self.masternode.gmn_user_roles & GMN_ROLE_VOTING > 0))
self.act_view_as_owner_private_key.setVisible(self.masternode is not None and
self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE)
self.act_view_as_owner_public_key.setVisible(self.masternode is not None and
self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE)
self.act_view_as_operator_private_key.setVisible(self.masternode is not None and
self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE)
self.act_view_as_voting_private_key.setVisible(self.masternode is not None and
self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE)
self.act_view_as_voting_public_key.setVisible(self.masternode is not None and
self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE)
self.btnGenerateMnPrivateKey.setVisible(
self.masternode is not None and self.edit_mode and
((self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0) or not is_deterministic))
self.btnGenerateOwnerPrivateKey.setVisible(
self.masternode is not None and is_deterministic and self.edit_mode and
self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE and
self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
self.btnGenerateOperatorPrivateKey.setVisible(
self.masternode is not None and is_deterministic and self.edit_mode and
self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE and
self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR > 0)
self.btnGenerateVotingPrivateKey.setVisible(
self.masternode is not None and is_deterministic and self.edit_mode and
self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE and
self.masternode.gmn_user_roles & GMN_ROLE_VOTING > 0)
self.lblUserRole.setVisible(self.masternode is not None and is_deterministic)
self.chbRoleOwner.setVisible(self.masternode is not None and is_deterministic)
self.chbRoleOperator.setVisible(self.masternode is not None and is_deterministic)
self.chbRoleVoting.setVisible(self.masternode is not None and is_deterministic)
self.lblMasternodePrivateKey.setVisible(self.masternode is not None and
self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
self.edtMasternodePrivateKey.setVisible(self.masternode is not None and
self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
self.btnShowMnPrivateKey.setVisible(self.masternode is not None and self.edit_mode is False and
self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
self.btnCopyMnKey.setVisible(self.masternode is not None and
self.masternode.gmn_user_roles & GMN_ROLE_OWNER > 0)
# self.btnFindCollateral.setVisible(self.masternode is not None)
self.lblIP.setVisible(self.masternode is not None)
self.edtIP.setVisible(self.masternode is not None)
self.lblPort.setVisible(self.masternode is not None)
self.edtPort.setVisible(self.masternode is not None)
self.lblProtocolVersion.setVisible(self.masternode is not None and not is_deterministic)
self.edtProtocolVersion.setVisible(self.masternode is not None and not is_deterministic)
self.lblName.setVisible(self.masternode is not None)
self.edtName.setVisible(self.masternode is not None)
self.lblCollateralTxHash.setVisible(self.masternode is not None)
self.edtCollateralTxHash.setVisible(self.masternode is not None)
self.lblCollateralTxIndex.setVisible(self.masternode is not None)
self.edtCollateralTxIndex.setVisible(self.masternode is not None)
self.chbRoleVoting.setEnabled(self.edit_mode)
self.chbRoleOperator.setEnabled(self.edit_mode)
self.chbRoleOwner.setEnabled(self.edit_mode)
self.edtName.setReadOnly(self.edit_mode is False)
self.edtIP.setReadOnly(self.edit_mode is False)
self.edtPort.setReadOnly(self.edit_mode is False)
self.edtProtocolVersion.setReadOnly(self.edit_mode is False)
self.edtCollateralAddress.setReadOnly(self.edit_mode is False)
self.edtCollateralPath.setReadOnly(self.edit_mode is False)
self.edtCollateralTxHash.setReadOnly(self.edit_mode is False)
self.edtCollateralTxIndex.setReadOnly(self.edit_mode is False)
self.edtGMNTxHash.setReadOnly(self.edit_mode is False)
self.edtMasternodePrivateKey.setReadOnly(self.edit_mode is False)
self.edtOwnerKey.setReadOnly(self.edit_mode is False)
self.edtOperatorKey.setReadOnly(self.edit_mode is False)
self.edtVotingKey.setReadOnly(self.edit_mode is False)
self.btnGenerateMnPrivateKey.setEnabled(self.edit_mode is True)
self.btnGenerateOwnerPrivateKey.setEnabled(self.edit_mode is True)
self.btnGenerateOperatorPrivateKey.setEnabled(self.edit_mode is True)
self.btnGenerateVotingPrivateKey.setEnabled(self.edit_mode is True)
self.btnLocateCollateral.setEnabled(self.edit_mode)
col_btn_visible = self.masternode is not None and (not self.masternode.collateralTx or
not self.masternode.collateralAddress or
not self.masternode.collateralBip32Path)
self.update_key_controls_state()
def update_dynamic_labels(self):
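        """Refresh the owner/operator/voting key labels so they reflect the configured
        key input type (private key vs. address/public key) and the currently selected
        view mode."""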
def style_to_color(style: str) -> str:
if style == 'hl1':
color = 'color:#00802b'
elif style == 'hl2':
color = 'color:#0047b3'
else:
color = ''
return color
def get_label_text(prefix:str, cur_key_type: str, tooltip_anchor: str, group: QActionGroup, style: str):
lbl = '???'
if self.edit_mode:
change_mode = f'<td>(<a href="{tooltip_anchor}">use {tooltip_anchor}</a>)</td>'
else:
a = group.checkedAction()
if a:
cur_key_type = a.data()
change_mode = ''
if cur_key_type == 'privkey':
lbl = prefix + ' private key'
elif cur_key_type == 'address':
lbl = prefix + ' Gewel address'
elif cur_key_type == 'pubkey':
lbl = prefix + ' public key'
elif cur_key_type == 'pubkeyhash':
lbl = prefix + ' public key hash'
return f'<table style="float:right;{style_to_color(style)}"><tr><td>{lbl}</td>{change_mode}</tr></table>'
if self.masternode:
if not self.edit_mode and not self.act_view_as_mn_private_key.isChecked():
style = 'hl2'
else:
style = ''
self.lblMasternodePrivateKey.setText(f'<span style="{style_to_color(style)}">Masternode private '
f'key</span>')
style = ''
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'address', 'Enter the owner private key')
if not self.edit_mode and not self.act_view_as_owner_private_key.isChecked():
style = 'hl2'
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the owner Gewel address')
if not self.edit_mode:
style = 'hl1' if self.act_view_as_owner_public_address.isChecked() else 'hl2'
self.lblOwnerKey.setText(get_label_text('Owner', key_type, tooltip_anchor, self.ag_owner_key, style))
self.edtOwnerKey.setPlaceholderText(placeholder_text)
style = ''
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey', 'pubkey', 'Enter the operator private key')
if not self.edit_mode and not self.act_view_as_operator_private_key.isChecked():
style = 'hl2'
else:
key_type, tooltip_anchor, placeholder_text = ('pubkey', 'privkey', 'Enter the operator public key')
if not self.edit_mode:
style = 'hl1' if self.act_view_as_operator_public_key.isChecked() else 'hl2'
self.lblOperatorKey.setText(get_label_text('Operator', key_type, tooltip_anchor, self.ag_operator_key,
style))
self.edtOperatorKey.setPlaceholderText(placeholder_text)
style = ''
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
key_type, tooltip_anchor, placeholder_text = ('privkey','address', 'Enter the voting private key')
if not self.edit_mode and not self.act_view_as_voting_private_key.isChecked():
style = 'hl2'
else:
key_type, tooltip_anchor, placeholder_text = ('address', 'privkey', 'Enter the voting Gewel address')
if not self.edit_mode:
style = 'hl1' if self.act_view_as_voting_public_address.isChecked() else 'hl2'
self.lblVotingKey.setText(get_label_text('Voting', key_type, tooltip_anchor, self.ag_voting_key, style))
self.edtVotingKey.setPlaceholderText(placeholder_text)
self.set_left_label_width(self.get_max_left_label_width())
def update_key_controls_state(self):
self.edtMasternodePrivateKey.setEchoMode(QLineEdit.Normal if self.btnShowMnPrivateKey.isChecked() or
self.edit_mode else QLineEdit.Password)
self.edtOwnerKey.setEchoMode(QLineEdit.Normal if self.btnShowOwnerPrivateKey.isChecked() or
self.edit_mode else QLineEdit.Password)
self.edtOperatorKey.setEchoMode(QLineEdit.Normal if self.btnShowOperatorPrivateKey.isChecked() or
self.edit_mode else QLineEdit.Password)
self.edtVotingKey.setEchoMode(QLineEdit.Normal if self.btnShowVotingPrivateKey.isChecked() or
self.edit_mode else QLineEdit.Password)
self.update_dynamic_labels()
def masternode_data_to_ui(self):
if self.masternode:
self.act_view_as_mn_private_key.setChecked(True)
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
self.act_view_as_owner_private_key.setChecked(True)
else:
self.act_view_as_owner_public_address.setChecked(True)
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
self.act_view_as_operator_private_key.setChecked(True)
else:
self.act_view_as_operator_public_key.setChecked(True)
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
self.act_view_as_voting_private_key.setChecked(True)
else:
self.act_view_as_voting_public_address.setChecked(True)
self.btnShowMnPrivateKey.setChecked(False)
self.btnShowOwnerPrivateKey.setChecked(False)
self.btnShowOperatorPrivateKey.setChecked(False)
self.btnShowVotingPrivateKey.setChecked(False)
self.chbRoleOwner.setChecked(self.masternode.gmn_user_roles & GMN_ROLE_OWNER)
self.chbRoleOperator.setChecked(self.masternode.gmn_user_roles & GMN_ROLE_OPERATOR)
self.chbRoleVoting.setChecked(self.masternode.gmn_user_roles & GMN_ROLE_VOTING)
self.edtName.setText(self.masternode.name)
self.edtIP.setText(self.masternode.ip)
self.edtProtocolVersion.setText(self.masternode.protocol_version if
self.masternode.use_default_protocol_version is False else '')
self.edtPort.setText(self.masternode.port)
self.edtCollateralAddress.setText(self.masternode.collateralAddress)
self.edtCollateralPath.setText(self.masternode.collateralBip32Path)
self.edtCollateralTxHash.setText(self.masternode.collateralTx)
self.edtCollateralTxIndex.setText(self.masternode.collateralTxIndex)
self.edtGMNTxHash.setText(self.masternode.gmn_tx_hash)
self.edtMasternodePrivateKey.setText(self.get_masternode_key_to_display())
self.edtOwnerKey.setText(self.get_owner_key_to_display())
self.edtVotingKey.setText(self.get_voting_key_to_display())
self.edtOperatorKey.setText(self.get_operator_key_to_display())
self.updating_ui = False
else:
for e in self.findChildren(QLineEdit):
e.setText('')
self.update_ui_controls_state()
def get_masternode_key_to_display(self) -> str:
ret = ''
if self.masternode:
if self.edit_mode:
ret = self.masternode.privateKey
else:
try:
if self.act_view_as_mn_private_key.isChecked():
ret = self.masternode.privateKey
elif self.act_view_as_mn_public_address.isChecked():
if self.masternode.privateKey:
ret = gewel_utils.wif_privkey_to_address(self.masternode.privateKey, self.app_config.gewel_network)
elif self.act_view_as_mn_public_key.isChecked():
if self.masternode.privateKey:
ret = gewel_utils.wif_privkey_to_pubkey(self.masternode.privateKey)
elif self.act_view_as_mn_public_key_hash.isChecked():
if self.masternode.privateKey:
pubkey = gewel_utils.wif_privkey_to_pubkey(self.masternode.privateKey)
pubkey_bin = bytes.fromhex(pubkey)
pub_hash = bitcoin.bin_hash160(pubkey_bin)
ret = pub_hash.hex()
else:
ret = '???'
except Exception as e:
msg = str(e)
if not msg:
msg = 'Key conversion error.'
WndUtils.errorMsg(msg)
return ret
def get_owner_key_to_display(self) -> str:
ret = ''
if self.masternode:
if self.edit_mode:
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
ret = self.masternode.gmn_owner_private_key
else:
ret = self.masternode.gmn_owner_address
else:
try:
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
if self.act_view_as_owner_private_key.isChecked():
ret = self.masternode.gmn_owner_private_key
elif self.act_view_as_owner_public_address.isChecked():
if self.masternode.gmn_owner_private_key:
ret = gewel_utils.wif_privkey_to_address(self.masternode.gmn_owner_private_key,
self.app_config.gewel_network)
elif self.act_view_as_owner_public_key.isChecked():
if self.masternode.gmn_owner_private_key:
ret = gewel_utils.wif_privkey_to_pubkey(self.masternode.gmn_owner_private_key)
elif self.act_view_as_owner_public_key_hash.isChecked():
if self.masternode.gmn_owner_private_key:
pubkey = gewel_utils.wif_privkey_to_pubkey(self.masternode.gmn_owner_private_key)
pubkey_bin = bytes.fromhex(pubkey)
pub_hash = bitcoin.bin_hash160(pubkey_bin)
ret = pub_hash.hex()
else:
ret = '???'
else:
if self.act_view_as_owner_public_address.isChecked():
ret = self.masternode.gmn_owner_address
elif self.act_view_as_owner_public_key_hash.isChecked():
ret = self.masternode.get_gmn_owner_pubkey_hash()
else:
ret = '???'
except Exception as e:
msg = str(e)
if not msg:
msg = 'Key conversion error.'
WndUtils.errorMsg(msg)
return ret
def get_voting_key_to_display(self) -> str:
ret = ''
if self.masternode:
if self.edit_mode:
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
ret = self.masternode.gmn_voting_private_key
else:
ret = self.masternode.gmn_voting_address
else:
try:
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
if self.act_view_as_voting_private_key.isChecked():
ret = self.masternode.gmn_voting_private_key
elif self.act_view_as_voting_public_address.isChecked():
if self.masternode.gmn_voting_private_key:
ret = gewel_utils.wif_privkey_to_address(self.masternode.gmn_voting_private_key,
self.app_config.gewel_network)
elif self.act_view_as_voting_public_key.isChecked():
if self.masternode.gmn_voting_private_key:
ret = gewel_utils.wif_privkey_to_pubkey(self.masternode.gmn_voting_private_key)
elif self.act_view_as_voting_public_key_hash.isChecked():
if self.masternode.gmn_voting_private_key:
pubkey = gewel_utils.wif_privkey_to_pubkey(self.masternode.gmn_voting_private_key)
pubkey_bin = bytes.fromhex(pubkey)
pub_hash = bitcoin.bin_hash160(pubkey_bin)
ret = pub_hash.hex()
else:
ret = '???'
else:
if self.act_view_as_voting_public_address.isChecked():
ret = self.masternode.gmn_voting_address
elif self.act_view_as_voting_public_key_hash.isChecked():
ret = self.masternode.get_gmn_voting_pubkey_hash()
else:
ret = '???'
except Exception as e:
msg = str(e)
if not msg:
msg = 'Key conversion error.'
WndUtils.errorMsg(msg)
return ret
def get_operator_key_to_display(self) -> str:
ret = ''
if self.masternode:
if self.edit_mode:
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
ret = self.masternode.gmn_operator_private_key
else:
ret = self.masternode.gmn_operator_public_key
else:
try:
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
if self.act_view_as_operator_private_key.isChecked():
ret = self.masternode.gmn_operator_private_key
elif self.act_view_as_operator_public_key.isChecked():
ret = self.masternode.get_gmn_operator_pubkey()
else:
ret = '???'
else:
if self.act_view_as_operator_public_key.isChecked():
ret = self.masternode.gmn_operator_public_key
else:
ret = '???'
except Exception as e:
msg = str(e)
if not msg:
msg = 'Key conversion error.'
WndUtils.errorMsg(msg)
return ret
@pyqtSlot(str)
def on_lblOwnerKey_linkActivated(self, link):
if self.masternode and self.edit_mode:
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_owner_key_type = InputKeyType.PUBLIC
self.edtOwnerKey.setText(self.masternode.gmn_owner_address)
self.act_view_as_owner_private_key.setChecked(True)
else:
self.masternode.gmn_owner_key_type = InputKeyType.PRIVATE
self.edtOwnerKey.setText(self.masternode.gmn_owner_private_key)
self.act_view_as_owner_public_address.setChecked(True)
self.set_modified()
self.update_ui_controls_state()
@pyqtSlot(str)
def on_lblOperatorKey_linkActivated(self, link):
if self.masternode and self.edit_mode:
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_operator_key_type = InputKeyType.PUBLIC
self.edtOperatorKey.setText(self.masternode.gmn_operator_public_key)
self.act_view_as_operator_private_key.setChecked(True)
else:
self.masternode.gmn_operator_key_type = InputKeyType.PRIVATE
self.edtOperatorKey.setText(self.masternode.gmn_operator_private_key)
self.act_view_as_operator_public_key.setChecked(True)
self.set_modified()
self.update_ui_controls_state()
@pyqtSlot(str)
def on_lblVotingKey_linkActivated(self, link):
if self.masternode and self.edit_mode:
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_voting_key_type = InputKeyType.PUBLIC
self.edtVotingKey.setText(self.masternode.gmn_voting_address)
self.act_view_as_voting_private_key.setChecked(True)
else:
self.masternode.gmn_voting_key_type = InputKeyType.PRIVATE
self.edtVotingKey.setText(self.masternode.gmn_voting_private_key)
self.act_view_as_voting_public_address.setChecked(True)
self.set_modified()
self.update_ui_controls_state()
@pyqtSlot(str)
def on_lblOwnerKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Gewel address'
else:
tt = 'Change input type to private key'
self.lblOwnerKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblOperatorKey_linkHovered(self, link):
if link == 'pub':
tt = 'Change input type to public key'
else:
tt = 'Change input type to private key'
self.lblOperatorKey.setToolTip(tt)
@pyqtSlot(str)
def on_lblVotingKey_linkHovered(self, link):
if link == 'address':
tt = 'Change input type to Gewel address'
else:
tt = 'Change input type to private key'
self.lblVotingKey.setToolTip(tt)
def get_max_left_label_width(self):
doc = QTextDocument(self)
doc.setDocumentMargin(0)
doc.setDefaultFont(self.lblOwnerKey.font())
doc.setHtml('Test')
def get_lbl_text_width(lbl):
nonlocal doc
doc.setHtml(lbl.text())
return int(doc.size().width() + 5)
w = max(get_lbl_text_width(self.lblName),
get_lbl_text_width(self.lblIP),
get_lbl_text_width(self.lblCollateral),
get_lbl_text_width(self.lblCollateralTxHash),
get_lbl_text_width(self.lblGMNTxHash),
get_lbl_text_width(self.lblMasternodePrivateKey),
get_lbl_text_width(self.lblOwnerKey),
get_lbl_text_width(self.lblOperatorKey),
get_lbl_text_width(self.lblVotingKey))
return w
def set_left_label_width(self, width):
if self.lblName.width() != width:
self.label_width_changed.emit(width)
self.lblUserRole.setFixedWidth(width)
self.lblName.setFixedWidth(width)
self.lblIP.setFixedWidth(width)
self.lblCollateral.setFixedWidth(width)
self.lblCollateralTxHash.setFixedWidth(width)
self.lblGMNTxHash.setFixedWidth(width)
self.lblMasternodePrivateKey.setFixedWidth(width)
self.lblOwnerKey.setFixedWidth(width)
self.lblOperatorKey.setFixedWidth(width)
self.lblVotingKey.setFixedWidth(width)
def set_masternode(self, masternode: MasternodeConfig):
self.updating_ui = True
self.masternode = masternode
self.masternode_data_to_ui()
def set_edit_mode(self, enabled: bool):
if self.edit_mode != enabled:
self.edit_mode = enabled
self.masternode_data_to_ui()
if not self.edit_mode:
self.lblOwnerKey.setToolTip('')
self.lblOperatorKey.setToolTip('')
self.lblVotingKey.setToolTip('')
def set_modified(self):
if self.masternode and not self.updating_ui:
self.masternode.set_modified()
self.data_changed.emit(self.masternode)
@pyqtSlot(str)
def on_lblAction_linkActivated(self, str):
if self.masternode:
determ = None
if str == 'change-to-gmn' and self.masternode.is_deterministic is False:
determ = True
elif str == 'change-to-non-gmn' and self.masternode.is_deterministic:
determ = False
if determ is not None:
self.set_deterministic(determ)
def set_deterministic(self, deterministic: bool):
self.masternode.is_deterministic = deterministic
self.update_ui_controls_state()
self.set_modified()
@pyqtSlot(bool)
def on_chbRoleOwner_toggled(self, checked):
if not self.updating_ui:
if checked:
self.masternode.gmn_user_roles |= GMN_ROLE_OWNER
else:
self.masternode.gmn_user_roles &= ~GMN_ROLE_OWNER
self.update_ui_controls_state()
self.set_modified()
self.role_modified.emit()
@pyqtSlot(bool)
def on_chbRoleOperator_toggled(self, checked):
if not self.updating_ui:
if checked:
self.masternode.gmn_user_roles |= GMN_ROLE_OPERATOR
else:
self.masternode.gmn_user_roles &= ~GMN_ROLE_OPERATOR
self.update_ui_controls_state()
self.set_modified()
self.role_modified.emit()
@pyqtSlot(bool)
def on_chbRoleVoting_toggled(self, checked):
if not self.updating_ui:
if checked:
self.masternode.gmn_user_roles |= GMN_ROLE_VOTING
else:
self.masternode.gmn_user_roles &= ~GMN_ROLE_VOTING
self.update_ui_controls_state()
self.set_modified()
self.role_modified.emit()
@pyqtSlot(str)
def on_edtName_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.name = text.strip()
self.name_modified.emit(text)
@pyqtSlot(str)
def on_edtIP_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.ip = text.strip()
@pyqtSlot(str)
def on_edtPort_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.port = text.strip()
@pyqtSlot(str)
def on_edtProtocolVersion_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.protocol_version = text.strip()
if not self.masternode.protocol_version:
self.masternode.use_default_protocol_version = True
else:
self.masternode.use_default_protocol_version = False
@pyqtSlot(str)
def on_edtCollateralAddress_textEdited(self, text):
if self.masternode and not self.updating_ui:
update_ui = ((not text) != (not self.masternode.collateralAddress))
self.set_modified()
self.masternode.collateralAddress = text.strip()
if update_ui:
self.update_ui_controls_state()
@pyqtSlot(str)
def on_edtCollateralPath_textEdited(self, text):
if self.masternode and not self.updating_ui:
update_ui = ((not text) != (not self.masternode.collateralBip32Path))
self.set_modified()
self.masternode.collateralBip32Path = text.strip()
if update_ui:
self.update_ui_controls_state()
@pyqtSlot(str)
def on_edtCollateralTxHash_textEdited(self, text):
if self.masternode and not self.updating_ui:
update_ui = ((not text) != (not self.masternode.collateralTx))
self.set_modified()
self.masternode.collateralTx = text.strip()
if update_ui:
self.update_ui_controls_state()
@pyqtSlot(str)
def on_edtCollateralTxIndex_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.collateralTxIndex = text.strip()
@pyqtSlot(str)
def on_edtGMNTxHash_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.gmn_tx_hash = text.strip()
@pyqtSlot(bool)
def on_btnFindGMNTxHash_clicked(self, checked):
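        # look the masternode up in the node's list of registered protx entries
        # (matched by ip:port or by collateral hash + index) and copy its proTxHash
        # into the configuration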
if self.masternode and not self.updating_ui:
found_protx = None
if not ((self.masternode.ip and self.masternode.port) or
(self.masternode.collateralTx and self.masternode.collateralTxIndex)):
WndUtils.errorMsg('To be able to locate the deterministic masternode transaction you need to '
'provide the masternode ip + port or collateral tx + tx index.')
return
try:
txes = self.geweld_intf.protx('list', 'registered', True)
for protx in txes:
state = protx.get('state')
if state:
if (state.get('service') == self.masternode.ip + ':' + self.masternode.port) or \
(protx.get('collateralHash') == self.masternode.collateralTx and
str(protx.get('collateralIndex', '')) == self.masternode.collateralTxIndex):
found_protx = protx
break
except Exception as e:
pass
if found_protx:
if self.masternode.gmn_tx_hash == protx.get('proTxHash'):
                    WndUtils.infoMsg('You have the correct GMN TX hash in the masternode configuration.')
else:
self.edtGMNTxHash.setText(protx.get('proTxHash'))
self.masternode.gmn_tx_hash = protx.get('proTxHash')
self.set_modified()
else:
WndUtils.warnMsg('Couldn\'t find this masternode in the list of registered deterministic masternodes.')
self.set_modified()
@pyqtSlot(bool)
def on_btnBip32PathToAddress_clicked(self, checked):
if self.masternode.collateralBip32Path:
if self.main_dlg.connect_hardware_wallet():
try:
hw_session = self.main_dlg.hw_session
addr = hw_intf.get_address(hw_session, self.masternode.collateralBip32Path, show_display=True)
if addr:
self.masternode.collateralAddress = addr.strip()
self.edtCollateralAddress.setText(addr.strip())
self.set_modified()
self.update_ui_controls_state()
except CancelException:
pass
@pyqtSlot(bool)
def on_btnShowCollateralPathAddress_clicked(self, checked):
if self.masternode.collateralBip32Path:
try:
if self.main_dlg.connect_hardware_wallet():
hw_session = self.main_dlg.hw_session
addr = hw_intf.get_address(
hw_session, self.masternode.collateralBip32Path, True,
f'Displaying address for the BIP32 path <b>{self.masternode.collateralBip32Path}</b>.'
f'<br>Click the confirmation button on your device.')
except CancelException:
pass
@pyqtSlot(str)
def on_edtMasternodePrivateKey_textEdited(self, text):
if self.masternode and not self.updating_ui:
self.set_modified()
self.masternode.privateKey = text.strip()
@pyqtSlot(str)
def on_edtOwnerKey_textEdited(self, text):
if self.masternode and not self.updating_ui:
if self.masternode.gmn_owner_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_owner_private_key = text.strip()
else:
self.masternode.gmn_owner_address = text.strip()
self.set_modified()
@pyqtSlot(str)
def on_edtOperatorKey_textEdited(self, text):
if self.masternode and not self.updating_ui:
if self.masternode.gmn_operator_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_operator_private_key = text.strip()
else:
self.masternode.gmn_operator_public_key = text.strip()
self.set_modified()
@pyqtSlot(str)
def on_edtVotingKey_textEdited(self, text):
if self.masternode and not self.updating_ui:
if self.masternode.gmn_voting_key_type == InputKeyType.PRIVATE:
self.masternode.gmn_voting_private_key = text.strip()
else:
self.masternode.gmn_voting_address = text.strip()
self.set_modified()
    def generate_priv_key(self, pk_type: str, edit_control: QLineEdit, compressed: bool):
if edit_control.text():
if WndUtils.queryDlg(
f'This will overwrite the current {pk_type} private key value. Do you really want to proceed?',
buttons=QMessageBox.Yes | QMessageBox.Cancel,
default_button=QMessageBox.Yes, icon=QMessageBox.Warning) != QMessageBox.Yes:
return None
if pk_type == 'operator':
pk = gewel_utils.generate_bls_privkey()
else:
pk = gewel_utils.generate_wif_privkey(self.app_config.gewel_network, compressed=compressed)
edit_control.setText(pk)
return pk
@pyqtSlot(bool)
def on_btnGenerateMnPrivateKey_clicked(self, checked):
if self.masternode:
pk = self.generate_priv_key('masternode', self.edtMasternodePrivateKey, True)
if pk:
self.masternode.privateKey = pk
self.btnShowMnPrivateKey.setChecked(True)
self.set_modified()
@pyqtSlot(bool)
def on_btnGenerateOwnerPrivateKey_clicked(self, checked):
if self.masternode:
pk = self.generate_priv_key('owner', self.edtOwnerKey, True)
if pk:
self.masternode.gmn_owner_private_key = pk
self.btnShowOwnerPrivateKey.setChecked(True)
self.set_modified()
@pyqtSlot(bool)
def on_btnGenerateOperatorPrivateKey_clicked(self, checked):
if self.masternode:
pk = self.generate_priv_key('operator', self.edtOperatorKey, True)
if pk:
self.masternode.gmn_operator_private_key = pk
self.btnShowOperatorPrivateKey.setChecked(True)
self.set_modified()
@pyqtSlot(bool)
def on_btnGenerateVotingPrivateKey_clicked(self, checked):
if self.masternode:
pk = self.generate_priv_key('voting', self.edtVotingKey, True)
if pk:
self.masternode.gmn_voting_private_key = pk
self.btnShowVotingPrivateKey.setChecked(True)
self.set_modified()
@pyqtSlot(bool)
def on_btnShowMnPrivateKey_toggled(self, checked):
self.edtMasternodePrivateKey.setEchoMode(QLineEdit.Normal if checked else QLineEdit.Password)
self.update_key_controls_state()
@pyqtSlot(bool)
def on_btnShowOwnerPrivateKey_toggled(self, checked):
self.edtOwnerKey.setEchoMode(QLineEdit.Normal if checked else QLineEdit.Password)
self.update_key_controls_state()
@pyqtSlot(bool)
def on_btnShowOperatorPrivateKey_toggled(self, checked):
self.edtOperatorKey.setEchoMode(QLineEdit.Normal if checked else QLineEdit.Password)
self.update_key_controls_state()
@pyqtSlot(bool)
def on_btnShowVotingPrivateKey_toggled(self, checked):
self.edtVotingKey.setEchoMode(QLineEdit.Normal if checked else QLineEdit.Password)
self.update_key_controls_state()
@pyqtSlot(bool)
def on_btnLocateCollateral_clicked(self, checked):
break_scanning = False
if not self.main_dlg.connect_hardware_wallet():
return
def do_break_scanning():
nonlocal break_scanning
break_scanning = True
return False
def check_break_scanning():
nonlocal break_scanning
return break_scanning
def apply_utxo(utxo):
self.masternode.collateralAddress = utxo.address
self.edtCollateralAddress.setText(utxo.address)
self.masternode.collateralBip32Path = utxo.bip32_path
self.edtCollateralPath.setText(utxo.bip32_path)
self.masternode.collateralTx = utxo.txid
self.edtCollateralTxHash.setText(utxo.txid)
self.masternode.collateralTxIndex = str(utxo.output_index)
self.edtCollateralTxIndex.setText(str(utxo.output_index))
self.update_ui_controls_state()
self.set_modified()
bip44_wallet = Bip44Wallet(self.app_config.hw_coin_name, self.main_dlg.hw_session,
self.app_config.db_intf, self.geweld_intf, self.app_config.gewel_network)
utxos = WndUtils.run_thread_dialog(self.get_collateral_tx_address_thread, (bip44_wallet, check_break_scanning),
True, force_close_dlg_callback=do_break_scanning)
if utxos:
if len(utxos) == 1 and not self.masternode.collateralAddress and not self.masternode.collateralTx:
used = False
for mn in self.app_config.masternodes:
if utxos[0].address == mn.collateralAddress or mn.collateralTx + '-' + str(mn.collateralTxIndex) == \
utxos[0].txid + '-' + str(utxos[0].output_index):
used = True
break
if not used:
apply_utxo(utxos[0])
return
dlg = ListCollateralTxsDlg(self, self.masternode, self.app_config, False, utxos)
if dlg.exec_():
utxo = dlg.get_selected_utxo()
if utxo:
apply_utxo(utxo)
else:
if utxos is not None:
WndUtils.warnMsg('Couldn\'t find any 777 Gewel UTXO in your wallet.')
def get_collateral_tx_address_thread(self, ctrl: CtrlObject, bip44_wallet: Bip44Wallet,
check_break_scanning_ext: Callable[[], bool]):
utxos = []
break_scanning = False
txes_cnt = 0
msg = 'Scanning wallet transactions for 777 Gewel UTXOs.<br>' \
'This may take a while (<a href="break">break</a>)....'
ctrl.dlg_config_fun(dlg_title="Scanning wallet", show_progress_bar=False)
ctrl.display_msg_fun(msg)
def check_break_scanning():
nonlocal break_scanning
if break_scanning:
# stop the scanning process if the dialog finishes or the address/bip32path has been found
raise BreakFetchTransactionsException()
if check_break_scanning_ext is not None and check_break_scanning_ext():
raise BreakFetchTransactionsException()
def fetch_txes_feeback(tx_cnt: int):
nonlocal msg, txes_cnt
txes_cnt += tx_cnt
ctrl.display_msg_fun(msg + '<br><br>' + 'Number of transactions fetched so far: ' + str(txes_cnt))
def on_msg_link_activated(link: str):
nonlocal break_scanning
if link == 'break':
break_scanning = True
lbl = ctrl.get_msg_label_control()
if lbl:
def set():
lbl.setOpenExternalLinks(False)
lbl.setTextInteractionFlags(lbl.textInteractionFlags() & ~Qt.TextSelectableByMouse)
lbl.linkActivated.connect(on_msg_link_activated)
lbl.repaint()
WndUtils.call_in_main_thread(set)
try:
bip44_wallet.on_fetch_account_txs_feedback = fetch_txes_feeback
bip44_wallet.fetch_all_accounts_txs(check_break_scanning)
for utxo in bip44_wallet.list_utxos_for_account(account_id=None, filter_by_satoshis=1e11):
utxos.append(utxo)
except BreakFetchTransactionsException:
return None
return utxos
def on_masternode_view_key_type_changed(self):
self.btnShowMnPrivateKey.setChecked(True)
self.update_key_controls_state()
self.edtMasternodePrivateKey.setText(self.get_masternode_key_to_display())
def on_owner_view_key_type_changed(self):
self.btnShowOwnerPrivateKey.setChecked(True)
self.update_key_controls_state()
self.edtOwnerKey.setText(self.get_owner_key_to_display())
def on_voting_view_key_type_changed(self):
self.btnShowVotingPrivateKey.setChecked(True)
self.update_key_controls_state()
self.edtVotingKey.setText(self.get_voting_key_to_display())
def on_operator_view_key_type_changed(self):
self.btnShowOperatorPrivateKey.setChecked(True)
self.update_key_controls_state()
self.edtOperatorKey.setText(self.get_operator_key_to_display())
def on_btnCopyMnKey_clicked(self):
cl = QApplication.clipboard()
cl.setText(self.edtMasternodePrivateKey.text())
def on_btnCopyOwnerKey_clicked(self):
cl = QApplication.clipboard()
cl.setText(self.edtOwnerKey.text())
def on_btnCopyVotingKey_clicked(self):
cl = QApplication.clipboard()
cl.setText(self.edtVotingKey.text())
def on_btnCopyOperatorKey_clicked(self):
cl = QApplication.clipboard()
cl.setText(self.edtOperatorKey.text())
| 51.278846 | 127 | 0.633125 |
aceee419f0607a74bfb0ff9649280b6961aa59ac | 331 | py | Python | TallerComplutense/Performative Tools/Music/MAX/defenPerf/other/apendificador.py | JoakuDeSotavento/Joakinator | 71384d54cb7bd3d9030fddf50d838df5116af22b | [
"MIT"
] | null | null | null | TallerComplutense/Performative Tools/Music/MAX/defenPerf/other/apendificador.py | JoakuDeSotavento/Joakinator | 71384d54cb7bd3d9030fddf50d838df5116af22b | [
"MIT"
] | null | null | null | TallerComplutense/Performative Tools/Music/MAX/defenPerf/other/apendificador.py | JoakuDeSotavento/Joakinator | 71384d54cb7bd3d9030fddf50d838df5116af22b | [
"MIT"
] | null | null | null | texto1 = 'append '
texto2 = 'method-'
texto3 = '.wav, '
with open('method.txt', 'w') as f:
for x in range(13):
        if x < 10:
            yes = texto1 + texto2 + str(0) + str(x) + texto3
        else:
            yes = texto1 + texto2 + str(x) + texto3
        f.write(yes)
| 19.470588 | 60 | 0.465257 |
aceee4e6c756106b99f6a9f9dcc4fced5d77dbb7 | 2,896 | py | Python | colossalai/engine/schedule/_base_schedule.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | colossalai/engine/schedule/_base_schedule.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | colossalai/engine/schedule/_base_schedule.py | DevinCheung/ColossalAI | 632e622de818697f9949e35117c0432d88f62c87 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
from abc import ABC, abstractmethod
import torch
from torch import Tensor
from typing import Iterable, Union, List, Callable
from .._base_engine import Engine
from colossalai.logging import get_dist_logger
from colossalai.utils import get_current_device
class BaseSchedule(ABC):
"""A basic helper class to control the process of training or evaluation.
    It mainly consists of forward_backward_step for the gradient backward pass and
    optimizer_step for the parameter update.
    For convenience when enabling FP16, we aggregate all the code that controls
    FP16 in the schedule classes.
"""
def __init__(self, batch_data_process_func: Callable = None):
self.logger = get_dist_logger()
self.batch_data_process_func = batch_data_process_func
@staticmethod
def _move_tensor(element):
if torch.is_tensor(element):
if not element.is_cuda:
return element.to(get_current_device()).detach()
return element
def _move_to_device(self, data):
if isinstance(data, (tuple, list)):
data = tuple([self._move_tensor(d) for d in data])
elif torch.is_tensor(data):
data = data.to(get_current_device()).detach()
return data
def _to_list(self, data):
if torch.is_tensor(data):
return [data]
return data
def load_batch(self, data_iter):
"""Loads a batch from data iterator. It returns the data and labels which are
        already on the same GPU as the model.
        :return: (data, label)
        :rtype: (:class:`torch.Tensor`, :class:`torch.Tensor`)
"""
if data_iter is None:
raise RuntimeError('Dataloader is not defined.')
batch_data = next(data_iter)
if self.batch_data_process_func:
data, label = self.batch_data_process_func(batch_data)
else:
data, label = batch_data
data, label = self._to_list(data), self._to_list(label)
return self._move_to_device(data), self._move_to_device(label)
def pre_processing(self, engine: Engine):
"""To perform actions before running the schedule.
"""
pass
@abstractmethod
def forward_backward_step(self,
engine: Engine,
data_iter: Iterable,
forward_only: bool,
return_loss: bool = True
):
"""The process function over a batch of dataset for training or evaluation.
:param engine: Colossalai training engine
:param inputs: input data
:param labels: ground truth
:param forward_only: If True, the process won't include backward
:param return_loss: If False, the loss won't be returned
"""
pass | 34.070588 | 85 | 0.631561 |
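# A minimal sketch of a concrete schedule built on BaseSchedule above, assuming a
# simple single-batch loop; the exact Engine calls used here (engine(...),
# engine.criterion, engine.backward) are assumptions for illustration and may differ
# between ColossalAI versions.
#
# class SimpleSchedule(BaseSchedule):
#     def forward_backward_step(self, engine, data_iter, forward_only, return_loss=True):
#         # load one batch and move it to the current device via the base-class helper
#         data, label = self.load_batch(data_iter)
#         output = engine(*data)                                    # forward pass
#         loss = engine.criterion(output, *label) if return_loss else None
#         if not forward_only:
#             engine.backward(loss)                                 # backward pass
#         return output, label, loss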
aceee4f9a105fb0f4973cf97e789d3065468fe4f | 2,724 | py | Python | mlflow/data.py | PeterSulcs/mlflow | 14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051 | [
"Apache-2.0"
] | 10,351 | 2018-07-31T02:52:49.000Z | 2022-03-31T23:33:13.000Z | mlflow/data.py | PeterSulcs/mlflow | 14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051 | [
"Apache-2.0"
] | 3,733 | 2018-07-31T01:38:51.000Z | 2022-03-31T23:56:25.000Z | mlflow/data.py | PeterSulcs/mlflow | 14c48e7bb1ca6cd6a3c1b249a486cd98bd5e7051 | [
"Apache-2.0"
] | 2,596 | 2018-07-31T06:38:39.000Z | 2022-03-31T23:56:32.000Z | import os
import re
import urllib.parse
from mlflow.utils import process
from mlflow.utils.annotations import deprecated
DBFS_PREFIX = "dbfs:/"
S3_PREFIX = "s3://"
GS_PREFIX = "gs://"
DBFS_REGEX = re.compile("^%s" % re.escape(DBFS_PREFIX))
S3_REGEX = re.compile("^%s" % re.escape(S3_PREFIX))
GS_REGEX = re.compile("^%s" % re.escape(GS_PREFIX))
class DownloadException(Exception):
pass
def _fetch_dbfs(uri, local_path):
print("=== Downloading DBFS file %s to local path %s ===" % (uri, os.path.abspath(local_path)))
process.exec_cmd(cmd=["databricks", "fs", "cp", "-r", uri, local_path])
def _fetch_s3(uri, local_path):
import boto3
print("=== Downloading S3 object %s to local path %s ===" % (uri, os.path.abspath(local_path)))
client_kwargs = {}
endpoint_url = os.environ.get("MLFLOW_S3_ENDPOINT_URL")
ignore_tls = os.environ.get("MLFLOW_S3_IGNORE_TLS")
if endpoint_url:
client_kwargs["endpoint_url"] = endpoint_url
if ignore_tls:
client_kwargs["verify"] = ignore_tls.lower() not in ["true", "yes", "1"]
(bucket, s3_path) = parse_s3_uri(uri)
boto3.client("s3", **client_kwargs).download_file(bucket, s3_path, local_path)
def _fetch_gs(uri, local_path):
from google.cloud import storage
print("=== Downloading GCS file %s to local path %s ===" % (uri, os.path.abspath(local_path)))
(bucket, gs_path) = parse_gs_uri(uri)
storage.Client().bucket(bucket).blob(gs_path).download_to_filename(local_path)
def parse_s3_uri(uri):
"""Parse an S3 URI, returning (bucket, path)"""
parsed = urllib.parse.urlparse(uri)
if parsed.scheme != "s3":
raise Exception("Not an S3 URI: %s" % uri)
path = parsed.path
if path.startswith("/"):
path = path[1:]
return parsed.netloc, path
def parse_gs_uri(uri):
"""Parse an GCS URI, returning (bucket, path)"""
parsed = urllib.parse.urlparse(uri)
if parsed.scheme != "gs":
raise Exception("Not a GCS URI: %s" % uri)
path = parsed.path
if path.startswith("/"):
path = path[1:]
return parsed.netloc, path
def is_uri(string):
parsed_uri = urllib.parse.urlparse(string)
return len(parsed_uri.scheme) > 0
@deprecated(alternative="mlflow.tracking.MlflowClient.download_artifacts", since="1.9")
def download_uri(uri, output_path):
if DBFS_REGEX.match(uri):
_fetch_dbfs(uri, output_path)
elif S3_REGEX.match(uri):
_fetch_s3(uri, output_path)
elif GS_REGEX.match(uri):
_fetch_gs(uri, output_path)
else:
raise DownloadException(
"`uri` must be a DBFS (%s), S3 (%s), or GCS (%s) URI, got "
"%s" % (DBFS_PREFIX, S3_PREFIX, GS_PREFIX, uri)
)
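# A quick usage sketch of the helpers above; the URIs and the local path are
# hypothetical examples, not values used anywhere in MLflow itself.
#
#   parse_s3_uri("s3://my-bucket/models/model.pkl")    # -> ("my-bucket", "models/model.pkl")
#   parse_gs_uri("gs://my-bucket/models/model.pkl")    # -> ("my-bucket", "models/model.pkl")
#   download_uri("s3://my-bucket/models/model.pkl", "/tmp/model.pkl")   # dispatches to _fetch_s3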
| 29.608696 | 99 | 0.657489 |
aceee504a9d84a8d0ae957733ae4638dde833130 | 768 | py | Python | gui.py | Ghayathri1/Automated-attendance | 85b72a892a6b664d90dd893a97ad1b5a8b071c95 | [
"Apache-2.0"
] | null | null | null | gui.py | Ghayathri1/Automated-attendance | 85b72a892a6b664d90dd893a97ad1b5a8b071c95 | [
"Apache-2.0"
] | null | null | null | gui.py | Ghayathri1/Automated-attendance | 85b72a892a6b664d90dd893a97ad1b5a8b071c95 | [
"Apache-2.0"
] | null | null | null | import os
from tkinter import *
from PIL import ImageTk, Image
def python():
os.system('python python.py')
def main():
root = Tk()
root.title("Record Attendance")
root.geometry("852x480")
root.minsize(height=480, width=852)
root.maxsize(height=480, width=852)
root.configure(background="grey")
C = Canvas(root, bg="blue")
filename = ImageTk.PhotoImage(Image.open("3.jpg"))
background_label = Label(root, image=filename)
background_label.place(x=0, y=0, relwidth=1, relheight=1)
C.pack()
button = Button(root, text='Start Recording', command=python, justify=CENTER)
button.config(relief=RAISED, fg="red1", width=13, height=2)
button.pack()
root.mainloop()
if __name__ == "__main__":
main()
| 21.942857 | 81 | 0.665365 |
aceee60602e6d8ec41b348e26b43883dc562d00f | 21,926 | py | Python | evaluation.py | MarcRuble/experiment-evaluation | 427797e14f1faa9e9fe5cb958ad3f4a01c406d37 | [
"MIT"
] | null | null | null | evaluation.py | MarcRuble/experiment-evaluation | 427797e14f1faa9e9fe5cb958ad3f4a01c406d37 | [
"MIT"
] | null | null | null | evaluation.py | MarcRuble/experiment-evaluation | 427797e14f1faa9e9fe5cb958ad3f4a01c406d37 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import math
from scipy.stats import shapiro
from scipy.stats import bartlett
from scipy.stats import friedmanchisquare
from pingouin import sphericity
from pingouin import rm_anova
from pingouin import wilcoxon
from pingouin import pairwise_ttests
# Encapsulates a data set and provides functions for evaluation.
# Source: https://github.com/MarcRuble/experiment-evaluation
###
class DatasetEvaluation:
def __init__(self, df):
self.df = df
self.alpha = 0.05
self.precision = 5
self.order_table = {}
####################
### MANIPULATION ###
####################
# Excludes data which fulfills the condition.
# condition: (column:string, value)
def exclude(self, condition):
self.df = self.df[self.df[condition[0]] != condition[1]]
# Replaces values in a column.
# column: string
# dict: old -> new value
def replace(self, column, dict):
        self.df[column] = self.df[column].replace(dict)
# Adds a new column which is the mean of given columns.
# columns: list of strings
# name: for new column
def add_mean(self, columns, name):
self.df[name] = self.df[columns].mean(axis=1)
###################
### QUICK STATS ###
###################
# Displays the data set as data frame.
def display(self):
df = self.df
display(df)
# Displays the data set in a sorted fashion.
# column: string
# ascending: bool
def display_sorted(self, column, ascending):
df = self.df
display(df.sort_values(column, ascending=ascending))
# Returns the mean value of a column.
# column: string
def mean(self, column):
df = self.df
return df[column].mean()
# Returns the std value of a column.
# column: string
def std(self, column):
df = self.df
return df[column].std()
# Returns the count of a property's values.
# property: column to count the available values
# index: column to use as index
def counts_by_property(self, property, index):
df = self.df
return df.groupby([property]).count()[index]
##############
### CONFIG ###
##############
# Saves an order of values for a column.
# column: string
# order: ordered list of values
def save_order(self, column, order):
self.order_table[column] = order
# Sets the alpha value used for hypothesis testing.
# alpha: new value
# default is 0.05
def set_alpha(self, alpha):
self.alpha = alpha
# Sets the number of positions after decimal
# point to print and include in tables.
# precision: new number of positions
# default is 5
def set_precision(self, precision):
self.precision = precision
##############
### CHECKS ###
##############
# Checks for a normal distribution of the values in a column
# which fulfill a given condition.
# column: string
# condition: (column:string, value) or list of (column:string, value)
# display_result: bool if the result should be displayed
# returns test statistic, p value
###
# Uses scipy.stats.shapiro test
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.shapiro.html
# "The Shapiro-Wilk test tests the null hypothesis that the data was
# drawn from a normal distribution."
###
def check_normal_distribution(self, column, condition=False, display_result=True):
data = self.__get_condition(self.df, condition)[column]
stat, p = shapiro(data)
if display_result:
print("### Normal Distribution ###")
if condition is False:
print("{0:}: stat={1:.5}, p={2:.5}".format(column, stat, p))
else:
print("{0:} with {1:}: stat={2:.5}, p={3:.5}".format(
column, condition, stat, p))
if p > self.alpha:
print('--> Gaussian-like')
else:
print('--> Non-Gaussian')
print("")
return stat, p
# Checks for homogene variances of the values in a column
# separated into groups depending on values in group column.
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# condition: (column:string, value) or list of (column:string, value)
# display_result: bool if the result should be displayed
# returns test statistic, p value
###
# Uses scipy.stats.bartlett test
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.bartlett.html
# "Bartlett’s test tests the null hypothesis that all input samples
# are from populations with equal variances."
###
def check_homogene_variances(self, value_col, group_col, condition=False, display_result=True):
# collect data
data = self.__get_condition_sets(self.df, value_col, group_col, condition)
# perform test
stat, p = bartlett(*data)
if display_result:
print("### Homogeneity of Variances ###")
if condition is False:
print("{0:} between {1:}: stat={2:.5}, p={3:.5}".format(value_col, group_col, stat, p))
else:
print("{0:} in {1:} between {2:}: stat={3:.5}, p={4:.5}".format(value_col, condition, group_col, stat, p))
if p > self.alpha:
                print('--> Homogeneous variances')
            else:
                print('--> Non-homogeneous variances')
print("")
return stat, p
# Checks sphericity of the values in a column
# separated into groups/conditions and individuals.
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# subject_col: string for column with subject/participant ids
# condition: (column:string, value) or list of (column:string, value)
# display_result: bool if the result should be displayed
# returns spher (bool), W test statistic, chi2 effect size, dof, p value
###
# Uses pingouin.sphericity test
# https://pingouin-stats.org/generated/pingouin.sphericity.html
# "Mauchly and JNS test for sphericity."
###
def check_sphericity(self, value_col, group_col, subject_col, condition=False, display_result=True):
data = self.__get_condition(self.df, condition)
# perform test
spher, W, chi2, dof, p = sphericity(data, value_col, group_col, subject_col)
if display_result:
print("### Sphericity ###")
if condition is False:
print("{0:} between {1:} for {2:}: W={3:.5}, chi2={4:.5}, dof={5:}, p={6:.5}".format(
value_col, group_col, subject_col, W, chi2, dof, p))
else:
print("{0:} in {1:} between {2:} for {3:}: W={4:.5}, chi2={5:.5}, dof={6:}, p={7:.5}".format(
value_col, condition, group_col, subject_col, W, chi2, dof, p))
if spher:
print('--> Sphericity given')
else:
print('--> No sphericity given')
print("")
return spher, W, chi2, dof, p
#################################
### OVERALL VARIANCE ANALYSIS ###
#################################
# Compares the values obtained in different groups/conditions
    # with a Friedman test. Ignores data not matching the given condition.
# Use this for repeated-measures data without normal distribution.
# https://yatani.jp/teaching/doku.php?id=hcistats:kruskalwallis
# With following assumptions:
# https://accendoreliability.com/non-parametric-friedman-test/
###
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# condition: (column:string, value) or list of (column:string, value)
# display_result: bool if the result should be displayed
# returns test statistic, p value
###
# Uses scipy.stats.friedmanchisquare test
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.friedmanchisquare.html
# "Compute the Friedman test for repeated measurements.
# The Friedman test tests the null hypothesis that repeated
# measurements of the same individuals have the same distribution."
###
def friedman_test(self, value_col, group_col, condition=False, display_result=True):
# collect data
data = self.__get_condition_sets(self.__get_condition(self.df, condition), value_col, group_col)
# perform test
stat, p = friedmanchisquare(*data)
if display_result:
print("################")
print("### Friedman ###")
print("################")
if not condition is False:
print(self.__condition_to_string(condition))
print("{0:} between {1:}: stat={2:.5}, p={3:.5}".format(
value_col, group_col, stat, p))
if p > self.alpha:
print('--> No significant effects')
else:
print('--> Significant effects')
print("")
return stat, p
# Compares the values obtained in different groups/conditions
# with an ANOVA repeated-measures test. Ignores data not matching
# the given condition.
# Use this for repeated-measures data with normal distribution.
# https://yatani.jp/teaching/doku.php?id=hcistats:anova
###
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# subject_col: string for column with subject/participant ids
# condition: (column:string, value) or list of (column:string, value)
# display_result: bool if the result should be displayed
# returns a summary table with attributes like:
# F (test statistic), p-unc, p-GG-corr (for lack of sphericity),
# n2 (effect size)
###
# Uses pingouin.rm_anova test
# https://pingouin-stats.org/generated/pingouin.rm_anova.html
###
def anova_test(self, value_col, group_col, subject_col, condition=False, display_result=True):
# collect data
data = self.__get_condition(self.df, condition)
# perform test
summary = rm_anova(data, value_col, group_col, subject_col, correction=True, effsize='n2')
if display_result:
print("#############")
print("### ANOVA ###")
print("#############")
if not condition is False:
print(self.__condition_to_string(condition))
display(summary)
print("")
return summary
######################
### POST-HOC TESTS ###
######################
# Compares subgroups of the data to each other and determines
# how significant their differences are.
# If baseline parameter is given, all groups only compared to baseline.
# Else all groups are compared pairwise with each other.
# Use this as a post-hoc when Friedman's test for repeated-measures
# data without normal distributions hints at significant differences.
# https://yatani.jp/teaching/doku.php?id=hcistats:kruskalwallis#post-hoc_test
    # Appropriate for paired/dependent samples:
# https://yatani.jp/teaching/doku.php?id=hcistats:wilcoxonsigned
    # With the following assumptions:
# https://www.statisticssolutions.com/assumptions-of-the-wilcox-sign-test/
###
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# condition: (column:string, value) or list of (column:string, value)
# baseline: optional value of group_col treated as a baseline
# display_result: bool if the result should be displayed
# file: string path to location if csv should be saved
# returns a summary table with content depending on baseline
###
# Uses pingouin.wilcoxon:
# https://pingouin-stats.org/generated/pingouin.wilcoxon.html
# "The Wilcoxon signed-rank test [1] tests the null hypothesis
# that two related paired samples come from the same distribution."
###
def wilcoxon_test(self, value_col, group_col, condition=False,
baseline=None, display_result=True, file=None):
# collect data
df = self.__get_condition(self.df, condition)
# collect group values to compare
groups = self.__ordered_values(group_col)
# baseline gets special treatment
if baseline is not None:
groups = [x for x in groups if x != baseline]
groups.append(baseline)
# setup dict to construct dataframe
if baseline == None:
results = {'A':[], 'B':[], 'W':[], 'p':[], 'bonf':[], 'RBC':[], 'CLES':[]}
else:
results = {}
for group in groups:
results[group] = []
# collect all pairs to compare
to_compare = []
for g1 in groups:
for g2 in groups:
if g1 != g2 and not (g2,g1) in to_compare:
to_compare.append((g1,g2))
# compute results
if baseline == None:
# compare all groups to each other
for (g1, g2) in to_compare:
# perform wilcoxon
s1 = df[df[group_col]==g1][value_col]
s2 = df[df[group_col]==g2][value_col]
stats = wilcoxon(s1, s2)
# read results
W = stats['W-val'].values[0]
p = stats['p-val'].values[0]
bonf = self.__apply_bonferroni(p, len(to_compare))
rbc = stats['RBC'].values[0]
cles = stats['CLES'].values[0]
# results
results['A'].append(g1)
results['B'].append(g2)
results['W'].append(W)
results['p'].append(self.__check_p(p))
results['bonf'].append(self.__check_p(bonf))
results['RBC'].append(round(rbc, 5))
results['CLES'].append(round(cles, 5))
# create dataframe
df_res = pd.DataFrame(results)
else:
# only compare to baseline
for (g1, g2) in to_compare:
# check if this is compared to baseline
if g2 != baseline:
continue
# perform wilcoxon
s1 = df[df[group_col]==g1][value_col]
s2 = df[df[group_col]==g2][value_col]
stats = wilcoxon(s1, s2)
# read results
W = stats['W-val'].values[0]
p = stats['p-val'].values[0]
bonf = self.__apply_bonferroni(p, len(groups)-1)
rbc = stats['RBC'].values[0]
cles = stats['CLES'].values[0]
# results
results[g1].append(self.__check_p(p))
results[g1].append(self.__check_p(bonf))
results[g1].append(W)
results[g1].append(round(rbc, 5))
df_res = pd.DataFrame(results, index=pd.Index(['p', 'bonf', 'W', 'r'], name='value'), columns=pd.Index(groups[:-1], name='group'))
if display_result:
print("################")
print("### Wilcoxon ###")
print("################")
if not condition is False:
print(self.__condition_to_string(condition))
display(df_res)
print("")
        if file is not None:
            df_res.to_csv(file)
        return df_res
# Compares subgroups of the data to each other and determines
# how significant their differences are.
# If baseline parameter is given, all groups only compared to baseline.
# Else all groups are compared pairwise with each other.
# Use this as a post-hoc when ANOVA for repeated-measures
# data with normal distributions hints at significant differences.
    # Appropriate for paired/dependent samples:
# https://yatani.jp/teaching/doku.php?id=hcistats:ttest#a_paired_t_test
    # With the following assumptions:
# https://www.statisticssolutions.com/manova-analysis-paired-sample-t-test/
###
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# subject_col: string for column with individuals inside the groups
# condition: (column:string, value) or list of (column:string, value)
# baseline: optional value of group_col treated as a baseline
# display_result: bool if the result should be displayed
# file: string path to location if csv should be saved
# returns a summary table with content depending on baseline
###
# Uses pingouin.pairwise_ttests:
# https://pingouin-stats.org/generated/pingouin.pairwise_ttests.html
###
def paired_t_test(self, value_col, group_col, subject_col, condition=False,
baseline=None, display_result=True, file=None):
# collect data
df = self.__get_condition(self.df, condition)
# collect group values to compare
groups = self.__ordered_values(group_col)
# baseline gets special treatment
if baseline is not None:
groups = [x for x in groups if x != baseline]
groups.append(baseline)
# perform t tests
        stat = pairwise_ttests(df, dv=value_col, within=group_col, subject=subject_col,
                               parametric=True, padjust='bonf', effsize='cohen')
if baseline == None:
df_res = stat
else:
# setup dict to construct dataframe
if baseline != None:
results = {}
for group in groups[:-1]:
results[group] = []
# iterate over all rows
for i, row in stat.iterrows():
if row['A'] == baseline or row['B'] == baseline:
# read results
T = float(row['T'])
p = float(row['p-unc'])
d = float(row['cohen'])
c = row['A'] if row['A'] != baseline else row['B']
# cohen's d is asymmetric
if row['A'] == baseline:
d = -d
# results
results[c].append(self.__check_p(p))
results[c].append(self.__check_p(self.__apply_bonferroni(p, len(groups)-1)))
results[c].append(round(T, 5))
results[c].append(round(d, 5))
df_res = pd.DataFrame(results,
index=pd.Index(['p', 'bonf', 'T', 'd'], name='value'),
columns=pd.Index(groups[:-1], name='condition'))
if display_result:
print("######################")
print("### Paired t-Tests ###")
print("######################")
if not condition is False:
print(self.__condition_to_string(condition))
display(df_res)
print("")
        if file is not None:
            df_res.to_csv(file)
        return df_res
##############
### HELPER ###
##############
# Returns all possible values in a column ordered.
# column: string
def __ordered_values(self, column):
if column in self.order_table:
return self.order_table[column]
else:
return self.__possible_values(column)
# Returns all possible values in a column.
# column: string
def __possible_values(self, column):
return pd.unique(self.df[column])
    # Returns the rows which fulfill the condition.
# data: dataframe
# condition: (column:string, value) or list of (column:string, value)
def __get_condition(self, data, condition):
if not condition:
return data
elif isinstance(condition, list):
for single_cond in condition:
data = self.__get_condition(data, single_cond)
return data
else:
return data[data[condition[0]]==condition[1]]
# Returns a string representation of a condition.
# condition: (column:string, value) or list of (column:string, value)
def __condition_to_string(self, condition):
return str(condition)
# Returns the subset of values from a value column
    # separated by their value in a group column.
# data: dataframe
# value_col: string for column with values
# group_col: string for column with groups/conditions to compare
# condition: (column:string, value) or list of (column:string, value)
def __get_condition_sets(self, data, value_col, group_col, condition=None):
data = self.__get_condition(data, condition)
result = []
groups = self.__ordered_values(group_col)
for group in groups:
result.append(self.__get_condition(data, (group_col, group))[value_col])
return result
# Returns the bonferroni corrected version of a p-value.
# p: p-significance value
# num: number of comparisons besides (including) this
def __apply_bonferroni(self, p, num):
return min(p*num, 1)
# Returns an annotated version of a p-value.
# p: p-significance value
def __check_p(self, p):
if type(p) == str:
return p
elif abs(p) <= min(self.alpha, 0.001):
return str(round(p, self.precision)) + " ***"
elif abs(p) <= min(self.alpha, 0.01):
return str(round(p, self.precision)) + " **"
elif abs(p) <= self.alpha:
return str(round(p, self.precision)) + " *"
else:
return str(round(p, self.precision)) | 38.602113 | 142 | 0.583508 |
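# A minimal usage sketch of DatasetEvaluation, assuming a long-format results table
# with hypothetical columns 'Participant', 'Condition' and 'Score' and a made-up CSV
# path; it simply chains the methods above in the order their docstrings suggest
# (normality check -> omnibus test -> post-hoc test).
#
#   import pandas as pd
#   ev = DatasetEvaluation(pd.read_csv('results.csv'))
#   ev.save_order('Condition', ['baseline', 'variant_a', 'variant_b'])
#   ev.check_normal_distribution('Score')                         # Shapiro-Wilk
#   ev.friedman_test('Score', 'Condition')                        # non-parametric omnibus test
#   ev.wilcoxon_test('Score', 'Condition', baseline='baseline')   # post-hoc, Bonferroni-corrected
#   # for normally distributed data instead:
#   # ev.anova_test('Score', 'Condition', 'Participant')
#   # ev.paired_t_test('Score', 'Condition', 'Participant')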
aceee7362d2b477f1d235c7e5a59e0319d35e3ec | 18,682 | py | Python | neutron_vpnaas/extensions/vpnaas.py | glove747/liberty-vpnaas | 1491bc5276a10e37f76ffc970e9993bade958108 | [
"Apache-2.0"
] | null | null | null | neutron_vpnaas/extensions/vpnaas.py | glove747/liberty-vpnaas | 1491bc5276a10e37f76ffc970e9993bade958108 | [
"Apache-2.0"
] | null | null | null | neutron_vpnaas/extensions/vpnaas.py | glove747/liberty-vpnaas | 1491bc5276a10e37f76ffc970e9993bade958108 | [
"Apache-2.0"
] | null | null | null | # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
from neutron.api import extensions
from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper
from neutron.common import exceptions as nexception
from neutron.plugins.common import constants
from neutron.services import service_base
class VPNServiceNotFound(nexception.NotFound):
message = _("VPNService %(vpnservice_id)s could not be found")
class IPsecSiteConnectionNotFound(nexception.NotFound):
message = _("ipsec_site_connection %(ipsec_site_conn_id)s not found")
class IPsecSiteConnectionDpdIntervalValueError(nexception.InvalidInput):
message = _("ipsec_site_connection %(attr)s is "
"equal to or less than dpd_interval")
class IPsecSiteConnectionMtuError(nexception.InvalidInput):
message = _("ipsec_site_connection MTU %(mtu)d is too small "
"for ipv%(version)s")
class IKEPolicyNotFound(nexception.NotFound):
message = _("IKEPolicy %(ikepolicy_id)s could not be found")
class IPsecPolicyNotFound(nexception.NotFound):
message = _("IPsecPolicy %(ipsecpolicy_id)s could not be found")
class IKEPolicyInUse(nexception.InUse):
message = _("IKEPolicy %(ikepolicy_id)s is in use by existing "
"IPsecSiteConnection and can't be updated or deleted")
class VPNServiceInUse(nexception.InUse):
message = _("VPNService %(vpnservice_id)s is still in use")
class SubnetInUseByVPNService(nexception.InUse):
message = _("Subnet %(subnet_id)s is used by VPNService %(vpnservice_id)s")
class VPNStateInvalidToUpdate(nexception.BadRequest):
message = _("Invalid state %(state)s of vpnaas resource %(id)s"
" for updating")
class IPsecPolicyInUse(nexception.InUse):
message = _("IPsecPolicy %(ipsecpolicy_id)s is in use by existing "
"IPsecSiteConnection and can't be updated or deleted")
class DeviceDriverImportError(nexception.NeutronException):
message = _("Can not load driver :%(device_driver)s")
class SubnetIsNotConnectedToRouter(nexception.BadRequest):
message = _("Subnet %(subnet_id)s is not "
"connected to Router %(router_id)s")
class RouterIsNotExternal(nexception.BadRequest):
message = _("Router %(router_id)s has no external network gateway set")
class VPNPeerAddressNotResolved(nexception.InvalidInput):
message = _("Peer address %(peer_address)s cannot be resolved")
class ExternalNetworkHasNoSubnet(nexception.BadRequest):
message = _("Router's %(router_id)s external network has "
"no %(ip_version)s subnet")
vpn_supported_initiators = ['bi-directional', 'response-only']
vpn_supported_encryption_algorithms = ['3des', 'aes-128',
'aes-192', 'aes-256']
vpn_dpd_supported_actions = [
'hold', 'clear', 'restart', 'restart-by-peer', 'disabled'
]
vpn_supported_transform_protocols = ['esp', 'ah', 'ah-esp']
vpn_supported_encapsulation_mode = ['tunnel', 'transport']
#TODO(nati) add kilobytes when we support it
vpn_supported_lifetime_units = ['seconds']
vpn_supported_pfs = ['group2', 'group5', 'group14']
vpn_supported_ike_versions = ['v1', 'v2']
vpn_supported_auth_mode = ['psk']
vpn_supported_auth_algorithms = ['sha1']
vpn_supported_phase1_negotiation_mode = ['main']
vpn_lifetime_limits = (60, attr.UNLIMITED)
positive_int = (0, attr.UNLIMITED)
RESOURCE_ATTRIBUTE_MAP = {
'vpnservices': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'subnet_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'router_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'external_v4_ip': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'external_v6_ip': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True}
},
'ipsec_site_connections': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'peer_address': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_id': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'peer_cidrs': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_to_list,
'validate': {'type:subnet_list': None},
'is_visible': True},
'route_mode': {'allow_post': False, 'allow_put': False,
'default': 'static',
'is_visible': True},
'mtu': {'allow_post': True, 'allow_put': True,
'default': '1500',
'validate': {'type:range': positive_int},
'convert_to': attr.convert_to_int,
'is_visible': True},
'initiator': {'allow_post': True, 'allow_put': True,
'default': 'bi-directional',
'validate': {'type:values': vpn_supported_initiators},
'is_visible': True},
'auth_mode': {'allow_post': False, 'allow_put': False,
'default': 'psk',
'validate': {'type:values': vpn_supported_auth_mode},
'is_visible': True},
'psk': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True},
'dpd': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'is_visible': True,
'default': {},
'validate': {
'type:dict_or_empty': {
'actions': {
'type:values': vpn_dpd_supported_actions,
},
'interval': {
'type:range': positive_int
},
'timeout': {
'type:range': positive_int
}}}},
'admin_state_up': {'allow_post': True, 'allow_put': True,
'default': True,
'convert_to': attr.convert_to_boolean,
'is_visible': True},
'status': {'allow_post': False, 'allow_put': False,
'is_visible': True},
'vpnservice_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ikepolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True},
'ipsecpolicy_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True}
},
'ipsecpolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'transform_protocol': {
'allow_post': True,
'allow_put': True,
'default': 'esp',
'validate': {
'type:values': vpn_supported_transform_protocols},
'is_visible': True},
'auth_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms
},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True,
'allow_put': True,
'default': 'aes-128',
'validate': {
'type:values': vpn_supported_encryption_algorithms
},
'is_visible': True},
'encapsulation_mode': {
'allow_post': True,
'allow_put': True,
'default': 'tunnel',
'validate': {
'type:values': vpn_supported_encapsulation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits
}}},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
},
'ikepolicies': {
'id': {'allow_post': False, 'allow_put': False,
'validate': {'type:uuid': None},
'is_visible': True,
'primary_key': True},
'tenant_id': {'allow_post': True, 'allow_put': False,
'validate': {'type:string': None},
'required_by_policy': True,
'is_visible': True},
'name': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'description': {'allow_post': True, 'allow_put': True,
'validate': {'type:string': None},
'is_visible': True, 'default': ''},
'auth_algorithm': {'allow_post': True, 'allow_put': True,
'default': 'sha1',
'validate': {
'type:values': vpn_supported_auth_algorithms},
'is_visible': True},
'encryption_algorithm': {
'allow_post': True, 'allow_put': True,
'default': 'aes-128',
'validate': {'type:values': vpn_supported_encryption_algorithms},
'is_visible': True},
'phase1_negotiation_mode': {
'allow_post': True, 'allow_put': True,
'default': 'main',
'validate': {
'type:values': vpn_supported_phase1_negotiation_mode
},
'is_visible': True},
'lifetime': {'allow_post': True, 'allow_put': True,
'convert_to': attr.convert_none_to_empty_dict,
'default': {},
'validate': {
'type:dict_or_empty': {
'units': {
'type:values': vpn_supported_lifetime_units,
},
'value': {
'type:range': vpn_lifetime_limits,
}}},
'is_visible': True},
'ike_version': {'allow_post': True, 'allow_put': True,
'default': 'v1',
'validate': {
'type:values': vpn_supported_ike_versions},
'is_visible': True},
'pfs': {'allow_post': True, 'allow_put': True,
'default': 'group5',
'validate': {'type:values': vpn_supported_pfs},
'is_visible': True}
}
}
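# A hypothetical 'ipsecpolicies' request body that would satisfy the validators in
# RESOURCE_ATTRIBUTE_MAP above; the tenant_id is a made-up value and every omitted
# attribute falls back to the default declared in the map.
#
#   {"ipsecpolicy": {"tenant_id": "b0c0cd2a2a6e4995ba4e7a3d3f2f7d1c",
#                    "name": "vpn-policy-1",
#                    "encryption_algorithm": "aes-256",
#                    "pfs": "group14",
#                    "lifetime": {"units": "seconds", "value": 3600}}}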
class Vpnaas(extensions.ExtensionDescriptor):
@classmethod
def get_name(cls):
return "VPN service"
@classmethod
def get_alias(cls):
return "vpnaas"
@classmethod
def get_description(cls):
return "Extension for VPN service"
@classmethod
def get_namespace(cls):
return "https://wiki.openstack.org/Neutron/VPNaaS"
@classmethod
def get_updated(cls):
return "2013-05-29T10:00:00-00:00"
@classmethod
def get_resources(cls):
special_mappings = {'ikepolicies': 'ikepolicy',
'ipsecpolicies': 'ipsecpolicy'}
plural_mappings = resource_helper.build_plural_mappings(
special_mappings, RESOURCE_ATTRIBUTE_MAP)
plural_mappings['peer_cidrs'] = 'peer_cidr'
attr.PLURALS.update(plural_mappings)
return resource_helper.build_resource_info(plural_mappings,
RESOURCE_ATTRIBUTE_MAP,
constants.VPN,
register_quota=True,
translate_name=True)
@classmethod
def get_plugin_interface(cls):
return VPNPluginBase
def update_attributes_map(self, attributes):
super(Vpnaas, self).update_attributes_map(
attributes, extension_attrs_map=RESOURCE_ATTRIBUTE_MAP)
def get_extended_resources(self, version):
if version == "2.0":
return RESOURCE_ATTRIBUTE_MAP
else:
return {}
@six.add_metaclass(abc.ABCMeta)
class VPNPluginBase(service_base.ServicePluginBase):
def get_plugin_name(self):
return constants.VPN
def get_plugin_type(self):
return constants.VPN
def get_plugin_description(self):
return 'VPN service plugin'
@abc.abstractmethod
def get_vpnservices(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_vpnservice(self, context, vpnservice_id, fields=None):
pass
@abc.abstractmethod
def create_vpnservice(self, context, vpnservice):
pass
@abc.abstractmethod
def update_vpnservice(self, context, vpnservice_id, vpnservice):
pass
@abc.abstractmethod
def delete_vpnservice(self, context, vpnservice_id):
pass
@abc.abstractmethod
def get_ipsec_site_connections(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsec_site_connection(self, context,
ipsecsite_conn_id, fields=None):
pass
@abc.abstractmethod
def create_ipsec_site_connection(self, context, ipsec_site_connection):
pass
@abc.abstractmethod
def update_ipsec_site_connection(self, context,
ipsecsite_conn_id, ipsec_site_connection):
pass
@abc.abstractmethod
def delete_ipsec_site_connection(self, context, ipsecsite_conn_id):
pass
@abc.abstractmethod
def get_ikepolicy(self, context, ikepolicy_id, fields=None):
pass
@abc.abstractmethod
def get_ikepolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def create_ikepolicy(self, context, ikepolicy):
pass
@abc.abstractmethod
def update_ikepolicy(self, context, ikepolicy_id, ikepolicy):
pass
@abc.abstractmethod
def delete_ikepolicy(self, context, ikepolicy_id):
pass
@abc.abstractmethod
def get_ipsecpolicies(self, context, filters=None, fields=None):
pass
@abc.abstractmethod
def get_ipsecpolicy(self, context, ipsecpolicy_id, fields=None):
pass
@abc.abstractmethod
def create_ipsecpolicy(self, context, ipsecpolicy):
pass
@abc.abstractmethod
def update_ipsecpolicy(self, context, ipsecpolicy_id, ipsecpolicy):
pass
@abc.abstractmethod
def delete_ipsecpolicy(self, context, ipsecpolicy_id):
pass
| 37.817814 | 79 | 0.545605 |
aceee753cddf02703ac2d74d63a29b4d3566b6cd | 693 | py | Python | app/apps/core/migrations/0026_auto_20190521_0925.py | lawi21/escriptorium | 6043b0cb3894bb308a37ed97a26114dcea883834 | [
"MIT"
] | 4 | 2021-09-21T09:15:24.000Z | 2022-02-12T13:36:33.000Z | app/apps/core/migrations/0026_auto_20190521_0925.py | lawi21/escriptorium | 6043b0cb3894bb308a37ed97a26114dcea883834 | [
"MIT"
] | 1 | 2021-11-30T12:04:11.000Z | 2021-11-30T12:04:11.000Z | app/apps/core/migrations/0026_auto_20190521_0925.py | stweil/escriptorium | 63a063f2dbecebe9f79aa6376e99030f49a02502 | [
"MIT"
] | 2 | 2021-11-10T09:39:52.000Z | 2022-01-10T08:52:40.000Z | # Generated by Django 2.1.4 on 2019-05-21 09:25
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('core', '0025_auto_20190520_1341'),
]
operations = [
migrations.RemoveField(
model_name='ocrmodel',
name='trained',
),
migrations.AddField(
model_name='ocrmodel',
name='owner',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL),
),
]
| 26.653846 | 122 | 0.645022 |
aceee78b2895a6895eaf3fc5d5407e760b191620 | 1,104 | py | Python | app/api/api_thermostat.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | app/api/api_thermostat.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | app/api/api_thermostat.py | FHellmann/MLWTF | 582c3505d638907a848d5a6c739ee99981300f17 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
"""
Author: Fabio Hellmann <info@fabio-hellmann.de>
"""
from flask_restplus import Namespace, Resource, reqparse
from marshmallow import Schema, fields as ma_fields, post_load
from app.core.thermostat import thermostat_controller, Thermostat, ThermostatManufacturer
ns_ts = Namespace('thermostat', description='The thermostat interface')
get_parser = reqparse.RequestParser()
get_parser.add_argument('since', type=int)
class ThermostatManufacturerSchema(Schema):
name = ma_fields.String()
@post_load
def create_protocol(self, data):
return ThermostatManufacturer(**data)
class ThermostatSchema(Schema):
addr = ma_fields.String()
name = ma_fields.String()
issr = ma_fields.String()
manufacturer = ma_fields.Nested(ThermostatManufacturerSchema())
@post_load
def create(self, data):
return Thermostat(**data)
@ns_ts.route('/all')
class ThermostatCollectionResource(Resource):
def get(self):
schema = ThermostatSchema(many=True)
result = thermostat_controller.scan()
return schema.dump(result)
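# A small usage sketch of the schemas above, with made-up field values; loading goes
# through the @post_load hooks, so the result is a Thermostat whose manufacturer is a
# ThermostatManufacturer (assuming marshmallow 3 style load/dump).
#
#   payload = {"addr": "00:11:22:33", "name": "Living room", "issr": "local",
#              "manufacturer": {"name": "eq3"}}
#   thermostat = ThermostatSchema().load(payload)
#   ThermostatSchema().dump(thermostat)     # back to a plain dict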
| 26.285714 | 89 | 0.734601 |
aceee7960077a48a2a56d3d5ab708faa0efb81ce | 30,512 | py | Python | tests/pulses/sequence_pulse_template_tests.py | lankes-fzj/qupulse | 46f00f70bc998b98ac1ae4721d1a9a1c10b675aa | [
"MIT"
] | 2 | 2021-05-22T00:04:20.000Z | 2021-11-17T11:21:46.000Z | tests/pulses/sequence_pulse_template_tests.py | bpapajewski/qupulse | c3969a4fa9eabe69c1143ad16e6e3ca5fb9c068e | [
"MIT"
] | null | null | null | tests/pulses/sequence_pulse_template_tests.py | bpapajewski/qupulse | c3969a4fa9eabe69c1143ad16e6e3ca5fb9c068e | [
"MIT"
] | null | null | null | import unittest
from unittest import mock
from qupulse.expressions import Expression, ExpressionScalar
from qupulse.pulses.table_pulse_template import TablePulseTemplate
from qupulse.pulses.sequence_pulse_template import SequencePulseTemplate, SequenceWaveform
from qupulse.pulses.mapping_pulse_template import MappingPulseTemplate
from qupulse.pulses.parameters import ConstantParameter, ParameterConstraint, ParameterConstraintViolation, ParameterNotProvidedException
from qupulse._program.instructions import MEASInstruction
from qupulse._program._loop import Loop, MultiChannelProgram
from qupulse.pulses.sequencing import Sequencer
from tests.pulses.sequencing_dummies import DummySequencer, DummyInstructionBlock, DummyPulseTemplate,\
DummyNoValueParameter, DummyWaveform, MeasurementWindowTestCase
from tests.serialization_dummies import DummySerializer
from tests.serialization_tests import SerializableTests
from tests._program.transformation_tests import TransformationStub
from tests.pulses.pulse_template_tests import get_appending_internal_create_program, PulseTemplateStub
class SequencePulseTemplateTest(unittest.TestCase):
def __init__(self, *args, **kwargs) -> None:
super().__init__(*args, **kwargs)
# Setup test data
self.square = TablePulseTemplate({'default': [(0, 0),
('up', 'v', 'hold'),
('down', 0, 'hold'),
('length', 0)]},
measurements=[('mw1', 'up', 'length-up')])
self.mapping1 = {
'up': 'uptime',
'down': 'uptime + length',
'v': 'voltage',
'length': '0.5 * pulse_length'
}
self.window_name_mapping = {'mw1' : 'test_window'}
self.outer_parameters = {'uptime', 'length', 'pulse_length', 'voltage'}
self.parameters = dict()
self.parameters['uptime'] = ConstantParameter(5)
self.parameters['length'] = ConstantParameter(10)
self.parameters['pulse_length'] = ConstantParameter(100)
self.parameters['voltage'] = ConstantParameter(10)
self.sequence = SequencePulseTemplate(MappingPulseTemplate(self.square,
parameter_mapping=self.mapping1,
measurement_mapping=self.window_name_mapping))
def test_external_parameters_warning(self):
dummy = DummyPulseTemplate()
with self.assertWarnsRegex(DeprecationWarning, "external_parameters",
msg="SequencePT did not issue a warning for argument external_parameters"):
SequencePulseTemplate(dummy, external_parameters={'a'})
def test_duration(self):
pt = SequencePulseTemplate(DummyPulseTemplate(duration='a'),
DummyPulseTemplate(duration='a'),
DummyPulseTemplate(duration='b'))
self.assertEqual(pt.duration, Expression('a+a+b'))
def test_parameter_names(self) -> None:
pt = SequencePulseTemplate(DummyPulseTemplate(parameter_names={'a'}), DummyPulseTemplate(parameter_names={'b'}),
parameter_constraints=['a==b', 'a<c'], measurements=[('meas', 'd', 1)])
        self.assertEqual({'a', 'b', 'c', 'd'}, pt.parameter_names)
def test_build_waveform(self):
wfs = [DummyWaveform(), DummyWaveform()]
pts = [DummyPulseTemplate(waveform=wf) for wf in wfs]
spt = SequencePulseTemplate(*pts, parameter_constraints=['a < 3'])
with self.assertRaises(ParameterConstraintViolation):
spt.build_waveform(dict(a=4), dict())
parameters = dict(a=2)
channel_mapping = dict()
wf = spt.build_waveform(parameters, channel_mapping=channel_mapping)
for wfi, pt in zip(wfs, pts):
self.assertEqual(pt.build_waveform_calls, [(parameters, dict())])
self.assertIs(pt.build_waveform_calls[0][0], parameters)
self.assertIsInstance(wf, SequenceWaveform)
for wfa, wfb in zip(wf.compare_key, wfs):
self.assertIs(wfa, wfb)
def test_identifier(self) -> None:
identifier = 'some name'
pulse = SequencePulseTemplate(DummyPulseTemplate(), identifier=identifier)
self.assertEqual(identifier, pulse.identifier)
def test_multiple_channels(self) -> None:
dummy = DummyPulseTemplate(parameter_names={'hugo'}, defined_channels={'A', 'B'})
subtemplates = [(dummy, {'hugo': 'foo'}, {}), (dummy, {'hugo': '3'}, {})]
sequence = SequencePulseTemplate(*subtemplates)
self.assertEqual({'A', 'B'}, sequence.defined_channels)
self.assertEqual({'foo'}, sequence.parameter_names)
def test_multiple_channels_mismatch(self) -> None:
with self.assertRaises(ValueError):
SequencePulseTemplate(DummyPulseTemplate(defined_channels={'A'}),
DummyPulseTemplate(defined_channels={'B'}))
with self.assertRaises(ValueError):
SequencePulseTemplate(
DummyPulseTemplate(defined_channels={'A'}), DummyPulseTemplate(defined_channels={'A', 'B'})
)
def test_integral(self) -> None:
dummy1 = DummyPulseTemplate(defined_channels={'A', 'B'},
integrals={'A': ExpressionScalar('k+2*b'), 'B': ExpressionScalar('3')})
dummy2 = DummyPulseTemplate(defined_channels={'A', 'B'},
integrals={'A': ExpressionScalar('7*(b-f)'), 'B': ExpressionScalar('0.24*f-3.0')})
pulse = SequencePulseTemplate(dummy1, dummy2)
self.assertEqual({'A': ExpressionScalar('k+2*b+7*(b-f)'), 'B': ExpressionScalar('0.24*f')}, pulse.integral)
def test_concatenate(self):
a = DummyPulseTemplate(parameter_names={'foo'}, defined_channels={'A'})
b = DummyPulseTemplate(parameter_names={'bar'}, defined_channels={'A'})
spt_anon = SequencePulseTemplate(a, b)
spt_id = SequencePulseTemplate(a, b, identifier='id')
spt_meas = SequencePulseTemplate(a, b, measurements=[('m', 0, 'd')])
spt_constr = SequencePulseTemplate(a, b, parameter_constraints=['a < b'])
merged = SequencePulseTemplate.concatenate(a, spt_anon, b)
self.assertEqual(merged.subtemplates, [a, a, b, b])
result = SequencePulseTemplate.concatenate(a, spt_id, b)
self.assertEqual(result.subtemplates, [a, spt_id, b])
result = SequencePulseTemplate.concatenate(a, spt_meas, b)
self.assertEqual(result.subtemplates, [a, spt_meas, b])
result = SequencePulseTemplate.concatenate(a, spt_constr, b)
self.assertEqual(result.subtemplates, [a, spt_constr, b])
class SequencePulseTemplateSerializationTests(SerializableTests, unittest.TestCase):
@property
def class_to_test(self):
return SequencePulseTemplate
def make_kwargs(self):
return {
'subtemplates': [DummyPulseTemplate(), DummyPulseTemplate()],
'parameter_constraints': [str(ParameterConstraint('a<b'))],
'measurements': [('m', 0, 1)]
}
def make_instance(self, identifier=None, registry=None):
kwargs = self.make_kwargs()
subtemplates = kwargs['subtemplates']
del kwargs['subtemplates']
return self.class_to_test(identifier=identifier, *subtemplates, **kwargs, registry=registry)
def assert_equal_instance_except_id(self, lhs: SequencePulseTemplate, rhs: SequencePulseTemplate):
self.assertIsInstance(lhs, SequencePulseTemplate)
self.assertIsInstance(rhs, SequencePulseTemplate)
self.assertEqual(lhs.subtemplates, rhs.subtemplates)
self.assertEqual(lhs.parameter_constraints, rhs.parameter_constraints)
self.assertEqual(lhs.measurement_declarations, rhs.measurement_declarations)
class SequencePulseTemplateOldSerializationTests(unittest.TestCase):
def setUp(self) -> None:
self.table_foo = TablePulseTemplate({'default': [('hugo', 2),
('albert', 'voltage')]},
parameter_constraints=['albert<9.1'],
measurements=[('mw_foo','hugo','albert')],
identifier='foo',
registry=dict())
self.foo_param_mappings = dict(hugo='ilse', albert='albert', voltage='voltage')
self.foo_meas_mappings = dict(mw_foo='mw_bar')
def test_get_serialization_data_old(self) -> None:
# test for deprecated version during transition period, remove after final switch
with self.assertWarnsRegex(DeprecationWarning, "deprecated",
msg="SequencePT does not issue warning for old serialization routines."):
dummy1 = DummyPulseTemplate()
dummy2 = DummyPulseTemplate()
sequence = SequencePulseTemplate(dummy1, dummy2, parameter_constraints=['a<b'], measurements=[('m', 0, 1)],
registry=dict())
serializer = DummySerializer(serialize_callback=lambda x: str(x))
expected_data = dict(
subtemplates=[str(dummy1), str(dummy2)],
parameter_constraints=['a < b'],
measurements=[('m', 0, 1)]
)
data = sequence.get_serialization_data(serializer)
self.assertEqual(expected_data, data)
def test_deserialize_old(self) -> None:
# test for deprecated version during transition period, remove after final switch
with self.assertWarnsRegex(DeprecationWarning, "deprecated",
msg="SequencePT does not issue warning for old serialization routines."):
dummy1 = DummyPulseTemplate()
dummy2 = DummyPulseTemplate()
serializer = DummySerializer(serialize_callback=lambda x: str(id(x)))
data = dict(
subtemplates=[serializer.dictify(dummy1), serializer.dictify(dummy2)],
identifier='foo',
parameter_constraints=['a < b'],
measurements=[('m', 0, 1)]
)
template = SequencePulseTemplate.deserialize(serializer, **data)
self.assertEqual(template.subtemplates, [dummy1, dummy2])
self.assertEqual(template.parameter_constraints, [ParameterConstraint('a<b')])
self.assertEqual(template.measurement_declarations, [('m', 0, 1)])
class SequencePulseTemplateSequencingTests(MeasurementWindowTestCase):
def test_internal_create_program(self):
sub_templates = PulseTemplateStub(defined_channels={'a'}, duration=ExpressionScalar('t1')),\
PulseTemplateStub(defined_channels={'a'}, duration=ExpressionScalar('t2'))
wfs = DummyWaveform(duration=1), DummyWaveform(duration=2)
spt = SequencePulseTemplate(*sub_templates, measurements=[('m', 'a', 'b')])
kwargs = dict(parameters=dict(t1=ConstantParameter(.4),
t2=ConstantParameter(.5),
a=ConstantParameter(.1), b=ConstantParameter(.2),
irrelevant=ConstantParameter(42)),
measurement_mapping={'m': 'l'},
channel_mapping={'g': 'h'},
global_transformation=TransformationStub(),
to_single_waveform={'to', 'single', 'waveform'})
program = Loop()
expected_program = Loop(children=[Loop(waveform=wfs[0]),
Loop(waveform=wfs[1])],
measurements=[('l', .1, .2)])
with mock.patch.object(spt, 'validate_parameter_constraints') as validate_parameter_constraints:
with mock.patch.object(spt, 'get_measurement_windows',
return_value=[('l', .1, .2)]) as get_measurement_windows:
with mock.patch.object(sub_templates[0], '_create_program',
wraps=get_appending_internal_create_program(wfs[0], True)) as create_0,\
mock.patch.object(sub_templates[1], '_create_program',
wraps=get_appending_internal_create_program(wfs[1], True)) as create_1:
spt._internal_create_program(**kwargs, parent_loop=program)
self.assertEqual(expected_program, program)
validate_parameter_constraints.assert_called_once_with(parameters=kwargs['parameters'])
get_measurement_windows.assert_called_once_with(dict(a=.1, b=.2), kwargs['measurement_mapping'])
create_0.assert_called_once_with(**kwargs, parent_loop=program)
create_1.assert_called_once_with(**kwargs, parent_loop=program)
def test_create_program_internal(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'}, defined_channels={'A'})
parameters = {'foo': DummyNoValueParameter()}
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(parameters=parameters,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub1.waveform),
Loop(repetition_count=1, waveform=sub2.waveform)],
loop.children)
self.assert_measurement_windows_equal({'a': ([0], [1]), 'b': ([1], [2])}, loop.get_measurement_windows())
# ensure same result as from Sequencer
sequencer = Sequencer()
sequencer.push(seq, parameters=parameters, conditions={}, window_mapping=measurement_mapping, channel_mapping=channel_mapping)
block = sequencer.build()
old_program = MultiChannelProgram(block, channels={'A'})
self.assertEqual(old_program.programs[frozenset({'A'})], loop)
### test again with inverted sequence
seq = SequencePulseTemplate(sub2, sub1, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(parameters=parameters,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform),
Loop(repetition_count=1, waveform=sub1.waveform)],
loop.children)
self.assert_measurement_windows_equal({'a': ([0], [1]), 'b': ([3], [2])}, loop.get_measurement_windows())
# ensure same result as from Sequencer
sequencer = Sequencer()
sequencer.push(seq, parameters=parameters, conditions={}, window_mapping=measurement_mapping, channel_mapping=channel_mapping)
block = sequencer.build()
old_program = MultiChannelProgram(block, channels={'A'})
self.assertEqual(old_program.programs[frozenset({'A'})], loop)
def test_internal_create_program_no_measurement_mapping(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'})
parameters = {'foo': DummyNoValueParameter()}
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
children = [Loop(waveform=DummyWaveform())]
loop = Loop(measurements=[], children=children)
with self.assertRaises(KeyError):
seq._internal_create_program(parameters=parameters,
measurement_mapping=dict(),
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertFalse(sub1.create_program_calls)
self.assertFalse(sub2.create_program_calls)
self.assertEqual(children, loop.children)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assert_measurement_windows_equal({}, loop.get_measurement_windows())
# test for child level measurements (does not guarantee to leave parent_loop unchanged in this case)
with self.assertRaises(KeyError):
seq._internal_create_program(parameters=parameters,
measurement_mapping=dict(a='a'),
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
def test_internal_create_program_one_child_no_duration(self) -> None:
sub1 = DummyPulseTemplate(duration=0, waveform=None, measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'}, defined_channels={'A'})
parameters = {'foo': DummyNoValueParameter()}
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(parameters=parameters,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform)],
loop.children)
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
# ensure same result as from Sequencer
sequencer = Sequencer()
sequencer.push(seq, parameters=parameters, conditions={}, window_mapping=measurement_mapping, channel_mapping=channel_mapping)
block = sequencer.build()
old_program = MultiChannelProgram(block, channels={'A'})
self.assertEqual(old_program.programs[frozenset({'A'})], loop)
### test again with inverted sequence
seq = SequencePulseTemplate(sub2, sub1, measurements=[('a', 0, 1)])
loop = Loop()
seq._internal_create_program(parameters=parameters,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([Loop(repetition_count=1, waveform=sub2.waveform)],
loop.children)
self.assert_measurement_windows_equal({'a': ([0], [1])}, loop.get_measurement_windows())
# ensure same result as from Sequencer
sequencer = Sequencer()
sequencer.push(seq, parameters=parameters, conditions={}, window_mapping=measurement_mapping, channel_mapping=channel_mapping)
block = sequencer.build()
old_program = MultiChannelProgram(block, channels={'A'})
self.assertEqual(old_program.programs[frozenset({'A'})], loop)
def test_internal_create_program_both_children_no_duration(self) -> None:
sub1 = DummyPulseTemplate(duration=0, waveform=None, measurements=[('b', 1, 2)], defined_channels={'A'})
sub2 = DummyPulseTemplate(duration=0, waveform=None, parameter_names={'foo'}, defined_channels={'A'})
parameters = {'foo': DummyNoValueParameter()}
measurement_mapping = {'a': 'a', 'b': 'b'}
channel_mapping = dict()
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)])
loop = Loop(measurements=None)
seq._internal_create_program(parameters=parameters,
measurement_mapping=measurement_mapping,
channel_mapping=channel_mapping,
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
self.assertEqual(1, loop.repetition_count)
self.assertIsNone(loop.waveform)
self.assertEqual([], loop.children)
self.assertIsNone(loop._measurements)
# ensure same result as from Sequencer
sequencer = Sequencer()
sequencer.push(seq, parameters=parameters, conditions={}, window_mapping=measurement_mapping, channel_mapping=channel_mapping)
block = sequencer.build()
old_program = MultiChannelProgram(block, channels={'A'})
old_loop = old_program.programs[frozenset({'A'})]
self.assertEqual(old_loop.waveform, loop.waveform)
self.assertEqual(old_loop.children, loop.children)
# new loop will have no measurements. old_loop still defines SequencePT measurements
def test_internal_create_program_parameter_constraint_violations(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration=2, waveform=DummyWaveform(duration=2), parameter_names={'foo'})
parameters = {'foo': ConstantParameter(7)}
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 0, 1)], parameter_constraints={'foo < 2'})
loop = Loop()
with self.assertRaises(ParameterConstraintViolation):
seq._internal_create_program(parameters=parameters,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
def test_internal_create_program_parameter_missing(self) -> None:
sub1 = DummyPulseTemplate(duration=3, waveform=DummyWaveform(duration=3), measurements=[('b', 1, 2)])
sub2 = DummyPulseTemplate(duration='d', waveform=DummyWaveform(duration=2), parameter_names={'foo'})
seq = SequencePulseTemplate(sub1, sub2, measurements=[('a', 'bar', 1)], parameter_constraints={'foo < 2'})
loop = Loop()
# test parameter from constraints
parameters = {}
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(parameters=parameters,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
# test parameter from measurements
parameters = {'foo': ConstantParameter(1)}
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(parameters=parameters,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
# test parameter from duration
parameters = {'foo': ConstantParameter(1), 'bar': ConstantParameter(0)}
with self.assertRaises(ParameterNotProvidedException):
seq._internal_create_program(parameters=parameters,
measurement_mapping={'a': 'a', 'b': 'b'},
channel_mapping=dict(),
global_transformation=None,
to_single_waveform=set(),
parent_loop=loop)
class SequencePulseTemplateOldSequencingTests(SequencePulseTemplateTest):
def test_build_sequence(self) -> None:
sub1 = DummyPulseTemplate(requires_stop=False)
sub2 = DummyPulseTemplate(requires_stop=True, parameter_names={'foo'})
parameters = {'foo': DummyNoValueParameter()}
sequencer = DummySequencer()
block = DummyInstructionBlock()
seq = SequencePulseTemplate(sub1, (sub2, {'foo': 'foo'}), measurements=[('a', 0, 1)])
seq.build_sequence(sequencer, parameters,
conditions=dict(),
channel_mapping={'default': 'a'},
measurement_mapping={'a': 'b'},
instruction_block=block)
self.assertEqual(2, len(sequencer.sequencing_stacks[block]))
self.assertEqual(block.instructions[0], MEASInstruction([('b', 0, 1)]))
sequencer = DummySequencer()
block = DummyInstructionBlock()
seq = SequencePulseTemplate((sub2, {'foo': 'foo'}), sub1)
seq.build_sequence(sequencer, parameters, {}, {}, {}, block)
self.assertEqual(2, len(sequencer.sequencing_stacks[block]))
@unittest.skip("Was this test faulty before? Why should the three last cases return false?")
def test_requires_stop(self) -> None:
sub1 = (DummyPulseTemplate(requires_stop=False), {}, {})
sub2 = (DummyPulseTemplate(requires_stop=True, parameter_names={'foo'}), {'foo': 'foo'}, {})
parameters = {'foo': DummyNoValueParameter()}
seq = SequencePulseTemplate(sub1)
self.assertFalse(seq.requires_stop(parameters, {}))
seq = SequencePulseTemplate(sub2)
self.assertFalse(seq.requires_stop(parameters, {}))
seq = SequencePulseTemplate(sub1, sub2)
self.assertFalse(seq.requires_stop(parameters, {}))
seq = SequencePulseTemplate(sub2, sub1)
self.assertFalse(seq.requires_stop(parameters, {}))
def test_crash(self) -> None:
table = TablePulseTemplate({'default': [('ta', 'va', 'hold'),
('tb', 'vb', 'linear'),
('tend', 0, 'jump')]}, identifier='foo')
expected_parameters = {'ta', 'tb', 'tc', 'td', 'va', 'vb', 'tend'}
first_mapping = {
'ta': 'ta',
'tb': 'tb',
'va': 'va',
'vb': 'vb',
'tend': 'tend'
}
second_mapping = {
'ta': 'tc',
'tb': 'td',
'va': 'vb',
'vb': 'va + vb',
'tend': '2 * tend'
}
sequence = SequencePulseTemplate((table, first_mapping, {}), (table, second_mapping, {}))
self.assertEqual(expected_parameters, sequence.parameter_names)
parameters = {
'ta': ConstantParameter(2),
'va': ConstantParameter(2),
'tb': ConstantParameter(4),
'vb': ConstantParameter(3),
'tc': ConstantParameter(5),
'td': ConstantParameter(11),
'tend': ConstantParameter(6)}
sequencer = DummySequencer()
block = DummyInstructionBlock()
self.assertFalse(sequence.requires_stop(parameters, {}))
sequence.build_sequence(sequencer,
parameters=parameters,
conditions={},
measurement_mapping={},
channel_mapping={'default': 'default'},
instruction_block=block)
from qupulse.pulses.sequencing import Sequencer
s = Sequencer()
s.push(sequence, parameters, channel_mapping={'default': 'EXAMPLE_A'})
class SequencePulseTemplateTestProperties(SequencePulseTemplateTest):
def test_is_interruptable(self):
self.assertTrue(
SequencePulseTemplate(DummyPulseTemplate(is_interruptable=True),
DummyPulseTemplate(is_interruptable=True)).is_interruptable)
self.assertTrue(
SequencePulseTemplate(DummyPulseTemplate(is_interruptable=True),
DummyPulseTemplate(is_interruptable=False)).is_interruptable)
self.assertFalse(
SequencePulseTemplate(DummyPulseTemplate(is_interruptable=False),
DummyPulseTemplate(is_interruptable=False)).is_interruptable)
def test_measurement_names(self):
d1 = DummyPulseTemplate(measurement_names={'a'})
d2 = DummyPulseTemplate(measurement_names={'b'})
spt = SequencePulseTemplate(d1, d2, measurements=[('c', 0, 1)])
self.assertEqual(spt.measurement_names, {'a', 'b', 'c'})
if __name__ == "__main__":
unittest.main(verbosity=2)
| 51.715254 | 137 | 0.596683 |
aceee8e2efa6778cbde5f59b499cffefbba4d5d9 | 1,544 | py | Python | cases/main_case.py | Geroff/Esee3AutoTest | 490b1dc9bd7c2ecf39d796f300bd7b0cdf680220 | [
"Apache-2.0"
] | null | null | null | cases/main_case.py | Geroff/Esee3AutoTest | 490b1dc9bd7c2ecf39d796f300bd7b0cdf680220 | [
"Apache-2.0"
] | null | null | null | cases/main_case.py | Geroff/Esee3AutoTest | 490b1dc9bd7c2ecf39d796f300bd7b0cdf680220 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
import time
from base_case import BaseCase
from PO.notification_page import NotificationPage
class MainTestCase(BaseCase):
def setUp(self):
        # call the parent class's setUp
super(MainTestCase, self).setUp()
# def test_is_device_dialog_show(self):
# """
    #     test whether the new-device dialog is shown
# """
# time.sleep(2)
# self.assertIsNotNone(self.uiHelper.find_element(MainView.main_device_list_dialog_tv_title))
#
# def test_sweep(self):
# """
    #     test swipe-to-refresh
# """
# time.sleep(2)
# element = self.uiHelper.find_element(MainView.main_device_list_dialog_tv_title)
# if element is not None:
# items = self.uiHelper.find_elements(MainView.main_device_list_dialog_rl_item)
# print(len(items))
# self.uiHelper.press_back_key()
# time.sleep(2)
# self.uiHelper.swipe(400, 400, 400, 800, 100)
# time.sleep(5)
def test_drop_down_notification(self):
time.sleep(5)
notification_page = NotificationPage(self.driver)
        height = notification_page.get_page_height() - 10  # must be smaller than the screen height, otherwise an error is raised
notification_page.drop_down_notification()
time.sleep(5)
# is_receive_verify_code = notification_page.wait_for_receive_verify_code()
# print ('is_receive_verify_code?' + str(is_receive_verify_code))
# notification_page.get_verify_text()
# notification_page.test()
notification_page.drop_up_notification(height)
time.sleep(5)
| 29.692308 | 101 | 0.645078 |
aceee90338c027c9b1391a0776f0e1c299dd1387 | 4,851 | py | Python | tensornetwork/backends/tensorflow/decompositions_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | 1 | 2020-04-09T03:52:20.000Z | 2020-04-09T03:52:20.000Z | tensornetwork/backends/tensorflow/decompositions_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | null | null | null | tensornetwork/backends/tensorflow/decompositions_test.py | jensenjhwang/TensorNetwork | 35d1247cc3fb80768965f7429ac9b8b914a144a8 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 The TensorNetwork Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import numpy as np
import tensorflow as tf
from tensornetwork.backends.tensorflow import decompositions
class DecompositionsTest(tf.test.TestCase):
def test_expected_shapes(self):
val = tf.zeros((2, 3, 4, 5))
u, s, vh, _ = decompositions.svd_decomposition(tf, val, 2)
self.assertEqual(u.shape, (2, 3, 6))
self.assertEqual(s.shape, (6,))
self.assertAllClose(s, np.zeros(6))
self.assertEqual(vh.shape, (6, 4, 5))
def test_expected_shapes_qr(self):
val = tf.zeros((2, 3, 4, 5))
q, r = decompositions.qr_decomposition(tf, val, 2)
self.assertEqual(q.shape, (2, 3, 6))
self.assertEqual(r.shape, (6, 4, 5))
def test_expected_shapes_rq(self):
val = tf.zeros((2, 3, 4, 5))
r, q = decompositions.rq_decomposition(tf, val, 2)
self.assertEqual(r.shape, (2, 3, 6))
self.assertEqual(q.shape, (6, 4, 5))
def test_rq_decomposition(self):
random_matrix = np.random.rand(10, 10)
r, q = decompositions.rq_decomposition(tf, random_matrix, 1)
self.assertAllClose(tf.tensordot(r, q, ([1], [0])), random_matrix)
def test_qr_decomposition(self):
random_matrix = np.random.rand(10, 10)
q, r = decompositions.qr_decomposition(tf, random_matrix, 1)
self.assertAllClose(tf.tensordot(q, r, ([1], [0])), random_matrix)
def test_rq_decomposition_defun(self):
random_matrix = np.random.rand(10, 10)
rq_decomposition = tf.function(decompositions.rq_decomposition)
r, q = rq_decomposition(tf, random_matrix, 1)
self.assertAllClose(tf.tensordot(r, q, ([1], [0])), random_matrix)
def test_qr_decomposition_defun(self):
random_matrix = np.random.rand(10, 10)
qr_decomposition = tf.function(decompositions.qr_decomposition)
q, r = qr_decomposition(tf, random_matrix, 1)
self.assertAllClose(tf.tensordot(q, r, ([1], [0])), random_matrix)
def test_max_singular_values(self):
random_matrix = np.random.rand(10, 10)
unitary1, _, unitary2 = np.linalg.svd(random_matrix)
singular_values = np.array(range(10))
val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))
u, s, vh, trun = decompositions.svd_decomposition(
tf, val, 1, max_singular_values=7)
self.assertEqual(u.shape, (10, 7))
self.assertEqual(s.shape, (7,))
self.assertAllClose(s, np.arange(9, 2, -1))
self.assertEqual(vh.shape, (7, 10))
self.assertAllClose(trun, np.arange(2, -1, -1))
def test_max_singular_values_defun(self):
random_matrix = np.random.rand(10, 10)
unitary1, _, unitary2 = np.linalg.svd(random_matrix)
singular_values = np.array(range(10))
val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))
svd_decomposition = tf.function(decompositions.svd_decomposition)
u, s, vh, trun = svd_decomposition(tf, val, 1, max_singular_values=7)
self.assertEqual(u.shape, (10, 7))
self.assertEqual(s.shape, (7,))
self.assertAllClose(s, np.arange(9, 2, -1))
self.assertEqual(vh.shape, (7, 10))
self.assertAllClose(trun, np.arange(2, -1, -1))
def test_max_truncation_error(self):
random_matrix = np.random.rand(10, 10)
unitary1, _, unitary2 = np.linalg.svd(random_matrix)
singular_values = np.array(range(10))
val = unitary1.dot(np.diag(singular_values).dot(unitary2.T))
u, s, vh, trun = decompositions.svd_decomposition(
tf, val, 1, max_truncation_error=math.sqrt(5.1))
self.assertEqual(u.shape, (10, 7))
self.assertEqual(s.shape, (7,))
self.assertAllClose(s, np.arange(9, 2, -1))
self.assertEqual(vh.shape, (7, 10))
self.assertAllClose(trun, np.arange(2, -1, -1))
def test_max_truncation_error_relative(self):
absolute = np.diag([2.0, 1.0, 0.2, 0.1])
relative = np.diag([2.0, 1.0, 0.2, 0.1])
max_truncation_err = 0.2
_, _, _, trunc_sv_absolute = decompositions.svd_decomposition(
tf, absolute, 1,
max_truncation_error=max_truncation_err,
relative=False)
_, _, _, trunc_sv_relative = decompositions.svd_decomposition(
tf, relative, 1,
max_truncation_error=max_truncation_err,
relative=True)
np.testing.assert_almost_equal(trunc_sv_absolute, [0.1])
np.testing.assert_almost_equal(trunc_sv_relative, [0.2, 0.1])
if __name__ == '__main__':
tf.test.main()
| 39.762295 | 74 | 0.695321 |
aceee94e18fbba6b064daf57c60785c2c7df80ea | 2,917 | py | Python | pdx-extract/tests/test_pyppeteer.py | michaelheyman/PSU-Code-Review | 5a55d981425aaad69dc9ee06baaaef22bc426893 | [
"MIT"
] | null | null | null | pdx-extract/tests/test_pyppeteer.py | michaelheyman/PSU-Code-Review | 5a55d981425aaad69dc9ee06baaaef22bc426893 | [
"MIT"
] | null | null | null | pdx-extract/tests/test_pyppeteer.py | michaelheyman/PSU-Code-Review | 5a55d981425aaad69dc9ee06baaaef22bc426893 | [
"MIT"
] | null | null | null | import asyncio
import asynctest
import pytest
from app import pyppeteer
from tests import utils
@pytest.mark.asyncio
@asynctest.patch("pyppeteer.launch")
async def test_initialize_browser_returns_browser(mock_launch):
mock_launch = utils.set_async_result(mock_launch, "mock-browser")
browser = await pyppeteer.initialize()
mock_launch.assert_called
assert browser == "mock-browser"
@pytest.mark.asyncio
@asynctest.patch("app.pyppeteer.get_unique_session_id")
@asynctest.patch("app.pyppeteer.get_jsession_id")
@asynctest.patch("pyppeteer.page")
async def test_get_tokens_returns_session_and_unique_ids(
mock_page, mock_jsession_id, mock_unique_id
):
page = asynctest.CoroutineMock()
page.goto.return_value = asyncio.Future()
page.goto.return_value.set_result(None)
browser = asynctest.CoroutineMock()
browser.newPage.return_value = asyncio.Future()
browser.newPage.return_value.set_result(page)
mock_jsession_id = utils.set_async_result(mock_jsession_id, "test-jsession-id")
mock_unique_id = utils.set_async_result(mock_unique_id, "test-unique-id")
session_id, unique_session_id = await pyppeteer.get_tokens(browser)
mock_jsession_id.assert_called
mock_unique_id.assert_called
assert session_id == "test-jsession-id"
assert unique_session_id == "test-unique-id"
@pytest.mark.asyncio
@asynctest.patch("pyppeteer.page")
async def test_get_jsession_id_returns_jsession_id(mock_page):
mock_cookies = [
{"name": "foo", "value": "bar"},
{"name": "baz", "value": "qux"},
{"name": "quux", "value": "corge"},
{"name": "JSESSIONID", "value": "test-unique-jsession-id"},
]
mock_page.cookies.return_value = asyncio.Future()
mock_page.cookies.return_value.set_result(mock_cookies)
unique_session_id = await pyppeteer.get_jsession_id(mock_page)
mock_page.assert_called
assert unique_session_id == "test-unique-jsession-id"
@pytest.mark.asyncio
@asynctest.patch("pyppeteer.page")
async def test_get_unique_session_id_returns_evaluated_value(mock_page):
mock_page.evaluate.return_value = asyncio.Future()
mock_page.evaluate.return_value.set_result("mock-unique-session-id")
unique_session_id = await pyppeteer.get_unique_session_id(mock_page)
mock_page.assert_called
assert unique_session_id == "mock-unique-session-id"
@pytest.mark.asyncio
async def test_get_page_returns_page():
mock_page = asynctest.CoroutineMock()
mock_page.return_value.set_result("test-page")
mock_page.goto.return_value = asyncio.Future()
mock_page.goto.return_value.set_result("should-not-be-seen")
mock_browser = asynctest.CoroutineMock()
mock_browser.newPage.return_value = asyncio.Future()
mock_browser.newPage.return_value.set_result(mock_page)
await pyppeteer.get_page(mock_browser)
mock_browser.assert_called
mock_page.goto.assert_called
| 32.411111 | 83 | 0.761399 |
aceee9b37a17dc56376a5d82d2d70f6f8b31324e | 2,248 | py | Python | tasks/test_matrix_in_spiral_form.py | taland/py-playground | 2e11e4dfda52c933a73c7f00272b2064b01d2a62 | [
"MIT"
] | null | null | null | tasks/test_matrix_in_spiral_form.py | taland/py-playground | 2e11e4dfda52c933a73c7f00272b2064b01d2a62 | [
"MIT"
] | null | null | null | tasks/test_matrix_in_spiral_form.py | taland/py-playground | 2e11e4dfda52c933a73c7f00272b2064b01d2a62 | [
"MIT"
] | null | null | null | import pytest
from tasks.matrix_in_spiral_form import (
count_items_on_level,
get_item_coord_by_index,
get_item_coord_by_index_on_level,
iter_matrix_in_spiral_form,
)
@pytest.mark.parametrize("matrix_size,depth,level,expected_result", [
(4, 2, 1, 12),
])
def test_count_items_on_level(matrix_size, depth, level, expected_result):
actual_num = count_items_on_level(matrix_size, depth, level)
assert expected_result == actual_num
def test_exception_when_level_is_invalid():
with pytest.raises(ValueError):
count_items_on_level(4, depth=3, level=4)
def test_exception_when_level_is_zero():
with pytest.raises(ValueError):
count_items_on_level(4, depth=3, level=0)
@pytest.mark.parametrize("matrix_size,index,expected_result", [
(4, 0, (0, 0)),
(4, 1, (0, 1)),
(4, 2, (0, 2)),
(4, 3, (0, 3)),
(4, 4, (1, 3)),
(4, 5, (2, 3)),
(4, 6, (3, 3)),
(4, 7, (3, 2)),
(4, 8, (3, 1)),
(4, 9, (3, 0)),
(4, 10, (2, 0)),
(4, 11, (1, 0)),
])
def test_get_item_coord_by_index(matrix_size, index, expected_result):
coords = get_item_coord_by_index(matrix_size, index)
assert expected_result == coords
def test_exception_when_coord_is_out_of_scope():
# todo:
with pytest.raises(ValueError):
raise ValueError()
@pytest.mark.parametrize("matrix_size,index,level,expected_result", [
(4, 0, 1, (0, 0)),
(4, 1, 1, (0, 1)),
(4, 2, 1, (0, 2)),
(4, 3, 1, (0, 3)),
(4, 4, 1, (1, 3)),
(4, 5, 1, (2, 3)),
(4, 6, 1, (3, 3)),
(4, 7, 1, (3, 2)),
(4, 8, 1, (3, 1)),
(4, 9, 1, (3, 0)),
(4, 10, 1, (2, 0)),
(4, 11, 1, (1, 0)),
(4, 0, 2, (1, 1)),
(4, 1, 2, (1, 2)),
(4, 2, 2, (2, 2)),
(4, 3, 2, (2, 1)),
])
def test_get_item_coord_by_index_on_level(matrix_size, index, level, expected_result):
coords = get_item_coord_by_index_on_level(matrix_size, index, level)
assert expected_result == coords
def test_iter_matrix_in_spiral_form():
m = [
[1, 2, 3, 4],
[12, 13, 14, 5],
[11, 16, 15, 6],
[10, 9, 8, 7],
]
rs = [m[coord[0]][coord[1]] for coord in iter_matrix_in_spiral_form(m)]
assert rs == list(range(1, 17))
| 26.139535 | 86 | 0.580961 |
aceee9be86b8884c117778d75bdcd6381b6dab6f | 3,407 | py | Python | tests/test_enhancements/output/enumeration/evidence.py | deepakunni3/linkml | a335227b05b0290c21ebae50bb99e16eca57c8eb | [
"CC0-1.0"
] | 83 | 2021-03-17T16:31:02.000Z | 2022-03-13T23:17:02.000Z | tests/test_enhancements/output/enumeration/evidence.py | deepakunni3/linkml | a335227b05b0290c21ebae50bb99e16eca57c8eb | [
"CC0-1.0"
] | 390 | 2021-03-18T18:44:11.000Z | 2022-03-30T22:55:01.000Z | tests/test_enhancements/output/enumeration/evidence.py | deepakunni3/linkml | a335227b05b0290c21ebae50bb99e16eca57c8eb | [
"CC0-1.0"
] | 20 | 2021-03-27T08:55:56.000Z | 2022-02-24T15:25:57.000Z | # Auto generated from evidence.yaml by pythongen.py version: 0.9.0
# Generation date: 2021-07-09 16:24
# Schema: evidence
#
# id: http://example.org/test/evidence
# description:
# license: https://creativecommons.org/publicdomain/zero/1.0/
import dataclasses
import sys
import re
from jsonasobj2 import JsonObj, as_dict
from typing import Optional, List, Union, Dict, ClassVar, Any
from dataclasses import dataclass
from linkml_runtime.linkml_model.meta import EnumDefinition, PermissibleValue, PvFormulaOptions
from linkml_runtime.utils.slot import Slot
from linkml_runtime.utils.metamodelcore import empty_list, empty_dict, bnode
from linkml_runtime.utils.yamlutils import YAMLRoot, extended_str, extended_float, extended_int
from linkml_runtime.utils.dataclass_extensions_376 import dataclasses_init_fn_with_kwargs
from linkml_runtime.utils.formatutils import camelcase, underscore, sfx
from linkml_runtime.utils.enumerations import EnumDefinitionImpl
from rdflib import Namespace, URIRef
from linkml_runtime.utils.curienamespace import CurieNamespace
from linkml_runtime.linkml_model.types import String
metamodel_version = "1.7.0"
# Overwrite dataclasses _init_fn to add **kwargs in __init__
dataclasses._init_fn = dataclasses_init_fn_with_kwargs
# Namespaces
CLUE = CurieNamespace('CLUE', 'http://example.org/clue/')
EVIDENCE = CurieNamespace('evidence', 'http://example.org/test/evidence/')
LINKML = CurieNamespace('linkml', 'https://w3id.org/linkml/')
DEFAULT_ = EVIDENCE
# Types
# Class references
class EvidencerName(extended_str):
pass
@dataclass
class Evidencer(YAMLRoot):
_inherited_slots: ClassVar[List[str]] = []
class_class_uri: ClassVar[URIRef] = EVIDENCE.Evidencer
class_class_curie: ClassVar[str] = "evidence:Evidencer"
class_name: ClassVar[str] = "evidencer"
class_model_uri: ClassVar[URIRef] = EVIDENCE.Evidencer
name: Union[str, EvidencerName] = None
code: Union[str, "Evidence"] = None
def __post_init__(self, *_: List[str], **kwargs: Dict[str, Any]):
if self._is_empty(self.name):
self.MissingRequiredField("name")
if not isinstance(self.name, EvidencerName):
self.name = EvidencerName(self.name)
if self._is_empty(self.code):
self.MissingRequiredField("code")
if not isinstance(self.code, Evidence):
self.code = Evidence(self.code)
super().__post_init__(**kwargs)
# Enumerations
class Evidence(EnumDefinitionImpl):
"""
Permissible values for CLUE evidence fragments
"""
IEA = PermissibleValue(text="IEA",
description="Colonel Mustard in the Ballroom")
ISS = PermissibleValue(text="ISS",
description="Mrs. Peacock with the Dagger",
meaning=CLUE["1173"])
_defn = EnumDefinition(
name="Evidence",
description="Permissible values for CLUE evidence fragments",
code_set=CLUE.fragment_vd,
)
# Slots
class slots:
pass
slots.evidencer__name = Slot(uri=EVIDENCE.name, name="evidencer__name", curie=EVIDENCE.curie('name'),
model_uri=EVIDENCE.evidencer__name, domain=None, range=URIRef)
slots.evidencer__code = Slot(uri=EVIDENCE.code, name="evidencer__code", curie=EVIDENCE.curie('code'),
model_uri=EVIDENCE.evidencer__code, domain=None, range=Union[str, "Evidence"]) | 35.123711 | 101 | 0.726152 |
aceeeaad55cd2ee57b23f072cbd15514e747724d | 5,550 | py | Python | coursereg/utils.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | null | null | null | coursereg/utils.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | 105 | 2016-05-07T05:54:28.000Z | 2016-12-30T13:47:13.000Z | coursereg/utils.py | s-gv/bheemboy | b35c6611739b6df517cb1bb642fa6d46cf1b246e | [
"MIT"
] | 4 | 2016-05-29T14:00:33.000Z | 2020-09-30T17:16:02.000Z | from django.shortcuts import render, redirect
from django.core.urlresolvers import reverse
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.utils import timezone
from datetime import timedelta, datetime
from coursereg import models
from django.contrib.auth import update_session_auth_hash
from django.core.exceptions import PermissionDenied
from django.utils.http import urlquote_plus, urlunquote_plus
from django.db import transaction
from django.db.models import Model
from django.apps import apps
from django.contrib.contenttypes.fields import GenericForeignKey
def parse_datetime_str(date_str):
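    """Parse a date string of the form 'Tue Mar 01 2016 10:30:00 GMT+0530 (IST)'
    (an illustrative example) into a timezone-aware datetime: the first five
    tokens give the naive timestamp, the sixth carries the UTC offset and name.
    """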
naive_date_str = ' '.join(date_str.split(' ')[:5])
offset_str = date_str.split(' ')[5][-5:]
offset_name = str(date_str.split(' ')[5][:-5])
naive_dt = datetime.strptime(naive_date_str, '%a %b %d %Y %H:%M:%S')
offset = int(offset_str[-4:-2])*60 + int(offset_str[-2:])
if offset_str[0] == "-":
offset = -offset
return naive_dt.replace(tzinfo=timezone.FixedOffset(offset, offset_name))
def datetime_to_str(datetime):
return datetime.strftime('%a %b %d %Y %H:%M:%S %z (%Z)')
@transaction.atomic
def merge_model_objects(primary_object, alias_objects=[], keep_old=False):
"""
Use this function to merge model objects (i.e. Users, Organizations, Polls,
etc.) and migrate all of the related fields from the alias objects to the
primary object.
https://djangosnippets.org/snippets/2283/
Usage:
from django.contrib.auth.models import User
primary_user = User.objects.get(email='good_email@example.com')
duplicate_user = User.objects.get(email='good_email+duplicate@example.com')
merge_model_objects(primary_user, duplicate_user)
"""
if not isinstance(alias_objects, list):
alias_objects = [alias_objects]
# check that all aliases are the same class as primary one and that
# they are subclass of model
primary_class = primary_object.__class__
if not issubclass(primary_class, Model):
raise TypeError('Only django.db.models.Model subclasses can be merged')
for alias_object in alias_objects:
if not isinstance(alias_object, primary_class):
            raise TypeError('Only models of same class can be merged')
# Get a list of all GenericForeignKeys in all models
# TODO: this is a bit of a hack, since the generics framework should provide a similar
# method to the ForeignKey field for accessing the generic related fields.
generic_fields = []
for model in apps.get_models():
for field_name, field in filter(lambda x: isinstance(x[1], GenericForeignKey), model.__dict__.iteritems()):
generic_fields.append(field)
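    # Local fields on the primary object that are still empty; they may be filled in
    # from the alias objects while merging below.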
blank_local_fields = set([field.attname for field in primary_object._meta.local_fields if getattr(primary_object, field.attname) in [None, '']])
# Loop through all alias objects and migrate their data to the primary object.
for alias_object in alias_objects:
# Migrate all foreign key references from alias object to primary object.
for related_object in alias_object._meta.get_all_related_objects():
# The variable name on the alias_object model.
alias_varname = related_object.get_accessor_name()
# The variable name on the related model.
obj_varname = related_object.field.name
related_objects = getattr(alias_object, alias_varname)
for obj in related_objects.all():
setattr(obj, obj_varname, primary_object)
obj.save()
# Migrate all many to many references from alias object to primary object.
for related_many_object in alias_object._meta.get_all_related_many_to_many_objects():
alias_varname = related_many_object.get_accessor_name()
obj_varname = related_many_object.field.name
if alias_varname is not None:
# standard case
related_many_objects = getattr(alias_object, alias_varname).all()
else:
# special case, symmetrical relation, no reverse accessor
related_many_objects = getattr(alias_object, obj_varname).all()
for obj in related_many_objects.all():
getattr(obj, obj_varname).remove(alias_object)
getattr(obj, obj_varname).add(primary_object)
# Migrate all generic foreign key references from alias object to primary object.
for field in generic_fields:
filter_kwargs = {}
filter_kwargs[field.fk_field] = alias_object._get_pk_val()
filter_kwargs[field.ct_field] = field.get_content_type(alias_object)
for generic_related_object in field.model.objects.filter(**filter_kwargs):
setattr(generic_related_object, field.name, primary_object)
generic_related_object.save()
# Try to fill all missing values in primary object by values of duplicates
filled_up = set()
for field_name in blank_local_fields:
val = getattr(alias_object, field_name)
if val not in [None, '']:
setattr(primary_object, field_name, val)
filled_up.add(field_name)
blank_local_fields -= filled_up
if not keep_old:
alias_object.delete()
primary_object.save()
return primary_object
| 45.867769 | 148 | 0.700721 |
aceeeb1550431ed6929ed7352a1b7f680f34d6de | 1,066 | py | Python | simplemooc/courses/decorators.py | willsilvano/django-simplemooc | c6b7491ec600fbb0be563c4f3f0a0480f8f52270 | [
"MIT"
] | null | null | null | simplemooc/courses/decorators.py | willsilvano/django-simplemooc | c6b7491ec600fbb0be563c4f3f0a0480f8f52270 | [
"MIT"
] | null | null | null | simplemooc/courses/decorators.py | willsilvano/django-simplemooc | c6b7491ec600fbb0be563c4f3f0a0480f8f52270 | [
"MIT"
] | null | null | null | from django.contrib import messages
from django.shortcuts import get_object_or_404, redirect
from simplemooc.courses.models import Course, Enrollment
def enrollment_required(view_func):
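    """Only allow the view if the user has an approved enrollment in the course
    identified by the 'slug' URL kwarg (staff users always pass). Otherwise the
    user is redirected to the dashboard with an error message; on success the
    course is attached to the request as request.course.
    """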
def _wrapper(request, *args, **kwargs):
slug = kwargs['slug']
course = get_object_or_404(Course, slug=slug)
has_permission = request.user.is_staff
if not has_permission:
try:
enrollment = Enrollment.objects.get(user=request.user, course=course)
except Enrollment.DoesNotExist:
message = 'Desculpe, mas você não tem permissão para acessar esta página'
else:
if enrollment.is_approved():
has_permission = True
else:
message = 'A sua inscrição no curso ainda está pendente'
if not has_permission:
messages.error(request, message)
return redirect('accounts:dashboard')
request.course = course
return view_func(request, *args, **kwargs)
return _wrapper
| 36.758621 | 89 | 0.631332 |
aceeebd1ff1b8dd088c9f5df0edb630f5a108ac8 | 2,991 | py | Python | days/6/solution.py | tschmel/AdventOfCode-2019 | 5e01a98c2bc2a4245e4d16d6c1afbf1a887dd68c | [
"MIT"
] | null | null | null | days/6/solution.py | tschmel/AdventOfCode-2019 | 5e01a98c2bc2a4245e4d16d6c1afbf1a887dd68c | [
"MIT"
] | null | null | null | days/6/solution.py | tschmel/AdventOfCode-2019 | 5e01a98c2bc2a4245e4d16d6c1afbf1a887dd68c | [
"MIT"
] | null | null | null | from collections import defaultdict
class Universe:
def __init__(self):
self.graph = defaultdict(set)
self.planets = set()
def add_connection(self, orbit):
planet_one, planet_two = orbit.split(')')[::-1]
self.graph[planet_one].add(planet_two)
self.register_planet(planet_one)
self.register_planet(planet_two)
def register_planet(self, planet):
self.planets.add(planet)
def add_connections(self, orbits):
for orbit in orbits:
self.add_connection(orbit)
def get_planets_list(self):
return sorted(list(self.planets))
def get_number_of_orbits(self):
number_of_orbits = 0
for planet in self.planets:
            single_path_length, _ = self.move_orbits_up(planet)
            number_of_orbits += single_path_length
return number_of_orbits
def move_orbits_up(self, planet, path=None, number_of_moves=0):
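        """Follow the orbit chain upwards from `planet` towards the centre of mass.

        Returns a tuple (number of hops taken, list of planets visited on the way up).
        """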
if path is None:
path = []
if planet in self.graph.keys():
next_planet = next(iter(self.graph[planet]))
path.append(next_planet)
return self.move_orbits_up(
next_planet, path, number_of_moves + 1
)
else:
return number_of_moves, path
def find_matching_planet(self, path_you, path_san):
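        """Return the last planet shared by both upward paths, i.e. the common
        ancestor closest to YOU and SAN where the two transfer routes meet.
        """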
last_planet = path_you[-1]
for planet_you, planet_san in zip(
path_you[::-1], path_san[::-1]
):
if planet_you == planet_san:
last_planet = planet_you
else:
break
return last_planet
def steps_to_santa(self):
_, you_path = self.move_orbits_up('YOU')
_, san_path = self.move_orbits_up('SAN')
matching_planet = self.find_matching_planet(you_path, san_path)
steps_you = you_path.index(matching_planet)
steps_san = san_path.index(matching_planet)
return steps_you + steps_san
def read_input(path='days/6/data.txt'):
with open(path, 'r') as infile:
planet_map = infile.read()
planet_map = planet_map.split()
return planet_map
def test_orbits():
planet_map = [
'COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H',
'D)I', 'E)J', 'J)K', 'K)L'
]
universe = Universe()
universe.add_connections(planet_map)
assert universe.get_number_of_orbits() == 42
def test_orbits_part_two():
planet_map_part_two = [
'COM)B', 'B)C', 'C)D', 'D)E', 'E)F', 'B)G', 'G)H', 'D)I', 'E)J',
'J)K', 'K)L', 'K)YOU', 'I)SAN'
]
universe_part_two = Universe()
universe_part_two.add_connections(planet_map_part_two)
assert universe_part_two.steps_to_santa() == 4
if __name__ == "__main__":
test_orbits()
planet_map = read_input()
universe = Universe()
universe.add_connections(planet_map)
print('Solution part 1:', universe.get_number_of_orbits())
print('Solution part 2:', universe.steps_to_santa())
| 26.705357 | 72 | 0.611501 |
aceeed377726611dd50b837cfd197b9934cf33c9 | 9,584 | py | Python | reals/core/schedule_classes.py | DanielLSM/real-schedule | 4b12aa2e012b41c670b9c62ccfd67398965b2372 | [
"MIT"
] | null | null | null | reals/core/schedule_classes.py | DanielLSM/real-schedule | 4b12aa2e012b41c670b9c62ccfd67398965b2372 | [
"MIT"
] | null | null | null | reals/core/schedule_classes.py | DanielLSM/real-schedule | 4b12aa2e012b41c670b9c62ccfd67398965b2372 | [
"MIT"
] | null | null | null | import numpy
import pandas as pd
import time
from collections import OrderedDict, defaultdict
from reals.core.resources import f1_in, f2_out
from reals.core.parser import book_to_kwargs_MPO
from reals.core.utils import advance_date, dates_between
def get_calendar(start_date, end_date, type='days'):
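    """Build a day-by-day calendar between start_date and end_date (inclusive).

    Each date maps to its 'allowed' flags (public holidays, a-type and c-type
    checks permitted) and its 'resources' (maintenance slots per check type).
    """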
n_days = dates_between(start_date, end_date, type='days')
calendar = OrderedDict()
for _ in range(n_days + 1):
calendar[advance_date(start_date, days=_)] = {
'allowed': {
'public holidays': True,
'a-type': True,
'c-type': True
},
'resources': {
'slots': {
'a-type': 1,
'c-type': 1
}
}
}
return calendar
class Calendar:
def __init__(self, *args, **kwargs):
self.time_type = kwargs['time_type']
self.start_date = kwargs['start_date']
self.end_date = kwargs['end_date']
self.a_type = kwargs['a-type']
self.c_type = kwargs['c-type']
self.public_holidays = kwargs['all']
print("#########################")
print("INFO: setting up initial calendar")
calendar = get_calendar(self.start_date, self.end_date, type='days')
print("INFO: adding public holidays")
calendar = self.restrict_calendar(
calendar, self.public_holidays['time'], info='public holidays')
print("INFO: adding a-type calendar restrictions")
calendar = self.restrict_calendar(
calendar, self.a_type['time'], info='a-type')
print("INFO: adding c-type calendar restrictions")
calendar = self.restrict_calendar(
calendar, self.c_type['time'], info='c-type')
print("INFO: adding a-type resources (slots)")
calendar = self.add_resources(
calendar, self.a_type['resources'], typek='slots', info='a-type')
print("INFO: adding c-type resources (slots)")
calendar = self.add_resources(
calendar, self.c_type['resources'], typek='slots', info='a-type')
print("#########################")
self.calendar = calendar
print("INFO: calendar complete!")
@staticmethod
def restrict_calendar(calendar, restrict_list, info='not allowed'):
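        """Mark every date in restrict_list that falls inside the calendar range as
        not allowed for the given component (e.g. 'public holidays', 'a-type', 'c-type').
        """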
start_date = list(calendar.keys())[0]
end_date = list(calendar.keys())[-1]
for _ in restrict_list:
if _ > start_date and _ < end_date:
calendar[_]['allowed'][info] = False
return calendar
@staticmethod
def add_resources(calendar, restrict_dict, typek='slots', info='a-type'):
start_date = list(calendar.keys())[0]
end_date = list(calendar.keys())[-1]
for _ in restrict_dict[typek].keys():
if _ > start_date and _ < end_date:
calendar[_]['resources'][typek][info] += 1
return calendar
def plan(self, time_window):
pass
def reset_calendar(self):
pass
def render(self):
pass # something something tkinter?
class Fleet:
def __init__(self, start_date, end_date, *args, **kwargs):
self.start_date = start_date
self.end_date = end_date
self.aircraft_info = kwargs
def due_dates_from_info(self, start_date, end_date):
due_dates = OrderedDict()
time0 = time.time()
for aircraft in self.aircraft_info.keys():
due_dates[aircraft] = {
'a-type':
self.compute_due_dates_type_a(start_date, end_date, aircraft),
'c-type':
self.compute_due_dates_type_c(start_date, end_date, aircraft)
}
print(
"INFO: due dates of aircraft {} globally forecasted ELAPSED TIME {}"
.format(aircraft,
time.time() - time0))
return due_dates
def compute_due_date_type_a(self, start_date, end_date, aircraft):
DY_i = self.aircraft_info[aircraft]['A_Initial']['DY-A']
FH_i = self.aircraft_info[aircraft]['A_Initial']['FH-A']
FC_i = self.aircraft_info[aircraft]['A_Initial']['FC-A']
maxDY = self.aircraft_info[aircraft]['A_Initial']['ACI-DY']
maxFH = self.aircraft_info[aircraft]['A_Initial']['ACI-FH']
maxFC = self.aircraft_info[aircraft]['A_Initial']['ACI-FC']
due_date = self.compute_next_due_date(
aircraft,
start_date,
DY_i=DY_i,
FH_i=FH_i,
FC_i=FC_i,
maxDY=maxDY,
maxFH=maxFH,
maxFC=maxFC)
if due_date >= end_date:
due_date = None
return due_date
def compute_due_date_type_c(self, start_date, end_date, aircraft):
DY_i = self.aircraft_info[aircraft]['C_Initial']['DY-C']
FH_i = self.aircraft_info[aircraft]['C_Initial']['FH-C']
FC_i = self.aircraft_info[aircraft]['C_Initial']['FC-C']
maxDY = self.aircraft_info[aircraft]['C_Initial']['CCI-DY']
maxFH = self.aircraft_info[aircraft]['C_Initial']['CCI-FH']
maxFC = self.aircraft_info[aircraft]['C_Initial']['CCI-FC']
due_date = self.compute_next_due_date(
aircraft,
start_date,
DY_i=DY_i,
FH_i=FH_i,
FC_i=FC_i,
maxDY=maxDY,
maxFH=maxFH,
maxFC=maxFC)
if due_date >= end_date:
due_date = None
return due_date
    # written this way purely for computational speed: one fewer if-check per computation
def compute_next_due_date(self,
aircraft,
last_due_date=0,
DY_i=0,
FH_i=0,
FC_i=0,
maxDY=0,
maxFH=0,
maxFC=0):
DY, FH, FC = DY_i, FH_i, FC_i
        due_date = advance_date(last_due_date, DY)
while DY <= maxDY and FH <= maxFH and FC <= maxFC:
month = last_due_date.month_name()[0:3]
DY += 1
FH += self.aircraft_info[aircraft]['DFH'][month]
FC += self.aircraft_info[aircraft]['DFC'][month]
due_date = advance_date(due_date, days=int(DY))
return due_date
def compute_due_dates_type_a(self, start_date, end_date, aircraft):
due_dates = []
last_due_date = start_date
DY_i = self.aircraft_info[aircraft]['A_Initial']['DY-A']
FH_i = self.aircraft_info[aircraft]['A_Initial']['FH-A']
FC_i = self.aircraft_info[aircraft]['A_Initial']['FC-A']
maxDY = self.aircraft_info[aircraft]['A_Initial']['ACI-DY']
maxFH = self.aircraft_info[aircraft]['A_Initial']['ACI-FH']
maxFC = self.aircraft_info[aircraft]['A_Initial']['ACI-FC']
due_date = self.compute_next_due_date(
aircraft,
last_due_date,
DY_i=DY_i,
FH_i=FH_i,
FC_i=FC_i,
maxDY=maxDY,
maxFH=maxFH,
maxFC=maxFC)
if due_date <= end_date:
due_dates.append(due_date)
while due_date <= end_date:
due_date = self.compute_next_due_date(
aircraft, due_date, maxDY=maxDY, maxFH=maxFH, maxFC=maxFC)
if due_date < end_date:
due_dates.append(due_date)
return due_dates
def compute_due_dates_type_c(self, start_date, end_date, aircraft):
due_dates = []
last_due_date = start_date
DY_i = self.aircraft_info[aircraft]['C_Initial']['DY-C']
FH_i = self.aircraft_info[aircraft]['C_Initial']['FH-C']
FC_i = self.aircraft_info[aircraft]['C_Initial']['FC-C']
maxDY = self.aircraft_info[aircraft]['C_Initial']['CCI-DY']
maxFH = self.aircraft_info[aircraft]['C_Initial']['CCI-FH']
maxFC = self.aircraft_info[aircraft]['C_Initial']['CCI-FC']
due_date = self.compute_next_due_date(
aircraft,
last_due_date,
DY_i=DY_i,
FH_i=FH_i,
FC_i=FC_i,
maxDY=maxDY,
maxFH=maxFH,
maxFC=maxFC)
if due_date <= end_date:
due_dates.append(due_date)
while due_date <= end_date:
due_date = self.compute_next_due_date(
aircraft, due_date, maxDY=maxDY, maxFH=maxFH, maxFC=maxFC)
if due_date < end_date:
due_dates.append(due_date)
return due_dates
# this should be an abc abstract class
class FleetManagerBase:
def __init__(self, *args, **kwargs):
self.calendar = Calendar(**kwargs['restrictions'])
self.start_date = self.calendar.start_date
self.end_date = self.calendar.end_date
self.fleet = Fleet(
start_date=self.start_date,
end_date=self.end_date,
**kwargs['aircraft_info'])
if __name__ == '__main__':
import time
time0 = time.time()
from reals.core.parser import excel_to_book
try:
book = excel_to_book(f1_in)
except Exception as e:
print('you messed up')
raise e
kwargs = book_to_kwargs_MPO(book)
fmb = FleetManagerBase(**kwargs)
fmb.fleet.due_dates_from_info(fmb.calendar.start_date,
fmb.calendar.end_date)
# print('INFO: TOTAL TIME ELAPSED: {}'.format(time.time() - time0))
| 35.761194 | 84 | 0.570952 |
aceeeda02fa0bd4ed1cd1d6f7df906127ac1edc5 | 3,681 | py | Python | soundrts/clientserver.py | Finnboy94/soundrts | 284ffe2c507f4c9e44b4e5fa8c4ef05b6614c6c6 | [
"BSD-3-Clause"
] | 23 | 2015-04-02T16:54:08.000Z | 2022-03-02T09:48:04.000Z | soundrts/clientserver.py | Finnboy94/soundrts | 284ffe2c507f4c9e44b4e5fa8c4ef05b6614c6c6 | [
"BSD-3-Clause"
] | 94 | 2015-03-25T21:05:45.000Z | 2021-12-22T20:05:42.000Z | soundrts/clientserver.py | TifloDev/soundrts | 209695ed80b8746facdcb35f446f0f855c48da84 | [
"BSD-3-Clause"
] | 33 | 2015-05-27T05:53:14.000Z | 2021-12-08T02:45:44.000Z | import sys
import telnetlib
import threading
import time
from . import config
from . import msgparts as mp
from . import options, servermain
from .clientmedia import voice
from .clientservermenu import ServerMenu
from .clientversion import revision_checker
from .lib.log import exception, info
from .version import compatibility_version
class _Error(Exception):
pass
class UnreachableServerError(_Error):
pass
class WrongServerError(_Error):
pass
class CompatibilityOrLoginError(_Error):
pass
class ConnectionAbortedError(_Error):
pass
def server_delay(host, port):
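    """Measure how long the server takes to send its login prompt (a ':' byte).

    Returns the delay in seconds, or None if the server is unreachable or does
    not answer with the expected prompt.
    """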
t = time.time()
try:
with telnetlib.Telnet(host, port, 0.5) as tn:
try:
if tn.read_until(b":", 0.5) != b":":
return
else:
return time.time() - t
except EOFError:
return
except OSError:
return
class ServerInAThread(threading.Thread):
daemon = True
def __init__(self, parameters):
threading.Thread.__init__(self)
self.parameters = parameters
def run(self):
servermain.start_server(self.parameters, is_standalone=False)
def start_server_and_connect(parameters):
info("active threads: %s", threading.enumerate())
ServerInAThread(parameters).start()
time.sleep(0.01) # Linux needs a small delay (at least on the Eee PC 4G)
revision_checker.start_if_needed()
connect_and_play()
info("active threads: %s", threading.enumerate())
sys.exit()
def connect_and_play(host="127.0.0.1", port=options.port, auto=False):
try:
server = ConnectionToServer(host, port)
ServerMenu(server, auto=auto).loop()
server.close() # without this, the server isn't closed after a game
except UnreachableServerError:
voice.alert(mp.SERVER_UNREACHABLE)
except WrongServerError:
voice.alert(mp.UNEXPECTED_REPLY)
except CompatibilityOrLoginError:
voice.alert(mp.CONNECTION_REJECTED + mp.OR_LOGIN_REJECTED)
except ConnectionAbortedError:
voice.alert(mp.CONNECTION_INTERRUPTED)
except SystemExit:
raise
except:
voice.alert(mp.ERROR_DURING_CONNECTION)
exception("error during connection to server")
class ConnectionToServer:
data = b""
tn = None
def __init__(self, host, port):
self.host = host
self.port = port
if host is not None:
self.open()
def open(self):
try:
self.tn = telnetlib.Telnet(self.host, self.port, 1)
except OSError:
raise UnreachableServerError
try:
if self.tn.read_until(b":", 1) != b":":
raise WrongServerError
self.tn.write(
("login " + compatibility_version() + " %s\n" % config.login).encode()
)
except (EOFError, OSError):
raise WrongServerError
try:
if not self.tn.read_until(b"ok!", 1).endswith(b"ok!"):
raise EOFError
except EOFError:
raise CompatibilityOrLoginError
def close(self):
self.tn.close()
def read_line(self):
try:
self.data += self.tn.read_very_eager()
except: # EOFError or (10054, 'Connection reset by peer')
raise ConnectionAbortedError
if b"\n" in self.data:
line, self.data = self.data.split(b"\n", 1)
return line.decode("ascii")
def write_line(self, s):
try:
self.tn.write(s.encode("ascii") + b"\n")
except OSError: # connection aborted
raise ConnectionAbortedError
| 26.292857 | 86 | 0.622929 |
aceeee22d916a60f9843dba464fcebb925130745 | 239 | py | Python | modules/gdscript/config.py | feyleth/Godot_STL | 1f1366eccd0a3e34882378e6d4c1afd7aca0c274 | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 1 | 2022-03-12T03:52:55.000Z | 2022-03-12T03:52:55.000Z | modules/gdscript/config.py | feyleth/Godot_STL | 1f1366eccd0a3e34882378e6d4c1afd7aca0c274 | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | null | null | null | modules/gdscript/config.py | feyleth/Godot_STL | 1f1366eccd0a3e34882378e6d4c1afd7aca0c274 | [
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 3 | 2020-05-10T16:11:23.000Z | 2021-05-30T02:11:28.000Z | def can_build(env, platform):
return True
def configure(env):
pass
def get_doc_classes():
return [
"@GDScript",
"GDScript",
"GDScriptFunctionState",
]
def get_doc_path():
return "doc_classes"
| 14.9375 | 32 | 0.606695 |
aceeee967796668afcbd64e4168a6a026e09ebe6 | 33,679 | py | Python | lib/tool_shed/test/functional/test_0400_repository_component_reviews.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | lib/tool_shed/test/functional/test_0400_repository_component_reviews.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | 6 | 2021-11-11T20:57:49.000Z | 2021-12-10T15:30:33.000Z | lib/tool_shed/test/functional/test_0400_repository_component_reviews.py | beatrizserrano/galaxy | e149d9d32e1bca6c07c38b1a9cdabfee60323610 | [
"CC-BY-3.0"
] | null | null | null | from ..base.twilltestcase import (
common,
ShedTwillTestCase,
)
repository_name = "filtering_0400"
repository_description = "Galaxy filtering tool for test 0400"
repository_long_description = "Long description of Galaxy filtering tool for test 0400"
"""
1. Create users.
2. Grant reviewer role to test_user_2.
3. Check that the review components that are to be tested are defined in this tool shed instance.
4. Create a repository, owned by test_user_1, to be reviewed by test_user_2.
5. Review the datatypes component on the repository.
6. Check that no other components besides datatypes display as reviewed.
7. Review the functional tests component on the repository.
8. Check that only functional tests and datatypes display as reviewed.
9. Review the readme component on the repository.
10. Check that only functional tests, datatypes, and readme display as reviewed.
11. Review the repository dependencies component.
12. Check that only repository dependencies, functional tests, datatypes, and readme display as reviewed.
13. Review the tool dependencies component.
14. Check that only tool dependencies, repository dependencies, functional tests, datatypes, and readme display as reviewed.
15. Review the tools component.
16. Check that only tools, tool dependencies, repository dependencies, functional tests, datatypes, and readme display as reviewed.
17. Review the workflows component.
18. Check that all components display as reviewed.
19. Upload readme.txt to the repository.
20. Copy the previous review, and update the readme component review to reflect the existence of a readme file.
21. Check that the readme component review has been updated, and the other component reviews are present.
22. Upload test data to the repository. This will also create a new changeset revision.
23. Review the functional tests component on the repository, copying the other components from the previous review.
24. Verify that the functional tests component review has been updated, and as in step 21, the other reviews are unchanged.
25. Upload a new version of the tool.
26. Review the new revision's functional tests component.
27. Verify that the functional tests component review displays correctly.
"""
class TestRepositoryComponentReviews(ShedTwillTestCase):
"""Test repository component review features."""
def test_0000_initiate_users(self):
"""Create necessary user accounts and login as an admin user.
We are at step 1.
        Create all the user accounts that are needed for this test script to run independently of other tests.
Previously created accounts will not be re-created.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
test_user_1 = self.test_db_util.get_user(common.test_user_1_email)
assert (
test_user_1 is not None
), f"Problem retrieving user with email {common.test_user_1_email} from the database"
self.test_db_util.get_private_role(test_user_1)
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
assert (
test_user_2 is not None
), f"Problem retrieving user with email {common.test_user_2_email} from the database"
self.test_db_util.get_private_role(test_user_2)
self.login(email=common.admin_email, username=common.admin_username)
admin_user = self.test_db_util.get_user(common.admin_email)
assert admin_user is not None, f"Problem retrieving user with email {common.admin_email} from the database"
self.test_db_util.get_private_role(admin_user)
def test_0005_grant_reviewer_role(self):
"""Grant the repository reviewer role to test_user_2.
We are at step 2.
We now have an admin user (admin_user) and two non-admin users (test_user_1 and test_user_2). Grant the repository
reviewer role to test_user_2, who will not be the owner of the reviewed repositories.
"""
reviewer_role = self.test_db_util.get_role_by_name("Repository Reviewer")
test_user_2 = self.test_db_util.get_user(common.test_user_2_email)
self.grant_role_to_user(test_user_2, reviewer_role)
def test_0010_verify_repository_review_components(self):
"""Ensure that the required review components exist.
We are at step 3.
        Check that the review components that are to be tested are defined in this tool shed instance, and add the
        'Repository dependencies' component if it is not already defined.
"""
strings_not_displayed = ["Repository dependencies"]
self.manage_review_components(strings_not_displayed=strings_not_displayed)
self.add_repository_review_component(
name="Repository dependencies",
description="Repository dependencies defined in a file named repository_dependencies.xml included in the repository",
)
strings_displayed = [
"Data types",
"Functional tests",
"README",
"Repository dependencies",
"Tool dependencies",
"Tools",
"Workflows",
]
self.manage_review_components(strings_displayed=strings_displayed)
def test_0015_create_repository(self):
"""Create and populate the filtering repository
We are at step 4.
Log in as test_user_1 and create the filtering repository, then upload a basic set of
components to be reviewed in subsequent tests.
"""
category = self.create_category(
name="Test 0400 Repository Component Reviews", description="Test 0400 Repository Component Reviews"
)
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
strings_displayed = self.expect_repo_created_strings(repository_name)
repository = self.get_or_create_repository(
name=repository_name,
description=repository_description,
long_description=repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id(category.id),
strings_displayed=strings_displayed,
)
self.upload_file(
repository,
filename="filtering/filtering_1.1.0.tar",
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message="Uploaded filtering 1.1.0 tarball.",
strings_displayed=[],
strings_not_displayed=[],
)
def test_0020_review_initial_revision_data_types(self):
"""Review the datatypes component for the current tip revision.
We are at step 5.
Log in as test_user_2 and review the data types component of the filtering repository owned by test_user_1.
# Review this revision:
# Data types (N/A)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {"Data types": dict()}
self.create_repository_review(repository, review_contents_dict)
def test_0025_verify_datatype_review(self):
"""Verify that the datatypes component review displays correctly.
We are at step 6.
Log in as test_user_1 and check that the filtering repository only has a review for the data types component.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Data types", "not_applicable"]
strings_not_displayed = [
"Functional tests",
"README",
"Repository dependencies",
"Tool dependencies",
"Tools",
"Workflows",
]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0030_review_initial_revision_functional_tests(self):
"""Review the functional tests component for the current tip revision.
We are at step 7.
Log in as test_user_2 and review the functional tests component for this repository. Since the repository
has not been altered, this will update the existing review to add a component.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {
"Functional tests": dict(rating=1, comment="Functional tests missing", approved="no", private="yes")
}
self.review_repository(repository, review_contents_dict, user)
# def test_0030_verify_review_display( self ):
# """Verify that private reviews are restricted to owner and reviewer, and non-private views are viewable by others."""
# # Currently not implemented because third parties cannot view reviews whether they are private or not.
# self.login( email=common.test_user_3_email, username=common.test_user_3_name )
def test_0035_verify_functional_test_review(self):
"""Verify that the functional tests component review displays correctly.
We are at step 8.
Log in as test_user_1 and check that the filtering repository now has reviews
for the data types and functional tests components. Since the functional tests component was not marked as 'Not applicable',
also check for the review comment.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Functional tests", "Functional tests missing", "no"]
strings_not_displayed = ["README", "Repository dependencies", "Tool dependencies", "Tools", "Workflows"]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0040_review_readme(self):
"""Review the readme component for the current tip revision.
We are at step 9.
Log in as test_user_2 and update the review with the readme component marked as 'Not applicable'.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
# README (N/A)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {"README": dict()}
self.review_repository(repository, review_contents_dict, user)
def test_0045_verify_readme_review(self):
"""Verify that the readme component review displays correctly.
We are at step 10.
Log in as test_user_1 and verify that the repository component reviews now include a review for the readme component.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["README", "not_applicable"]
strings_not_displayed = ["Repository dependencies", "Tool dependencies", "Tools", "Workflows"]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0050_review_repository_dependencies(self):
"""Review the repository dependencies component for the current tip revision.
We are at step 11.
Log in as test_user_2 and update the review with the repository dependencies component marked as 'Not applicable'.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
# README (N/A)
# Repository dependencies (N/A)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {"Repository dependencies": dict()}
self.review_repository(repository, review_contents_dict, user)
def test_0055_verify_repository_dependency_review(self):
"""Verify that the repository dependencies component review displays correctly.
We are at step 12.
Log in as test_user_1 and verify that the repository component reviews now include a review
for the repository dependencies component.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Repository dependencies", "not_applicable"]
strings_not_displayed = ["Tool dependencies", "Tools", "Workflows"]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0060_review_tool_dependencies(self):
"""Review the tool dependencies component for the current tip revision.
We are at step 13.
Log in as test_user_2 and update the review with the tool dependencies component marked as 'Not applicable'.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
# README (N/A)
# Repository dependencies (N/A)
# Tool dependencies (N/A)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {"Tool dependencies": dict()}
self.review_repository(repository, review_contents_dict, user)
def test_0065_verify_tool_dependency_review(self):
"""Verify that the tool dependencies component review displays correctly.
We are at step 14.
Log in as test_user_1 and verify that the repository component reviews now include a review
for the tool dependencies component.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Tool dependencies", "not_applicable"]
strings_not_displayed = ["Tools", "Workflows"]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0070_review_tools(self):
"""Review the tools component for the current tip revision.
We are at step 15.
Log in as test_user_2 and update the review with the tools component given
a favorable review, with 5 stars, and approved status.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
# README (N/A)
# Repository dependencies (N/A)
# Tool dependencies (N/A)
# Tools (5 stars, good review)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {
"Tools": dict(rating=5, comment="Excellent tool, easy to use.", approved="yes", private="no")
}
self.review_repository(repository, review_contents_dict, user)
def test_0075_verify_tools_review(self):
"""Verify that the tools component review displays correctly.
We are at step 16.
Log in as test_user_1 and verify that the repository component reviews now include a review
for the tools component. As before, check for the presence of the comment on this review.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Tools", "yes", "Excellent tool, easy to use."]
strings_not_displayed = ["Workflows"]
self.verify_repository_reviews(
repository, reviewer=user, strings_displayed=strings_displayed, strings_not_displayed=strings_not_displayed
)
def test_0080_review_workflows(self):
"""Review the workflows component for the current tip revision.
We are at step 17.
Log in as test_user_2 and update the review with the workflows component marked as 'Not applicable'.
# Review this revision:
# Data types (N/A)
# Functional tests (One star, comment 'functional tests missing')
# README (N/A)
# Repository dependencies (N/A)
# Tool dependencies (N/A)
# Tools (5 stars, good review)
# Workflows (N/A)
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {"Workflows": dict()}
self.review_repository(repository, review_contents_dict, user)
def test_0085_verify_workflows_review(self):
"""Verify that the workflows component review displays correctly.
We are at step 18.
Log in as test_user_1 and verify that the repository component reviews now include a review
for the workflows component.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Workflows", "not_applicable"]
self.verify_repository_reviews(repository, reviewer=user, strings_displayed=strings_displayed)
def test_0090_upload_readme_file(self):
"""Upload a readme file to the filtering repository.
We are at step 19.
Log in as test_user_1, the repository owner, and upload readme.txt to the repository. This will create
a new changeset revision for this repository, which will need to be reviewed.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
self.upload_file(
repository,
filename="readme.txt",
filepath=None,
valid_tools_only=True,
uncompress_file=False,
remove_repo_files_not_in_tar=False,
commit_message="Uploaded readme.txt.",
strings_displayed=[],
strings_not_displayed=[],
)
def test_0095_review_new_changeset_readme_component(self):
"""Update the filtering repository's readme component review to reflect the presence of the readme file.
We are at step 20.
There is now a new changeset revision in the repository's changelog, but it has no review associated with it.
Get the previously reviewed changeset hash, and pass that and the review id to the create_repository_review
method, in order to copy the previous review's contents. Then update the new review to reflect the presence of
a readme file.
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# Get the last changeset revision that has a review associated with it.
last_review = self.get_last_reviewed_revision_by_user(user, repository)
if last_review is None:
raise AssertionError("Previous review expected, none found.")
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {
"README": dict(
rating=5,
comment="Clear and concise readme file, a true pleasure to read.",
approved="yes",
private="no",
)
}
self.create_repository_review(
repository,
review_contents_dict,
changeset_revision=self.get_repository_tip(repository),
copy_from=(str(last_review.changeset_revision), last_review.id),
)
def test_0100_verify_readme_review(self):
"""Verify that the readme component review displays correctly.
We are at step 21.
Log in as the repository owner (test_user_1) and check the repository component reviews to
verify that the readme component is now reviewed and approved.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["README", "yes", "Clear and concise readme file, a true pleasure to read."]
self.verify_repository_reviews(repository, reviewer=user, strings_displayed=strings_displayed)
def test_0105_upload_test_data(self):
"""Upload the missing test data to the filtering repository.
We are at step 22.
Remain logged in as test_user_1 and upload test data to the repository. This will also create a
new changeset revision that needs to be reviewed. This will replace the changeset hash associated with
        the last downloadable revision, but the last repository review will still be associated with the
        last downloadable revision hash.
"""
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
self.upload_file(
repository,
filename="filtering/filtering_test_data.tar",
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message="Uploaded filtering test data.",
strings_displayed=[],
strings_not_displayed=[],
)
def test_0110_review_new_changeset_functional_tests(self):
"""Update the filtering repository's readme component review to reflect the presence of the readme file.
We are at step 23.
Log in as test_user_2 and get the last reviewed changeset hash, and pass that and the review id to
the create_repository_review method, then update the copied review to approve the functional tests
component.
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
# Get the changeset immediately prior to the tip, and pass it to the create review method.
last_review = self.get_last_reviewed_revision_by_user(user, repository)
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {
"Functional tests": dict(rating=5, comment="A good set of functional tests.", approved="yes", private="no")
}
self.create_repository_review(
repository,
review_contents_dict,
changeset_revision=self.get_repository_tip(repository),
copy_from=(str(last_review.changeset_revision), last_review.id),
)
def test_0115_verify_functional_tests_review(self):
"""Verify that the functional tests component review displays correctly.
We are at step 24.
Log in as the repository owner, test_user_1, and verify that the new revision's functional tests component
review has been updated with an approved status and favorable comment.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = ["Functional tests", "yes", "A good set of functional tests."]
self.verify_repository_reviews(repository, reviewer=user, strings_displayed=strings_displayed)
def test_0120_upload_new_tool_version(self):
"""Upload filtering 2.2.0 to the filtering repository.
We are at step 25.
Log in as test_user_1 and upload a new version of the tool to the filtering repository. This will create
a new downloadable revision, with no associated repository component reviews.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
self.upload_file(
repository,
filename="filtering/filtering_2.2.0.tar",
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message="Uploaded filtering 2.2.0 tarball.",
strings_displayed=[],
strings_not_displayed=[],
)
def test_0125_review_new_changeset_functional_tests(self):
"""Update the filtering repository's review to apply to the new changeset with filtering 2.2.0.
We are at step 26.
Log in as test_user_2 and copy the last review for this repository to the new changeset. Then
update the tools component review to refer to the new tool version.
"""
self.login(email=common.test_user_2_email, username=common.test_user_2_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
last_review = self.get_last_reviewed_revision_by_user(user, repository)
# Something needs to change so that the review will save.
# The create_repository_review method takes a dict( component label=review contents ).
# If review_contents is empty, it marks that component as not applicable. The review
# contents dict should have the structure:
# {
# rating: 1-5,
# comment: <text>
# approved: yes/no
# private: yes/no
# }
review_contents_dict = {
"Tools": dict(
rating=5,
comment="Version 2.2.0 does the impossible and improves this tool.",
approved="yes",
private="yes",
)
}
self.create_repository_review(
repository,
review_contents_dict,
changeset_revision=self.get_repository_tip(repository),
copy_from=(str(last_review.changeset_revision), last_review.id),
)
def test_0135_verify_review_for_new_version(self):
"""Verify that the reviews display correctly for this changeset revision.
We are at step 27.
Log in as test_user_1 and check that the tools component review is for filtering 2.2.0, but that the other component
reviews had their contents copied from the last reviewed changeset.
"""
self.login(email=common.test_user_1_email, username=common.test_user_1_name)
repository = self.test_db_util.get_repository_by_name_and_owner(repository_name, common.test_user_1_name)
user = self.test_db_util.get_user(common.test_user_2_email)
strings_displayed = [
"Data types",
"Functional tests",
"yes",
"A good set of functional tests.",
"README",
"yes",
"Workflows",
"Tools",
]
strings_displayed.extend(
["Clear and concise readme file, a true pleasure to read.", "Tool dependencies", "not_applicable"]
)
strings_displayed.extend(
["Repository dependencies", "Version 2.2.0 does the impossible and improves this tool."]
)
self.verify_repository_reviews(repository, reviewer=user, strings_displayed=strings_displayed)
| 50.493253 | 132 | 0.690074 |
aceeeec1bf82d8ca7dc2a5f647deaea4fb84a878 | 2,400 | py | Python | catalyst_rl/utils/scripts.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 46 | 2020-03-27T20:12:32.000Z | 2021-11-21T19:08:51.000Z | catalyst_rl/utils/scripts.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 2 | 2020-04-06T10:43:04.000Z | 2020-07-01T18:26:10.000Z | catalyst_rl/utils/scripts.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 5 | 2020-04-17T14:09:53.000Z | 2021-05-10T08:58:29.000Z | from importlib.util import module_from_spec, spec_from_file_location
import os
import pathlib
import shutil
import sys
from .misc import get_utcnow_time
from .notebook import save_notebook
def import_module(expdir: pathlib.Path):
# @TODO: better PYTHONPATH handling
if not isinstance(expdir, pathlib.Path):
expdir = pathlib.Path(expdir)
sys.path.insert(0, str(expdir.absolute()))
sys.path.insert(0, os.path.dirname(str(expdir.absolute())))
s = spec_from_file_location(
expdir.name,
str(expdir.absolute() / "__init__.py"),
submodule_search_locations=[expdir.absolute()]
)
m = module_from_spec(s)
s.loader.exec_module(m)
sys.modules[expdir.name] = m
return m
def _tricky_dir_copy(dir_from, dir_to):
os.makedirs(dir_to, exist_ok=True)
shutil.rmtree(dir_to)
shutil.copytree(dir_from, dir_to)
def dump_code(expdir, logdir):
expdir = expdir[:-1] if expdir.endswith("/") else expdir
new_src_dir = f"code"
# @TODO: hardcoded
old_pro_dir = os.path.dirname(os.path.abspath(__file__)) + "/../"
new_pro_dir = os.path.join(logdir, new_src_dir, "catalyst_rl")
_tricky_dir_copy(old_pro_dir, new_pro_dir)
old_expdir = os.path.abspath(expdir)
expdir_ = os.path.basename(old_expdir)
new_expdir = os.path.join(logdir, new_src_dir, expdir_)
_tricky_dir_copy(old_expdir, new_expdir)
def dump_python_files(src, dst):
py_files = list(src.glob("*.py"))
ipynb_files = list(src.glob("*.ipynb"))
for filepath in ipynb_files:
save_notebook(filepath)
py_files += ipynb_files
py_files = list(set(py_files))
for py_file in py_files:
shutil.copy2(f"{str(py_file.absolute())}", f"{dst}/{py_file.name}")
def import_experiment_and_runner(expdir: pathlib.Path):
if not isinstance(expdir, pathlib.Path):
expdir = pathlib.Path(expdir)
m = import_module(expdir)
Experiment, Runner = m.Experiment, m.Runner
return Experiment, Runner
def dump_base_experiment_code(src: pathlib.Path, dst: pathlib.Path):
utcnow = get_utcnow_time()
dst_ = dst.joinpath("code")
dst = dst.joinpath(f"code-{utcnow}") if dst_.exists() else dst_
os.makedirs(dst, exist_ok=True)
dump_python_files(src, dst)
__all__ = [
"import_module", "dump_code", "dump_python_files",
"import_experiment_and_runner", "dump_base_experiment_code"
]
| 29.62963 | 75 | 0.702083 |
aceef089357c29c9cc602f7d273d0cea62a8782c | 10,750 | py | Python | vistrails/packages/matplotlib/diff.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | 1 | 2015-05-11T16:46:49.000Z | 2015-05-11T16:46:49.000Z | vistrails/packages/matplotlib/diff.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | null | null | null | vistrails/packages/matplotlib/diff.py | celiafish/VisTrails | d8cb575b8b121941de190fe608003ad1427ef9f6 | [
"BSD-3-Clause"
] | null | null | null | import re
from xml.etree import ElementTree as ET
from specs import SpecList, ModuleSpec, InputPortSpec, OutputPortSpec, \
AlternatePortSpec
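# Overview comments added for clarity; they are not part of the original source.
# This module is driven by the usage()/__main__ block at the bottom and has two modes:
#   * "compute": compute_diff() compares a raw spec XML file against a hand-edited one
#     and records every added/deleted/changed module and port spec in a *_diff.xml file.
#   * "apply": apply_diff() replays such a diff file on top of a raw spec XML to
#     regenerate the curated version.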
def compute_ps_diff(root, in_ps_list, out_ps_list, code_ref, qualifier,
port=None):
if qualifier == "alternate":
if port is None:
raise ValueError("Must specify port with alternate")
out_port_specs = dict((ps.name, ps) for ps in out_ps_list)
in_port_specs = dict((ps.name, ps) for ps in in_ps_list)
else:
out_port_specs = dict((ps.arg, ps) for ps in out_ps_list)
in_port_specs = dict((ps.arg, ps) for ps in in_ps_list)
out_port_specs_set = set(out_port_specs.iterkeys())
in_port_specs_set = set(in_port_specs.iterkeys())
for arg in in_port_specs_set - out_port_specs_set:
print "- %s.%s.%s" % (code_ref, qualifier, arg)
elt = ET.Element("deletePortSpec")
elt.set("code_ref", code_ref)
if qualifier == "alternate":
elt.set("port", port)
elt.set("altName", arg)
else:
elt.set("port", arg)
elt.set("type", qualifier)
root.append(elt)
for arg, out_ps in out_port_specs.iteritems():
if arg not in in_port_specs:
print "out_ps:", out_ps
print "+ %s.%s.%s %s" % (code_ref, qualifier, arg,
ET.tostring(out_ps.to_xml()))
elt = ET.Element("addPortSpec")
elt.set("code_ref", code_ref)
if qualifier == "alternate":
elt.set("port", port)
elt.set("altName", arg)
else:
elt.set("port", arg)
elt.set("type", qualifier)
subelt = ET.Element("value")
subelt.append(out_ps.to_xml())
elt.append(subelt)
root.append(elt)
continue
in_ps = in_port_specs[arg]
if qualifier == "output":
attr_list = OutputPortSpec.attrs
elif qualifier == "input":
attr_list = InputPortSpec.attrs
elif qualifier == "alternate":
attr_list = AlternatePortSpec.attrs
else:
raise ValueError('Unknown port type "%s"' % qualifier)
for attr in attr_list:
in_val = getattr(in_ps, attr)
out_val = getattr(out_ps, attr)
if in_val != out_val:
print "C %s.%s.%s.%s %s" % (code_ref, qualifier, arg, attr,
out_val)
elt = ET.Element("changePortSpec")
elt.set("code_ref", code_ref)
if qualifier == "alternate":
elt.set("port", port)
elt.set("altName", arg)
else:
elt.set("port", arg)
elt.set("type", qualifier)
elt.set("attr", attr)
subelt = ET.Element("value")
subelt.text = str(out_val)
elt.append(subelt)
root.append(elt)
# only do this for input right now
if qualifier == "input":
compute_ps_diff(root, in_ps.alternate_specs, out_ps.alternate_specs,
code_ref, "alternate", arg)
def compute_diff(in_fname, out_fname, diff_fname):
in_specs = SpecList.read_from_xml(in_fname)
out_specs = SpecList.read_from_xml(out_fname)
in_refs = dict((spec.code_ref, spec) for spec in in_specs.module_specs)
out_refs = dict((spec.code_ref, spec) for spec in out_specs.module_specs)
in_refs_set = set(in_refs.iterkeys())
out_refs_set = set(out_refs.iterkeys())
root = ET.Element("diff")
if in_specs.custom_code != out_specs.custom_code:
elt = ET.Element("changeCustomCode")
subelt = ET.Element("value")
subelt.text = out_specs.custom_code
elt.append(subelt)
root.append(elt)
for ref in in_refs_set - out_refs_set:
print "- %s" % ref
elt = ET.Element("deleteModule")
elt.set("code_ref", ref)
root.append(elt)
for code_ref, out_spec in out_refs.iteritems():
        # need to check port specs: which were removed, which were added
        if code_ref not in in_refs:
            print "+ %s %s" % (code_ref, ET.tostring(out_spec.to_xml()))
            elt = ET.Element("addModule")
            elt.set("code_ref", code_ref)
subelt = ET.Element("value")
subelt.append(out_spec.to_xml())
elt.append(subelt)
root.append(elt)
continue
in_spec = in_refs[code_ref]
for attr in ModuleSpec.attrs:
in_val = getattr(in_spec, attr)
out_val = getattr(out_spec, attr)
if in_val != out_val:
print "C %s.%s %s" % (out_spec.code_ref, attr, out_val)
elt = ET.Element("changeModule")
elt.set("code_ref", out_spec.code_ref)
elt.set("attr", attr)
subelt = ET.Element("value")
subelt.text = str(out_val)
elt.append(subelt)
root.append(elt)
compute_ps_diff(root, in_spec.port_specs, out_spec.port_specs,
code_ref, "input")
compute_ps_diff(root, in_spec.output_port_specs,
out_spec.output_port_specs,
code_ref, "output")
tree = ET.ElementTree(root)
def indent(elem, level=0):
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
indent(tree.getroot())
tree.write(diff_fname)
def apply_diff(in_fname, diff_fname, out_fname):
in_specs = SpecList.read_from_xml(in_fname)
in_refs = dict((spec.code_ref, (i, spec))
for i, spec in enumerate(in_specs.module_specs))
in_ips_refs = dict(((spec.code_ref, ps.arg), (i, ps))
for spec in in_specs.module_specs
for i, ps in enumerate(spec.port_specs))
in_ops_refs = dict(((spec.code_ref, ps.arg), (i, ps))
for spec in in_specs.module_specs
for i, ps in enumerate(spec.output_port_specs))
in_alt_refs = dict(((spec.code_ref, ps.arg, alt_ps.name), (i, ps))
for spec in in_specs.module_specs
for ps in spec.port_specs
for i, alt_ps in enumerate(ps.alternate_specs))
tree = ET.parse(diff_fname)
for elt in tree.getroot():
if elt.tag == "changeCustomCode":
val = elt.getchildren()[0].text
in_specs.custom_code = val
continue
code_ref = elt.get("code_ref")
m_spec = in_refs[code_ref][1]
port = elt.get("port", None)
if port:
port_type = elt.get("type")
if port_type == "alternate":
alt_name = elt.get("altName")
if elt.tag.startswith('delete'):
if port:
if port_type == "input":
idx = in_ips_refs[(code_ref, port)][0]
del m_spec.port_specs[idx]
elif port_type == 'output':
idx = in_ops_refs[(code_ref, port)][0]
del m_spec.output_port_specs[idx]
elif port_type == 'alternate':
ps = in_ips_refs[(code_ref, port)][1]
idx = in_alt_refs[(code_ref, port, alt_name)][0]
del ps.alternate_specs[idx]
else:
raise ValueError('Cannot access list of type "%s"' %
port_type)
else:
idx = in_refs[code_ref][0]
del in_specs.module_specs[idx]
elif elt.tag.startswith('add'):
for child in elt.getchildren():
if child.tag == 'value':
for subchild in child.getchildren():
value = subchild
if port:
if port_type == "input":
m_spec.port_specs.append(InputPortSpec.from_xml(value))
elif port_type == "output":
m_spec.output_port_specs.append(
OutputPortSpec.from_xml(value))
elif port_type == "alternate":
ps = in_ips_refs[(code_ref, port)][1]
ps.alternate_specs.append(AlternatePortSpec.from_xml(value))
else:
raise ValueError('Cannot access list of type "%s"' %
port_type)
else:
in_specs.module_specs.append(ModuleSpec.from_xml(value))
elif elt.tag.startswith('change'):
attr = elt.get("attr")
for child in elt.getchildren():
if child.tag == 'value':
value = child.text
if port:
# KLUDGE to fix change from output_type to port_type
if attr == "output_type":
attr = "port_type"
if port_type == "input":
port_spec = in_ips_refs[(code_ref, port)][1]
setattr(port_spec, attr, value)
elif port_type == "output":
port_spec = in_ops_refs[(code_ref, port)][1]
setattr(port_spec, attr, value)
elif port_type == "alternate":
port_spec = in_alt_refs[(code_ref, port, alt_name)][1]
setattr(port_spec, attr, value)
else:
setattr(m_spec, attr, value)
in_specs.write_to_xml(out_fname)
def run_compute():
compute_diff("mpl_artists_raw.xml", "mpl_artists.xml",
"mpl_artists_diff.xml")
compute_diff("mpl_plots_raw.xml", "mpl_plots.xml", "mpl_plots_diff.xml")
def run_apply():
apply_diff("mpl_artists_raw.xml", "mpl_artists_diff.xml", "mpl_artists.xml")
apply_diff("mpl_plots_raw.xml", "mpl_plots_diff.xml", "mpl_plots.xml")
def usage():
print "Usage: %s %s [apply|compute]" % (sys.executable, sys.argv[0])
if __name__ == '__main__':
import sys
if len(sys.argv) < 2:
usage()
elif sys.argv[1] == "apply":
run_apply()
elif sys.argv[1] == "compute":
run_compute()
else:
usage()
| 39.233577 | 80 | 0.531442 |
aceef0c54aeaa0701c81c489aff7411dc3594461 | 2,050 | py | Python | 06-higher-order-functions/main.py | johnehunt/PythonIntroLabsDS | d0d63730e9bacefa99225559be98fb933eed15c8 | [
"Apache-2.0"
] | 1 | 2021-05-16T10:24:19.000Z | 2021-05-16T10:24:19.000Z | 06-higher-order-functions/main.py | johnehunt/PythonIntroLabsDS | d0d63730e9bacefa99225559be98fb933eed15c8 | [
"Apache-2.0"
] | null | null | null | 06-higher-order-functions/main.py | johnehunt/PythonIntroLabsDS | d0d63730e9bacefa99225559be98fb933eed15c8 | [
"Apache-2.0"
] | null | null | null | # Illustrates use of higher order functions such as filter, map and reduce
from functools import reduce
def average(data):
return sum(data) / len(data)
def median(data):
sorted_data = sorted(data)
data_length = len(data)
index = (data_length - 1) // 2
if data_length % 2:
return sorted_data[index]
else:
return (sorted_data[index] +
sorted_data[index + 1]) / 2.0
def minimum(data, index=0):
if index == 0:
data_slice = data
else:
data_slice = data[index:]
return min(data_slice)
def maximum(data, index=0):
if index == 0:
data_slice = data
else:
data_slice = data[index:]
return max(data_slice)
def data_range(data):
return minimum(data), maximum(data)
def celsius_to_fahrenheit(celsius):
return (celsius * 9 / 5) + 32
def fahrenheit_to_celsius(fahrenheit):
return (fahrenheit - 32) * 5/9
# Set up the data the data file
readings = [13.5, 12.6, 15.3, 12.2, 16.6, 14.6, 15.6]
print(f'Readings: {readings}')
# Find minimum, maximum etc in readings
print('Min temp in list =', minimum(readings))
print('Max temp in list =', maximum(readings))
print('Average temperature = {:.2f}'.format(average(readings)))
print('Median temperature value =', median(readings))
readings_range = data_range(readings)
print(f'Range of temperatures from {readings_range[0]} to {readings_range[1]}')
# Convert all the temperatures from Celsius to fahrenheit
fahrenheit_temperatures = list(map(celsius_to_fahrenheit, readings))
print(f'Fahrenheit Temperatures: {fahrenheit_temperatures}')
# Find all temperatures above 14.0
higher_temperatures = list(filter(lambda r: r > 14.0, readings))
print(f'Temperatures above 14.0: {higher_temperatures}')
# Total all the readings
result = reduce(lambda total, value: total + value, readings)
print(f'Total value of all readings is {result}')
# Convert all readings above 15.5 to fahrenheit
converted_temperatures = list(map(celsius_to_fahrenheit, filter(lambda r: r > 15.5, readings)))
print(f'Fahrenheit Temperatures above 15.5c: {converted_temperatures}')
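# Extra illustration (not part of the original lab code): the same filter + map
# pipeline written as a list comprehension, which is often the more idiomatic form.
comprehension_temperatures = [celsius_to_fahrenheit(r) for r in readings if r > 15.5]
print(f'Comprehension equivalent: {comprehension_temperatures}')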
print('Done')
| 25.625 | 95 | 0.730244 |
aceef1815b05c8a03719397ece01f173c36810f7 | 15,529 | py | Python | grow/conversion/content_locale_split.py | tabulon-ext/grow | 2929c3e9b467a7768d5b5055fe965fbb5106603f | [
"MIT"
] | 335 | 2016-04-02T20:12:21.000Z | 2022-03-28T18:55:26.000Z | grow/conversion/content_locale_split.py | tabulon-ext/grow | 2929c3e9b467a7768d5b5055fe965fbb5106603f | [
"MIT"
] | 784 | 2016-04-01T16:56:41.000Z | 2022-03-05T01:25:34.000Z | grow/conversion/content_locale_split.py | tabulon-ext/grow | 2929c3e9b467a7768d5b5055fe965fbb5106603f | [
"MIT"
] | 54 | 2016-05-03T13:06:15.000Z | 2021-09-24T04:46:23.000Z | """
Utility for converting Grow sites that use multiple locales in one repository
over to a format that uses separate files for each locale.
This is required for all versions of Grow after 0.1.0.
Usage:
grow convert --type=content_locale_split
"""
from boltons import iterutils
from collections import OrderedDict
import collections
import copy
import logging
import os
import re
import yaml
try:
from yaml import CLoader as yaml_Loader
except ImportError:
from yaml import Loader as yaml_Loader
class Error(Exception):
def __init__(self, message):
super(Error, self).__init__(message)
self.message = message
class LocaleExistsError(Error):
pass
class LocaleMissingError(Error):
pass
BOUNDARY_REGEX = re.compile(r'^-{3,}$', re.MULTILINE)
DEFAULT_LOCALE_REGEX = re.compile(r'^[ ]{2,4}default_locale:[ ]+(.*)')
LOCALE_REGEX = re.compile(r'^\$locale:(.*)')
LOCALES_REGEX = re.compile(r'^\$locales:$')
LOCALIZED_KEY_REGEX = re.compile('(.*)@([^@]+)$')
LOCALIZATION_REGEX = re.compile(r'^\$localization:$')
ARRAY_ITEM_REGEX = re.compile(r'^[ ]*-[ ]+(.*)')
SUB_ITEM_REGEX = re.compile(r'^[ ]{2,4}')
COMBINED_TEMPLATE = '---\n{}\n---\n{}\n'
SINGLE_TEMPLATE = '{}\n'
def _update_deep(orig_dict, new_dict):
for k, v in new_dict.items():
if (k in orig_dict and isinstance(orig_dict[k], dict)
and isinstance(new_dict[k], collections.Mapping)):
_update_deep(orig_dict[k], new_dict[k])
else:
orig_dict[k] = new_dict[k]
class PlainText(object):
def __init__(self, tag, value):
self.tag = tag
self.value = value
class PlainTextYamlLoader(yaml_Loader):
def construct_plaintext(self, node):
return PlainText(node.tag, node.value)
class PlainTextYamlDumper(yaml.Dumper):
pass
def dict_constructor(loader, node):
return OrderedDict(loader.construct_pairs(node))
def dict_representer(dumper, data):
return dumper.represent_dict(data.items())
def plain_text_representer(dumper, data):
return dumper.represent_scalar(data.tag, data.value)
# Don't want to actually process the constructors, just keep the values
_mapping_tag = yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG
PlainTextYamlDumper.add_representer(OrderedDict, dict_representer)
PlainTextYamlDumper.add_representer(PlainText, plain_text_representer)
PlainTextYamlLoader.add_constructor(_mapping_tag, dict_constructor)
PlainTextYamlLoader.add_constructor(
'!_', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.csv', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.doc', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.json', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.static', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.url', PlainTextYamlLoader.construct_plaintext)
PlainTextYamlLoader.add_constructor(
'!g.yaml', PlainTextYamlLoader.construct_plaintext)
class ConversionDocument(object):
def __init__(self, pod, file_name, default_locale):
self.default_locale = default_locale
self.pod = pod
self.file_name = file_name
self.raw_content = pod.read_file(file_name)
self.normalize_raw_content()
@staticmethod
def determine_default_locale(front_matter):
parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
if '$localization' in parsed:
return parsed['$localization'].get('default_locale', None)
return None
@staticmethod
def determine_locales(front_matter, default_locale=None,
remove_default_locale=True, remove_locales=True):
if not front_matter:
return [], None
parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
if isinstance(parsed, str):
parsed = OrderedDict()
locales = parsed.get('$locales', [])
if '$locale' in parsed:
locales.append(parsed['$locale'])
if remove_default_locale:
if default_locale in locales:
locales.pop(locales.index(default_locale))
if '$locales' in parsed and default_locale in parsed['$locales']:
parsed['$locales'].pop(
parsed['$locales'].index(default_locale))
if '$locale' in parsed and parsed['$locale'] == default_locale:
del parsed['$locale']
if remove_locales:
if '$locales' in parsed:
del parsed['$locales']
if '$locale' in parsed:
del parsed['$locale']
return locales, yaml.dump(
parsed, Dumper=PlainTextYamlDumper,
allow_unicode=True, default_flow_style=False).strip() if parsed else ''
@staticmethod
def convert_for_locale(front_matter, locale, base=None):
if not front_matter:
parsed = {}
else:
parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
def visit(path, key, value):
if not isinstance(key, str):
return key, value
if key.endswith('@#'):
return key, value
match = LOCALIZED_KEY_REGEX.match(key)
if not match:
return key, value
base_key = match.group(1)
locale_from_key = match.group(2)
if locale_from_key == locale:
# If there is a key without the trailing @ then override it.
parent = parsed
for path_key in path:
parent = parent[path_key]
if base_key in parent:
return base_key, value
return '{}@'.format(base_key), value
return False
parsed = iterutils.remap(parsed, visit=visit)
# If there are pre-existing fields, use them as a base for the locale
# specific values.
result = base or {}
_update_deep(result, parsed)
return result
@staticmethod
def format_file(front_matter=None, content=None):
if front_matter is None or front_matter.strip() == '':
return SINGLE_TEMPLATE.format(content.lstrip())
if content is None or content.strip() == '':
return SINGLE_TEMPLATE.format(front_matter.lstrip())
return COMBINED_TEMPLATE.format(front_matter.lstrip(), content.lstrip())
@staticmethod
def gather_for_locale(front_matter, locale):
if not front_matter:
return ''
parsed = yaml.load(front_matter, Loader=PlainTextYamlLoader)
locale_extra = OrderedDict()
def visit(path, key, value):
if not isinstance(key, str):
return key, value
if key.endswith('@#'):
return key, value
match = LOCALIZED_KEY_REGEX.match(key)
if not match:
return key, value
base_key = match.group(1)
locale_from_key = match.group(2)
if locale_from_key == locale:
# If there is a key without the trailing @ then override it.
parent = parsed
locale_parent = locale_extra
for path_key in path:
parent = parent[path_key]
if isinstance(locale_parent, list):
locale_parent = locale_parent[path_key]
elif path_key not in locale_parent:
if isinstance(parent, list):
locale_parent[path_key] = copy.deepcopy(parent)
else:
locale_parent[path_key] = OrderedDict()
locale_parent = locale_parent[path_key]
else:
locale_parent = locale_parent[path_key]
if base_key in parent:
locale_parent[base_key] = value
else:
locale_parent['{}@'.format(base_key)] = value
if key in locale_parent:
locale_parent.pop(key, None)
return False
return key, value
parsed = iterutils.remap(parsed, visit=visit)
return (yaml.dump(
parsed, Dumper=PlainTextYamlDumper,
allow_unicode=True, default_flow_style=False).strip() if parsed else '',
locale_extra)
def convert(self):
# Files with @ in them should already be converted.
if '@' in self.file_name:
logging.info(
'Filename contains a @, skipping: {}'.format(self.file_name))
return
# Ignore hidden files.
if self.file_name.startswith('.'):
logging.info(
'Filename starts with ., skipping: {}'.format(self.file_name))
return
        # Ignore files that don't have an extension
_, file_extension = os.path.splitext(self.file_name)
if not file_extension:
logging.info(
'Filename does not have an extension, skipping: {}'.format(self.file_name))
return
pairs = list(self.split())
if len(pairs) <= 1:
logging.info(
'Single locale detected, skipping: {}'.format(self.file_name))
return
logging.info('Converting: {}'.format(self.file_name))
logging.info(' - Number of content pairs: {}'.format(len(pairs)))
# Determine if there is a file specific default_locale in first pair.
default_locale = ConversionDocument.determine_default_locale(
pairs[0][0]) or self.default_locale
logging.info(' - Using default_locale: {}'.format(default_locale))
# Base content will be pruned of localized values that belong in files.
base_front_matter = pairs[0][0]
for pair in pairs[1:]:
locales, _ = ConversionDocument.determine_locales(
pair[0], default_locale, remove_locales=False,
remove_default_locale=False)
if not locales:
raise LocaleMissingError(
'A section in {} is missing a locale and would be lost.'.format(self.file_name))
# Ensure that there are not existing files for the Locales.
for locale in locales:
locale_filename = self.file_name_for_locale(locale)
if self.pod.file_exists(locale_filename):
raise LocaleExistsError(
                        '{} locale section (defined in {}) already has a localized file ({}).\nPlease resolve this conflict and re-run the conversion.'.format(
locale, self.file_name, locale_filename))
# Store each locale contents until the end so we can combine multiple
# sections that may use the same locale.
locale_to_content = {}
locale_to_front_matter = {}
for pair in pairs[1:]:
locales, front_matter = ConversionDocument.determine_locales(
pair[0], default_locale, remove_locales=True,
remove_default_locale=False)
for locale in locales:
locale_to_content[locale] = pair[1]
if locale in locale_to_front_matter:
locale_extra = locale_to_front_matter[locale]
else:
base_front_matter, locale_extra = ConversionDocument.gather_for_locale(
base_front_matter, locale)
# Combine the extra front_matter from the base document with
# the pair specific front_matter.
locale_front_matter = ConversionDocument.convert_for_locale(
front_matter, locale, base=locale_extra)
# Store the front matter in case another section adds to it.
locale_to_front_matter[locale] = locale_front_matter
# Write the final locale files.
for locale, locale_front_matter in locale_to_front_matter.items():
content = locale_to_content.get(locale, None)
locale_filename = self.file_name_for_locale(locale)
logging.info('Writing: {}'.format(locale_filename))
locale_front_matter_dump = yaml.dump(
locale_front_matter, Dumper=PlainTextYamlDumper, allow_unicode=True,
default_flow_style=False).strip() if locale_front_matter else ''
output = ConversionDocument.format_file(
locale_front_matter_dump, content)
self.pod.write_file(locale_filename, output)
# Do the base file after specific tagged fields are removed.
pair = pairs[0]
content = pair[1]
_, base_front_matter = ConversionDocument.determine_locales(
base_front_matter, default_locale, remove_locales=False,
remove_default_locale=True)
logging.info('Writing: {}'.format(self.file_name))
output = ConversionDocument.format_file(base_front_matter, content)
self.pod.write_file(self.file_name, output)
def file_name_for_locale(self, locale_identifier):
if locale_identifier is None:
return self.file_name
file_parts = self.file_name.split('.')
return '{}@{}.{}'.format(
'.'.join(file_parts[:-1]), locale_identifier, file_parts[-1])
def normalize_raw_content(self):
# Clean and rewrite the yaml files that start with an empty section.
if self.file_name.endswith('.yaml') and self.raw_content.lstrip().startswith('---'):
logging.info('Normalizing: {}'.format(self.file_name))
self.raw_content = self.raw_content.lstrip()[3:].lstrip()
self.pod.write_file(self.file_name, self.raw_content)
def split(self):
parts = BOUNDARY_REGEX.split(self.raw_content)
# Remove the first, empty list item.
if parts[0].strip() == '':
parts.pop(0)
# Yaml files have no 'content'
if self.file_name.endswith('.yaml'):
while parts:
yield parts.pop(0).strip(), None
else:
while parts:
if len(parts) == 1:
yield None, parts.pop(0).strip()
break
front_matter = None
content = ''
if parts:
front_matter = parts.pop(0).strip() or None
if parts:
content = parts.pop(0).strip() or None
yield front_matter, content
class Converter(object):
@staticmethod
def convert(pod):
default_locale = pod.podspec.default_locale
logging.info('Using default locale: {}'.format(default_locale))
# Go through each document and convert to the updated format.
for root, dirs, files in pod.walk('/content'):
pod_dir = root.replace(pod.root, '')
for file_name in files:
doc = ConversionDocument(
pod, os.path.join(pod_dir, file_name), default_locale)
try:
doc.convert()
except:
print('Error trying to convert: {}'.format(
os.path.join(pod_dir, file_name)))
raise
| 36.030162 | 160 | 0.606349 |
aceef4d07326629a794003a570011c7153cc97a6 | 473 | py | Python | app/main/views.py | OscarMugendi/News-API | 3146a3188e476a9ef06cf55f7d02e3db4b7c9d1c | [
"MIT"
] | null | null | null | app/main/views.py | OscarMugendi/News-API | 3146a3188e476a9ef06cf55f7d02e3db4b7c9d1c | [
"MIT"
] | null | null | null | app/main/views.py | OscarMugendi/News-API | 3146a3188e476a9ef06cf55f7d02e3db4b7c9d1c | [
"MIT"
] | null | null | null | from flask import render_template
from ..request import get_article, get_source
from . import main
from ..models import Source,Article
from newsapi import NewsApiClient
import urllib.request
# Views
@main.route('/')
def index():
sources = get_source('sources')
return render_template('index.html', sources=sources)
@main.route('/article/<id>')
def article(id):
articles = get_article(id)
return render_template('article.html', id = id, articles= articles) | 26.277778 | 71 | 0.7463 |
aceef579dabd62c7ca3250e37eabfdffed630492 | 2,317 | py | Python | mosquitto-1.5.4/test/broker/03-publish-b2c-timeout-qos2.py | RainaWLK/mqtt-test | cb4175c8bd1e35deed45941ca61c88fdcc6ddeba | [
"MIT"
] | null | null | null | mosquitto-1.5.4/test/broker/03-publish-b2c-timeout-qos2.py | RainaWLK/mqtt-test | cb4175c8bd1e35deed45941ca61c88fdcc6ddeba | [
"MIT"
] | null | null | null | mosquitto-1.5.4/test/broker/03-publish-b2c-timeout-qos2.py | RainaWLK/mqtt-test | cb4175c8bd1e35deed45941ca61c88fdcc6ddeba | [
"MIT"
] | 1 | 2021-06-19T17:17:41.000Z | 2021-06-19T17:17:41.000Z | #!/usr/bin/env python
# Test whether a QoS 2 message queued from the broker to a subscribed client is correctly retried (duplicate PUBLISH, repeated PUBREL) when the client is slow to respond.
import subprocess
import inspect, os, sys
# From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"..")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import mosq_test
rc = 1
mid = 3265
keepalive = 60
connect_packet = mosq_test.gen_connect("pub-qo2-timeout-test", keepalive=keepalive)
connack_packet = mosq_test.gen_connack(rc=0)
subscribe_packet = mosq_test.gen_subscribe(mid, "qos2/timeout/test", 2)
suback_packet = mosq_test.gen_suback(mid, 2)
mid = 1
publish_packet = mosq_test.gen_publish("qos2/timeout/test", qos=2, mid=mid, payload="timeout-message")
publish_dup_packet = mosq_test.gen_publish("qos2/timeout/test", qos=2, mid=mid, payload="timeout-message", dup=True)
pubrec_packet = mosq_test.gen_pubrec(mid)
pubrel_packet = mosq_test.gen_pubrel(mid)
pubcomp_packet = mosq_test.gen_pubcomp(mid)
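# Comment added for clarity (not in the original test): the broker-to-client QoS 2
# exchange exercised below is PUBLISH -> (no PUBREC in time) -> duplicate PUBLISH with
# DUP=1 -> PUBREC -> PUBREL -> (no PUBCOMP in time) -> PUBREL resent -> PUBCOMP,
# using the packets constructed above.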
broker = mosq_test.start_broker(filename=os.path.basename(__file__))
try:
sock = mosq_test.do_client_connect(connect_packet, connack_packet)
mosq_test.do_send_receive(sock, subscribe_packet, suback_packet, "suback")
    pub = subprocess.Popen(['./03-publish-b2c-timeout-qos2-helper.py'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
pub.wait()
(stdo, stde) = pub.communicate()
# Should have now received a publish command
if mosq_test.expect_packet(sock, "publish", publish_packet):
# Wait for longer than 5 seconds to get republish with dup set
# This is covered by the 8 second timeout
if mosq_test.expect_packet(sock, "dup publish", publish_dup_packet):
mosq_test.do_send_receive(sock, pubrec_packet, pubrel_packet, "pubrel")
            # Wait for longer than 5 seconds to get the PUBREL resent
# This is covered by the 8 second timeout
if mosq_test.expect_packet(sock, "dup pubrel", pubrel_packet):
sock.send(pubcomp_packet)
rc = 0
sock.close()
finally:
broker.terminate()
broker.wait()
(stdo, stde) = broker.communicate()
if rc:
print(stde)
exit(rc)
| 35.106061 | 129 | 0.722486 |
aceef57d53fcf8ebed20c13b44b990d8e2d2c6e4 | 1,201 | py | Python | robomachine/__init__.py | mkorpela/RoboMachine | 90c9957f93adecceaf619c6c70b366a574d5ed0c | [
"Apache-2.0"
] | 78 | 2015-04-11T00:22:13.000Z | 2021-11-22T03:57:57.000Z | robomachine/__init__.py | mkorpela/RoboMachine | 90c9957f93adecceaf619c6c70b366a574d5ed0c | [
"Apache-2.0"
] | 15 | 2015-03-27T12:36:50.000Z | 2022-03-25T15:36:40.000Z | robomachine/__init__.py | mkorpela/RoboMachine | 90c9957f93adecceaf619c6c70b366a574d5ed0c | [
"Apache-2.0"
] | 15 | 2015-03-30T11:57:14.000Z | 2019-12-18T04:15:12.000Z | # Copyright 2011-2012 Mikko Korpela
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from StringIO import StringIO
except:
from io import StringIO
from robomachine.parsing import parse
from robomachine.version import VERSION
from robomachine.generator import Generator, DepthFirstSearchStrategy
__version__ = VERSION
def generate(machine, max_tests=1000, max_actions=None, to_state=None, output=None,
strategy=DepthFirstSearchStrategy):
generator = Generator()
return generator.generate(machine, max_tests, max_actions, to_state, output, strategy)
def transform(text):
output = StringIO()
generate(parse(text), output=output)
return output.getvalue() | 33.361111 | 90 | 0.764363 |
aceef6e53447813622c94473a304064efa0e32fe | 105,785 | py | Python | grizli/aws/db.py | jkmatharu/grizli | 7e2eb918667ac9f845d0847452a72138fc22fbcd | [
"MIT"
] | null | null | null | grizli/aws/db.py | jkmatharu/grizli | 7e2eb918667ac9f845d0847452a72138fc22fbcd | [
"MIT"
] | null | null | null | grizli/aws/db.py | jkmatharu/grizli | 7e2eb918667ac9f845d0847452a72138fc22fbcd | [
"MIT"
] | null | null | null | """
Interact with the grizli AWS database
"""
import os
import numpy as np
FLAGS = {'init_lambda': 1,
'start_beams': 2,
'done_beams': 3,
'no_run_fit': 4,
'start_redshift_fit': 5,
'fit_complete': 6}
COLUMNS = ['root', 'id', 'status', 'ra', 'dec', 'ninput', 'redshift', 'as_epsf', 't_g102', 'n_g102', 'p_g102', 't_g141', 'n_g141', 'p_g141', 't_g800l', 'n_g800l', 'p_g800l', 'numlines', 'haslines', 'chi2poly', 'chi2spl', 'splf01', 'sple01', 'splf02', 'sple02', 'splf03', 'sple03', 'splf04', 'sple04', 'huberdel', 'st_df', 'st_loc', 'st_scl', 'dof', 'chimin', 'chimax', 'bic_poly', 'bic_spl', 'bic_temp', 'z02', 'z16', 'z50', 'z84', 'z97', 'zwidth1', 'zwidth2', 'z_map', 'zrmin', 'zrmax', 'z_risk', 'min_risk', 'd4000', 'd4000_e', 'dn4000', 'dn4000_e', 'dlineid', 'dlinesn', 'flux_pab', 'err_pab', 'ew50_pab', 'ewhw_pab', 'flux_hei_1083', 'err_hei_1083', 'ew50_hei_1083', 'ewhw_hei_1083', 'flux_siii', 'err_siii', 'ew50_siii', 'ewhw_siii', 'flux_oii_7325', 'err_oii_7325', 'ew50_oii_7325', 'ewhw_oii_7325', 'flux_ariii_7138', 'err_ariii_7138', 'ew50_ariii_7138', 'ewhw_ariii_7138', 'flux_sii', 'err_sii', 'ew50_sii', 'ewhw_sii', 'flux_ha', 'err_ha', 'ew50_ha', 'ewhw_ha', 'flux_oi_6302', 'err_oi_6302', 'ew50_oi_6302', 'ewhw_oi_6302', 'flux_hei_5877', 'err_hei_5877', 'ew50_hei_5877', 'ewhw_hei_5877', 'flux_oiii', 'err_oiii', 'ew50_oiii', 'ewhw_oiii', 'flux_hb', 'err_hb', 'ew50_hb', 'ewhw_hb', 'flux_oiii_4363', 'err_oiii_4363', 'ew50_oiii_4363', 'ewhw_oiii_4363', 'flux_hg', 'err_hg', 'ew50_hg', 'ewhw_hg', 'flux_hd', 'err_hd', 'ew50_hd', 'ewhw_hd', 'flux_h7', 'err_h7', 'ew50_h7', 'ewhw_h7', 'flux_h8', 'err_h8', 'ew50_h8', 'ewhw_h8', 'flux_h9', 'err_h9', 'ew50_h9', 'ewhw_h9', 'flux_h10', 'err_h10', 'ew50_h10', 'ewhw_h10', 'flux_neiii_3867', 'err_neiii_3867', 'ew50_neiii_3867', 'ewhw_neiii_3867', 'flux_oii', 'err_oii', 'ew50_oii', 'ewhw_oii', 'flux_nevi_3426', 'err_nevi_3426', 'ew50_nevi_3426', 'ewhw_nevi_3426', 'flux_nev_3346', 'err_nev_3346', 'ew50_nev_3346', 'ewhw_nev_3346', 'flux_mgii', 'err_mgii', 'ew50_mgii', 'ewhw_mgii', 'flux_civ_1549', 'err_civ_1549', 'ew50_civ_1549', 'ewhw_civ_1549', 'flux_ciii_1908', 'err_ciii_1908', 'ew50_ciii_1908', 'ewhw_ciii_1908', 'flux_oiii_1663', 'err_oiii_1663', 'ew50_oiii_1663', 'ewhw_oiii_1663', 'flux_heii_1640', 'err_heii_1640', 'ew50_heii_1640', 'ewhw_heii_1640', 'flux_niii_1750', 'err_niii_1750', 'ew50_niii_1750', 'ewhw_niii_1750', 'flux_niv_1487', 'err_niv_1487', 'ew50_niv_1487', 'ewhw_niv_1487', 'flux_nv_1240', 'err_nv_1240', 'ew50_nv_1240', 'ewhw_nv_1240', 'flux_lya', 'err_lya', 'ew50_lya', 'ewhw_lya', 'pdf_max', 'cdf_z', 'sn_pab', 'sn_hei_1083', 'sn_siii', 'sn_oii_7325', 'sn_ariii_7138', 'sn_sii', 'sn_ha', 'sn_oi_6302', 'sn_hei_5877', 'sn_oiii', 'sn_hb', 'sn_oiii_4363', 'sn_hg', 'sn_hd', 'sn_h7', 'sn_h8', 'sn_h9', 'sn_h10', 'sn_neiii_3867', 'sn_oii', 'sn_nevi_3426', 'sn_nev_3346', 'sn_mgii', 'sn_civ_1549', 'sn_ciii_1908', 'sn_oiii_1663', 'sn_heii_1640', 'sn_niii_1750', 'sn_niv_1487', 'sn_nv_1240', 'sn_lya', 'chinu', 'bic_diff', 'log_risk', 'log_pdf_max', 'zq', 'mtime', 'vel_bl', 'vel_nl', 'vel_z', 'vel_nfev', 'vel_flag', 'grizli_version']
def get_connection_info(config_file=None):
"""
Read the database connection info
"""
import yaml
if config_file is None:
config_file = os.path.join(os.path.dirname(__file__),
'../data/db.yml')
try:
local_file = os.path.join(os.getenv('HOME'), 'db.local.yml')
if os.path.exists(local_file):
print('Use ~/db.local.yml')
config_file = local_file
except:
pass
fp = open(config_file)
try:
db_info = yaml.load(fp, Loader=yaml.FullLoader)
except:
db_info = yaml.load(fp)
fp.close()
return db_info
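# A minimal sketch of the expected db.yml / db.local.yml contents; the keys
# are the ones read by get_db_engine() below, the values here are placeholders:
#
#   hostname: grizli-db.example.rds.amazonaws.com
#   port: 5432
#   database: grizli
#   username: db_user
#   password: db_password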
def get_db_engine(config=None, echo=False):
"""
Generate an SQLAlchemy engine for the grizli database
"""
from sqlalchemy import create_engine
if config is None:
config = get_connection_info()
db_string = "postgresql://{0}:{1}@{2}:{3}/{4}".format(config['username'], config['password'], config['hostname'], config['port'], config['database'])
engine = create_engine(db_string, echo=echo)
return engine
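# Hedged usage sketch (table and query mirror patterns used throughout this
# module; requires pandas imported as pd):
#
#   engine = get_db_engine()
#   res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit LIMIT 10", engine)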
def get_redshift_fit_status(root, id, table='redshift_fit', engine=None):
"""
Get status value from the database for root_id object
"""
import pandas as pd
if engine is None:
engine = get_db_engine(echo=False)
res = pd.read_sql_query("SELECT status FROM {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table), engine)
if len(res) == 0:
return -1
else:
return res['status'][0]
def update_jname():
from grizli import utils
res = grizli_db.from_sql("select p_root, p_id, p_ra, p_dec from photometry_apcorr", engine)
jn = [utils.radec_to_targname(ra=ra, dec=dec, round_arcsec=(0.001, 0.001), precision=2, targstr='j{rah}{ram}{ras}.{rass}{sign}{ded}{dem}{des}.{dess}') for ra, dec in zip(res['p_ra'], res['p_dec'])]
for c in res.colnames:
res.rename_column(c, c.replace('p_', 'j_'))
zres = grizli_db.from_sql("select root, phot_root, id, ra, dec, z_map,"
"q_z, t_g800l, t_g102, t_g141, status from "
"redshift_fit where ra is not null and "
"status > 5", engine)
# Find duplicates
from scipy.spatial import cKDTree
data = np.array([zres['ra'], zres['dec']]).T
ok = zres['q_z'].filled(-100) > -0.7
tree = cKDTree(data[ok])
dr, ix = tree.query(data[ok], k=2)
cosd = np.cos(data[:, 1]/180*np.pi)
dup = (dr[:, 1] < 0.01/3600) # & (zres['phot_root'][ix[:,0]] != zres['phot_root'][ix[:,1]])
ix0 = ix[:, 0]
ix1 = ix[:, 1]
dup = (dr[:, 1] < 0.01/3600)
dup &= (zres['phot_root'][ok][ix0] == zres['phot_root'][ok][ix1])
dup &= (zres['id'][ok][ix0] == zres['id'][ok][ix1])
# second is G800L
dup &= zres['t_g800l'].filled(0)[ok][ix1] > 10
plt.scatter(zres['z_map'][ok][ix0[dup]], zres['z_map'][ok][ix1[dup]],
marker='.', alpha=0.1)
def update_redshift_fit_status(root, id, status=0, table='redshift_fit', engine=None, verbose=True):
"""
Set the status flag in the table
"""
import time
import pandas as pd
from astropy.table import Table
from astropy.time import Time
NOW = Time.now().iso
if engine is None:
engine = get_db_engine(echo=False)
old_status = get_redshift_fit_status(root, id, table=table, engine=engine)
if old_status < 0:
# Need to add an empty row
tab = Table()
tab['root'] = [root]
tab['id'] = [id]
tab['status'] = [status]
tab['mtime'] = [NOW]
row_df = tab.to_pandas()
add_redshift_fit_row(row_df, engine=engine, table=table,
verbose=verbose)
else:
sqlstr = """UPDATE {0}
SET status = {1}, mtime = '{2}'
WHERE (root = '{3}' AND id = {4});
""".format(table, status, NOW, root, id)
if verbose:
msg = 'Update status for {0} {1}: {2} -> {3} on `{4}` ({5})'
print(msg.format(root, id, old_status, status, table, NOW))
engine.execute(sqlstr)
def get_row_data(rowfile='gds-g800l-j033236m2748_21181.row.fits', status_flag=FLAGS['fit_complete']):
"""
Convert table from a row file to a pandas DataFrame
"""
import pandas as pd
from astropy.table import Table
from astropy.time import Time
NOW = Time.now().iso
if isinstance(rowfile, str):
if rowfile.endswith('.fits'):
tab = Table.read(rowfile, character_as_bytes=False)
allowed_columns = COLUMNS
else:
# Output of stellar fits
tab = Table.read(rowfile, format='ascii.commented_header')
tab['chinu'] = tab['chi2']/tab['dof']
tab['phot_root'] = tab['root']
tab.rename_column('best_template', 'stellar_template')
try:
tab['chinu'] = tab['chi2']/tab['dof']
tab['phot_root'] = tab['root']
# BIC of spline-only and template fits
bic_spl = np.log(tab['dof'])*(tab['nk']-1) + tab['chi2_flat']
bic_star = np.log(tab['dof'])*(tab['nk']) + tab['chi2']
tab['bic_diff_star'] = bic_spl - bic_star
except:
print('Parse {0} failed'.format(rowfile))
pass
allowed_columns = ['root', 'id', 'ra', 'dec', 'chi2', 'nk', 'dof',
'chinu', 'chi2_flat', 'bic_diff_star', 'mtime',
'stellar_template', 'status', 'phot_root',
'as_epsf']
else:
tab = rowfile
if 'cdf_z' in tab.colnames:
cdf_z = tab['cdf_z'].data
tab.remove_column('cdf_z')
else:
cdf_z = None
tab['mtime'] = NOW
tab['status'] = status_flag
remove_cols = []
for c in tab.colnames:
if '-' in c:
tab.rename_column(c, c.replace('-', '_'))
for c in tab.colnames:
tab.rename_column(c, c.lower())
# Remove columns not in the database
remove_cols = []
for c in tab.colnames:
if c not in allowed_columns:
#print('Remove column: ', c)
remove_cols.append(c)
if len(remove_cols) > 0:
tab.remove_columns(remove_cols)
row_df = tab.to_pandas()
if cdf_z is not None:
row_df['cdf_z'] = cdf_z.tolist()
return row_df
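# Hedged usage sketch (filename is the illustrative default above): convert a
# row file to a DataFrame and push it with add_redshift_fit_row(), defined below:
#
#   row_df = get_row_data('gds-g800l-j033236m2748_21181.row.fits')
#   add_redshift_fit_row(row_df, table='redshift_fit', engine=engine)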
def delete_redshift_fit_row(root, id, table='redshift_fit', engine=None):
"""
Delete a row from the redshift fit table
"""
if engine is None:
engine = get_db_engine(echo=False)
res = engine.execute("DELETE from {2} WHERE (root = '{0}' AND id = {1})".format(root, id, table))
def add_redshift_fit_row(row_df, table='redshift_fit', engine=None, verbose=True):
"""
Update the row in the redshift_fit table
"""
if engine is None:
engine = get_db_engine(echo=False)
if isinstance(row_df, str):
row_df = get_row_data(row_df)
if ('root' not in row_df.columns) | ('id' not in row_df.columns):
print('Need at least "root" and "id" columns in the row data')
return False
root = row_df['root'][0]
id = row_df['id'][0]
status = get_redshift_fit_status(root, id, table=table, engine=engine)
# Delete the old row?
if status >= 0:
print('Delete and update row for {0}/{1} on `{2}`'.format(root, id,
table))
delete_redshift_fit_row(root, id, table=table, engine=engine)
else:
print('Add row for {0}/{1} on `{2}`'.format(root, id, table))
# Add the new data
row_df.to_sql(table, engine, index=False, if_exists='append', method='multi')
###########
def add_missing_rows(root='j004404m2034', engine=None):
"""
Add rows that were completed but that aren't in the table
"""
import glob
from astropy.table import vstack, Table
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
os.system('aws s3 sync s3://grizli-v1/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*row.fits"'.format(root))
row_files = glob.glob('{0}*row.fits'.format(root))
row_files.sort()
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}' AND status=6".format(root), engine)
res_ids = res['id'].to_list()
tabs = []
print('\n\n NROWS={0}, NRES={1}\n\n'.format(len(row_files), len(res)))
for row_file in row_files:
id_i = int(row_file.split('.row.fits')[0][-5:])
if id_i not in res_ids:
grizli_db.add_redshift_fit_row(row_file, engine=engine, verbose=True)
def convert_1D_to_lists(file='j234420m4245_00615.1D.fits'):
"""
Convert 1D spectral data to lists suitable for putting into dataframes
and sending to the databases.
"""
from collections import OrderedDict
import astropy.io.fits as pyfits
from .. import utils
if not os.path.exists(file):
print('Spectrum file not found')
return False
im = pyfits.open(file)
obj_id = im[0].header['ID']
obj_root = im[0].header['TARGET']
if '.R30.' in file:
skip_columns = ['line', 'cont']
pref = 'spec1d_r30'
else:
skip_columns = []
pref = 'spec1d'
spectra = OrderedDict()
has_spectra = False
for gr in ['G102', 'G141', 'G800L']:
if gr in im:
has_spectra = True
sp = utils.GTable.read(file, hdu=gr)
prefix = '{0}_{1}_'.format(pref, gr.lower())
spd = {prefix+'id': obj_id, prefix+'root': obj_root}
for c in sp.colnames:
if c in skip_columns:
continue
spd[prefix+c] = sp[c].tolist()
spectra[gr.lower()] = spd
if has_spectra:
return spectra
else:
return False
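# When spectra are found, the returned dict is keyed by grism and holds
# list-valued columns ready for a DataFrame, roughly (illustrative values):
#
#   {'g141': {'spec1d_g141_root': 'j234420m4245', 'spec1d_g141_id': 615,
#             'spec1d_g141_wave': [...], 'spec1d_g141_flux': [...],
#             'spec1d_g141_err': [...], ...}, ...}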
def send_1D_to_database(files=[], engine=None):
"""
Send a list of 1D spectra to the spectra databases
ToDo: check for existing lines
"""
from collections import OrderedDict
import pandas as pd
if engine is None:
engine = get_db_engine()
tables = OrderedDict()
for file in files:
sp_i = convert_1D_to_lists(file=file)
print('Read spec1d file: {0}'.format(file))
for gr in sp_i:
# Initialize the columns
if gr not in tables:
tables[gr] = OrderedDict()
for c in sp_i[gr]:
tables[gr][c] = []
# Add the data
for c in sp_i[gr]:
tables[gr][c].append(sp_i[gr][c])
prefix = 'spec1d_r30' if '.R30.' in files[0] else 'spec1d'
for gr in tables:
tablename = '{0}_{1}'.format(prefix, gr)
df = pd.DataFrame(tables[gr])
# Put wavelengths in their own tables to avoid massive duplication
wave_table = tablename+'_wave'
if wave_table not in engine.table_names():
print('Create wave table: '+wave_table)
wdf = pd.DataFrame(data=tables[gr][wave_table][0],
columns=[wave_table])
wdf.to_sql(wave_table, engine, if_exists='replace',
index=True, index_label=tablename+'_idx')
# drop wave from spectra tables
df.drop('{0}_wave'.format(tablename), axis=1, inplace=True)
# Create table
if tablename not in engine.table_names():
print('Initialize table {0}'.format(tablename))
SQL = "CREATE TABLE {0} (\n".format(tablename)
SQL += ' {0}_root text,\n'.format(tablename)
SQL += ' {0}_id integer,\n'.format(tablename)
for c in df.columns:
item = df[c][0]
if isinstance(item, list):
SQL += ' {0} real[{1}],\n'.format(c, len(item))
engine.execute(SQL[:-2]+')')
try:
engine.execute("CREATE INDEX {0}_idx ON {0} ({0}_root, {0}_id);".format(tablename))
except:
pass
# Delete existing duplicates
if tablename in engine.table_names():
SQL = """DELETE from {0} WHERE """.format(tablename)
mat = ["({0}_root = '{1}' AND {0}_id = {2})".format(tablename, r, i) for r, i in zip(df[tablename+'_root'], df[tablename+'_id'])]
SQL += 'OR '.join(mat)
rsp = engine.execute(SQL)
# Send the table
print('Send {0} rows to {1}'.format(len(df), tablename))
df.to_sql(tablename, engine, index=False, if_exists='append',
method='multi')
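# Hedged usage sketch (file pattern mirrors add_oned_spectra() below, which
# wraps this call for a whole field):
#
#   files = glob.glob('j214224m4420gr01_*1D.fits')
#   send_1D_to_database(files=files, engine=engine)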
def add_all_spectra():
from grizli.aws import db as grizli_db
roots = grizli_db.from_sql("select root,count(root) as n from redshift_fit group BY root order by n DESC", engine)
o = 1
for root in roots['root'][::o]:
existing = open('log').readlines()
if root+'\n' in existing:
print('Skip', root)
continue
fp = open('log', 'a')
fp.write(root+'\n')
fp.close()
try:
grizli_db.add_oned_spectra(root=root, engine=engine)
except:
pass
def add_oned_spectra(root='j214224m4420gr01', bucket='grizli-v1', engine=None):
import os
import glob
if engine is None:
engine = get_db_engine()
# import boto3
# s3 = boto3.resource('s3')
# bkt = s3.Bucket(bucket)
#
# files = [obj.key for obj in bkt.objects.filter(Prefix='Pipeline/{0}/Extractions/'.format(root))]
#
# for file in files:
# if (('.R30.fits' in file) | ('.1D.fits' in file)) & (not os.path.exists(file)):
# local_file = os.path.basename(file)
# print(local_file)
# bkt.download_file(file, local_file,
# ExtraArgs={"RequestPayer": "requester"})
os.system('aws s3 sync s3://{0}/Pipeline/{1}/Extractions/ ./ --exclude "*" --include "*R30.fits" --include "*1D.fits"'.format(bucket, root))
nmax = 500
# 1D.fits
files = glob.glob('{0}_*1D.fits'.format(root))
files.sort()
for i in range(len(files)//nmax+1):
send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)
files = glob.glob('{0}_*R30.fits'.format(root))
files.sort()
for i in range(len(files)//nmax+1):
send_1D_to_database(files=files[i*nmax:(i+1)*nmax], engine=engine)
os.system('rm {0}_*.1D.fits {0}_*.R30.fits'.format(root))
if False:
tablename = 'spec1d_g141'
#tablename = 'spec1d_g102'
#tablename = 'spec1d_r30_g141'
if 1:
# by root
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND root = '{0}' AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
else:
# everything
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 ORDER BY z_map".format(root, tablename), engine)
# Halpha EW
resp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, t_g141, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.3 AND err_ha > 0 ORDER BY ew50_ha".format(root, tablename), engine)
# Everything
fresp = pd.read_sql_query("SELECT root, id, z_map, q_z, ew50_ha, flux_ha, err_ha, ew50_oiii, ew50_hb, ew50_oii, d4000, d4000_e, t_g141, t_g102, t_g800l, sp.* from redshift_fit, {1} as sp WHERE {1}_root = root AND {1}_id = id AND q_z > -0.7 AND chinu < 2 ORDER BY z_map".format(root, tablename), engine)
wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
engine)[tablename+'_wave'].values
resp = fresp
sort_column = 'z_map'
bin_factor = 1
wnorm = 6400
zref = 1.3e4/wnorm-1
sel = np.isfinite(fresp[sort_column]) & (fresp[sort_column] != -99)
norm_ix = np.interp(wnorm*(1+fresp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)
sel &= np.isfinite(norm_ix)
resp = fresp[sel]
norm_ix = np.cast[int](np.round(np.interp(wnorm*(1+resp['z_map']), wave, np.arange(len(wave)), left=np.nan, right=np.nan)))
resp.sort_values(sort_column, inplace=True)
if tablename == 'spec1d_g141':
exptime = resp['t_g141'].values
wlim = [1.1e4, 1.65e4]
else:
exptime = resp['t_g102'].values
wlim = [8000, 1.1e4, 1.65e4]
data = OrderedDict()
for c in resp.columns:
if c.startswith(tablename):
c_i = c.split(tablename+'_')[1]
try:
data[c_i] = np.array(resp[c].values.tolist())
except:
pass
#plt.imshow((data['flux'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
# Rest-frame
dz = np.diff(wave)[10]/wave[10]
max_zshift = np.cast[int](np.log(1+resp['z_map'].max())/dz)
zshift = np.cast[int]((np.log(1+resp['z_map']) - np.log(1+zref))/dz)
err_max = 5
# Continuum normalized
#norm = data['cont'][:,100]/data['flat'][:,100]
norm = np.zeros(len(resp))
for i, ix in enumerate(norm_ix):
norm[i] = data['line'][i, ix]/data['flat'][i, ix]
#norm = np.mean(data['cont'][:,50:120]/data['flat'][:,50:120], axis=1)
# 2D arrays
normed = ((data['flux']/data['flat']).T/norm).T
cnormed = ((data['cont']/data['flat']).T/norm).T
lnormed = (((data['line']-data['cont'])/data['flat']).T/norm).T
err = ((data['err']/data['flat']).T/norm).T
mask = np.isfinite(norm) & (norm > 0) & np.isfinite(norm_ix)
normed = normed[mask, :]
cnormed = cnormed[mask, :]
lnormed = lnormed[mask, :]
err = err[mask, :]
ivar = 1/err**2
ivar[err <= 0] = 0
# Weight by exposure time
ivar = (ivar.T*0+(exptime[mask]/4000.)*norm[mask]).T
zshift = zshift[mask]
# Clip edges
wclip = (wave > wlim[0]) & (wave < wlim[1])
mask_val = 1e10
normed[:, ~wclip] = -mask_val
cnormed[:, ~wclip] = -mask_val
lnormed[:, ~wclip] = -mask_val
sh = normed.shape
rest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
crest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
lrest = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min())) - mask_val
rest[:, zshift.max():zshift.max()+sh[1]] = normed*1
crest[:, zshift.max():zshift.max()+sh[1]] = cnormed*1
lrest[:, zshift.max():zshift.max()+sh[1]] = lnormed*1
rest_ivar = np.zeros((sh[0], sh[1]+zshift.max()-zshift.min()))
rest_ivar[:, zshift.max():zshift.max()+sh[1]] = ivar*1
for i in range(sh[0]):
rest[i, :] = np.roll(rest[i, :], -zshift[i])
crest[i, :] = np.roll(crest[i, :], -zshift[i])
lrest[i, :] = np.roll(lrest[i, :], -zshift[i])
rest_ivar[i, :] = np.roll(rest_ivar[i, :], -zshift[i])
ok = np.isfinite(rest) & np.isfinite(rest_ivar) & (rest > -0.8*mask_val)
rest_ivar[~ok] = 0
rest[~ok] = -mask_val
crest[~ok] = -mask_val
lrest[~ok] = -mask_val
shr = rest.shape
nbin = int((shr[0]//shr[1])//2*bin_factor)*2
kernel = np.ones((1, nbin)).T
# npix = np.maximum(nd.convolve((rest > -0.8*mask_val)*1, kernel), 1)
# srest = nd.convolve(rest*(rest > -0.8*mask_val), kernel)
# sbin = (srest/npix)[::nbin,:]
# plt.imshow(sbin, vmin=0, vmax=5)
num = nd.convolve(rest*rest_ivar, kernel)
cnum = nd.convolve(crest*rest_ivar, kernel)
lnum = nd.convolve(lrest*rest_ivar, kernel)
den = nd.convolve(rest_ivar, kernel)
wbin = (num/den)[::nbin, :]
wbin[~np.isfinite(wbin)] = 0
cwbin = (cnum/den)[::nbin, :]
cwbin[~np.isfinite(cwbin)] = 0
lwbin = (lnum/den)[::nbin, :]
lwbin[~np.isfinite(lwbin)] = 0
plt.imshow(wbin, vmin=0, vmax=5)
plt.imshow((data['line'] - data['cont'])/data['flat']/1.e-19, vmin=-0.1, vmax=10)
def run_lambda_fits(root='j004404m2034', phot_root=None, mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None, zr=[0.01, 3.4], bucket='grizli-v1', verbose=True, extra={'bad_pa_threshold': 10}):
"""
Run redshift fits on lambda for a given root
"""
from grizli.aws import fit_redshift_lambda
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine()
import pandas as pd
import numpy as np
import glob
import os
print('Sync phot catalog')
if phot_root is None:
        phot_root = root
os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits"'.format(phot_root, bucket))
print('Sync wcs.fits')
os.system('aws s3 sync s3://{1}/Pipeline/{0}/Extractions/ ./ --exclude "*" --include "*_phot*.fits" --include "*wcs.fits"'.format(root, bucket))
phot = utils.read_catalog('{0}_phot_apcorr.fits'.format(phot_root))
phot['has_grism'] = 0
wcs_files = glob.glob('*wcs.fits')
for f in wcs_files:
w = utils.WCSFootprint(f, ext=0)
has = w.path.contains_points(np.array([phot['ra'], phot['dec']]).T)
print(f, has.sum())
phot['has_grism'] += has
mag = phot['mag_auto']*np.nan
mag_filt = np.array([' ']*len(phot))
sn = phot['mag_auto']*np.nan
for filt in ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m', 'f814w', 'f850lp', 'f606w', 'f775w']:
if '{0}_tot_1'.format(filt) in phot.colnames:
mag_i = 23.9-2.5*np.log10(phot['{0}_tot_1'.format(filt)])
fill = (~np.isfinite(mag)) & np.isfinite(mag_i)
mag[fill] = mag_i[fill]
mag_filt[fill] = filt
sn_i = phot['{0}_tot_1'.format(filt)]/phot['{0}_etot_1'.format(filt)]
sn[fill] = sn_i[fill]
sel = np.isfinite(mag) & (mag >= mag_limits[0]) & (mag <= mag_limits[1]) & (phot['has_grism'] > 0)
sel &= phot['flux_radius'] > 1
sel &= sn > sn_limit
if min_status is not None:
res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}'".format(root, min_status), engine)
if len(res) > 0:
status = phot['id']*0-100
status[res['id']-1] = res['status']
sel &= status < min_status
ids = phot['id'][sel]
# Select just on min_status
if min_status > 1000:
if min_status > 10000:
# Include mag constraints
res = pd.read_sql_query("SELECT root, id, status, mtime, mag_auto FROM redshift_fit,photometry_apcorr WHERE root = '{0}' AND status = {1}/10000 AND mag_auto > {2} AND mag_auto < {3} AND p_root = root AND p_id = id".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
else:
# just select on status
res = pd.read_sql_query("SELECT root, id, status, mtime FROM redshift_fit WHERE root = '{0}' AND status = {1}/1000".format(root, min_status, mag_limits[0], mag_limits[1]), engine)
ids = res['id'].tolist()
if len(ids) == 0:
return False
fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name=bucket, skip_existing=False, sleep=False, skip_started=False, show_event=False, zr=zr, force_args=True, quasar_fit=False, output_path=None, save_figures='png', verbose=verbose, **extra)
print('Add photometry: {0}'.format(root))
grizli_db.add_phot_to_db(phot_root, delete=False, engine=engine)
res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
grizli_db.set_phot_root(root, phot_root, engine)
res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0 AND root = '{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
return res
if False:
res = pd.read_sql_query("SELECT root, id, status, redshift, bic_diff, mtime FROM redshift_fit WHERE (root = '{0}')".format(root), engine)
# Get arguments
args = fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 3.4], force_args=True)
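if False:
    # Hedged usage sketch: the field name is illustrative and the keyword
    # values are just the defaults of run_lambda_fits() above
    engine = get_db_engine()
    res = run_lambda_fits(root='j004404m2034', phot_root='j004404m2034',
                          mag_limits=[15, 26], sn_limit=7, zr=[0.01, 3.4],
                          engine=engine)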
def set_phot_root(root, phot_root, engine):
"""
"""
print('Set phot_root = {0} > {1}'.format(root, phot_root))
SQL = """UPDATE redshift_fit
SET phot_root = '{phot_root}'
WHERE (root = '{root}');
""".format(phot_root=phot_root, root=root)
engine.execute(SQL)
if False:
# Check where phot_root not equal to root
res = pd.read_sql_query("SELECT root, id, status, phot_root FROM redshift_fit WHERE (phot_root != root)".format(root), engine)
# update the one pointing where it should change in photometry_apcorr
engine.execute("UPDATE photometry_apcorr SET p_root = 'j214224m4420' WHERE root = 'j214224m4420gr01';")
engine.execute("UPDATE redshift_fit SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
engine.execute("UPDATE redshift_fit_quasar SET phot_root = 'j214224m4420' WHERE root LIKE 'j214224m4420g%%';")
if False:
# Replace in-place
engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%' AND root LIKE '%%-grism%%")
engine.execute("update redshift_fit set phot_root = replace(root, 'g800l', 'grism') WHERE root not like 'j214224m4420%%'")
engine.execute("update redshift_fit set phot_root = 'j214224m4420' WHERE root like 'j214224m4420gr%%'")
engine.execute("update redshift_fit_quasar set phot_root = replace(root, 'g800l', 'grism') where root like '%%g800l%%'")
# Set 3D-HST fields
res = grizli_db.from_sql("select distinct root from redshift_fit where root like '%%-grism%%'", engine)
for root in res['root']:
grizli_db.set_phot_root(root, root, engine)
grizli_db.set_phot_root(root.replace('-grism', '-g800l'), root, engine)
xres = grizli_db.from_sql("select root, count(root) from redshift_fit where root like '{0}-%%' group by root".format(root.split('-')[0]), engine)
print(xres)
# Update OBJID for natural join
# for tab in ['redshift_fit', 'redshift_fit_quasar', 'multibeam']
SQL = """
WITH sub AS (
SELECT objid as p_objid, p_root, p_id
FROM photometry_apcorr
)
UPDATE redshift_fit
SET objid = p_objid
FROM sub
WHERE phot_root = p_root AND id = p_id;
"""
db.from_sql(SQL, engine)
engine.execute(SQL)
def wait_on_db_update(root, t0=60, dt=30, n_iter=60, engine=None):
"""
Wait for db to stop updating on root
"""
import pandas as pd
from astropy.table import Table
from grizli.aws import db as grizli_db
import numpy as np
import time
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
n_i, n6_i, checksum_i = -1, -1, -1
for i in range(n_iter):
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE root = '{0}'".format(root), engine)
checksum = (2**res['status']).sum()
n = len(res)
n6 = (res['status'] == 6).sum()
n5 = (res['status'] == 5).sum()
if (n == n_i) & (checksum == checksum_i) & (n6 == n6_i):
break
now = time.ctime()
print('{0}, {1}: n={2:<5d} n5={5:<5d} n6={3:<5d} checksum={4}'.format(root, now, n, n6, checksum, n5))
n_i, n6_i, checksum_i = n, n6, checksum
if i == 0:
time.sleep(t0)
else:
time.sleep(dt)
return res
##
def fit_timeouts(root='j004404m2034', mag_limits=[15, 26], sn_limit=7, min_status=None, engine=None):
"""
    Re-run redshift fits on lambda for objects of a given root that timed out (status = 5)
"""
from grizli.aws import fit_redshift_lambda
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = grizli_db.get_db_engine()
import pandas as pd
import numpy as np
import glob
import os
res = pd.read_sql_query("SELECT id, status FROM redshift_fit WHERE root = '{0}' AND status = 5".format(root), engine)
if len(res) == 0:
return True
ids = res['id'].tolist()
fit_redshift_lambda.fit_lambda(root=root, beams=[], ids=ids, newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=False, zr=[0.01, 2.4], force_args=True)
res = grizli_db.wait_on_db_update(root, dt=15, n_iter=120, engine=engine)
return res
# All timeouts
events = fit_redshift_lambda.fit_lambda(root='egs-g800l-j141956p5255', beams=[], ids=[20667], newfunc=False, bucket_name='grizli-v1', skip_existing=False, sleep=False, skip_started=False, quasar_fit=False, output_path=None, show_event=2, zr=[0.01, 2.4], force_args=True)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' ORDER BY root".format(root), engine)
base = {'bucket': 'grizli-v1', 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,2.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png'}
all_events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=True)
#################
# Fit locally on EC2
i0 = 0
import os
import pandas as pd
import numpy as np
from grizli.aws import db as grizli_db
from grizli.aws import fit_redshift_lambda, lambda_handler
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root LIKE '%%-grism%%' ORDER BY root", engine)
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root NOT LIKE 'cos-grism%%' AND root NOT LIKE '%%-grism%%' AND root NOT LIKE '%%g800l%%' ORDER BY root", engine)
bucket = 'grizli-v1'
res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'j114936p2222' ORDER BY id", engine)
bucket = 'grizli-v1'
# res = pd.read_sql_query("SELECT root, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'cos-grism%%' order by id", engine)
# bucket = 'grizli-cosmos-v2'
N = len(res)
np.random.seed(1)
so = np.argsort(np.random.normal(size=N))
base = {'bucket': bucket, 'skip_started': False, 'quasar_fit': False, 'zr': '0.01,3.4', 'force_args': True, 'bad_pa_threshold': 10, 'use_phot_obj': False, 'save_figures': 'png', 'verbose': True, 'working_directory': os.getcwd()}
events = fit_redshift_lambda.generate_events(res['root'], res['id'], base=base, send_to_lambda=False)
for event in events[i0::2]:
lambda_handler.handler(event, {})
########
xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
print(len(res), len(xres))
# show points
xres = pd.read_sql_query("SELECT root, p_ra as ra, p_dec as dec, id, status FROM redshift_fit WHERE status = 5 AND root LIKE 'gds-grism%%' ORDER BY root".format(root), engine)
# Photometry table
def set_filter_bits(phot):
"""
Set bits indicating available filters
"""
import numpy as np
filters = ['f160w', 'f140w', 'f125w', 'f110w', 'f105w', 'f098m', 'f850lp', 'f814w', 'f775w', 'f625w', 'f606w', 'f475w', 'f438w', 'f435w', 'f555w', 'f350lp', 'f390w', 'f336w', 'f275w', 'f225w']
bits = [np.uint32(2**i) for i in range(len(filters))]
phot['filter_bit'] = np.zeros(len(phot), dtype=np.uint32)
phot['red_bit'] = np.zeros(len(phot), dtype=np.uint32)
for i, filt in enumerate(filters):
col = '{0}_flux_aper_0'.format(filt)
if col in phot.colnames:
red = bits[i] * np.isfinite(phot[col]) * (phot['filter_bit'] == 0)
phot['filter_bit'] |= bits[i] * np.isfinite(phot[col])
phot['red_bit'] |= red
print(filt, i, bits[i], red.max())
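# Example of the encoding (bits follow the filter list order above): an object
# with finite f160w (2**0 = 1) and f814w (2**7 = 128) apertures gets
# filter_bit = 129, while red_bit keeps only the bit of the first available
# filter in that list (f160w here), i.e. red_bit = 1.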
def phot_to_dataframe(phot, root):
"""
Convert phot_apcorr.fits table to a pandas DataFrame
- Add 'root' column
- remove "dummy" columns
- rename 'xmin', 'xmax', 'ymin', 'ymax' to 'image_xmin', ...
"""
phot['root'] = root
set_filter_bits(phot)
for c in ['dummy_flux', 'dummy_err']:
if c in phot.colnames:
phot.remove_column(c)
for c in ['xmin', 'xmax', 'ymin', 'ymax']:
phot.rename_column(c, 'image_'+c)
for c in ['root', 'id', 'ra', 'dec']:
phot.rename_column(c, 'p_'+c)
df = phot.to_pandas()
return df
def add_phot_to_db(root, delete=False, engine=None, nmax=500):
"""
Read the table {root}_phot_apcorr.fits and append it to the grizli_db `photometry_apcorr` table
"""
import pandas as pd
from astropy.table import Table
from grizli.aws import db as grizli_db
import numpy as np
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("SELECT p_root, p_id FROM photometry_apcorr WHERE p_root = '{0}'".format(root), engine)
if len(res) > 0:
if delete:
print('Delete rows where root={0}'.format(root))
res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root))
if False:
res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root))
else:
print('Data found for root={0}, delete them if necessary'.format(root))
return False
# Read the catalog
phot = Table.read('{0}_phot_apcorr.fits'.format(root), character_as_bytes=False)
# remove columns
remove = []
for c in phot.colnames:
if ('_corr_' in c) | ('_ecorr_' in c) | (c[-5:] in ['tot_4', 'tot_5', 'tot_6']) | ('dummy' in c):
remove.append(c)
phot.remove_columns(remove)
# Add new filter columns if necessary
empty = pd.read_sql_query("SELECT * FROM photometry_apcorr WHERE false", engine)
df = phot_to_dataframe(phot, root)
new_cols = []
for c in df.columns:
if c not in empty.columns:
new_cols.append(c)
if len(new_cols) > 0:
for c in new_cols:
print('Add column {0} to `photometry_apcorr` table'.format(c))
sql = "ALTER TABLE photometry_apcorr ADD COLUMN {0} real;".format(c)
res = engine.execute(sql)
# Add new table
print('Send {0}_phot_apcorr.fits to `photometry_apcorr`.'.format(root))
if nmax > 0:
# Split
N = len(phot) // nmax
for i in range(N+1):
print(' add rows {0:>5}-{1:>5} ({2}/{3})'.format(i*nmax, (i+1)*nmax, i+1, N+1))
df[i*nmax:(i+1)*nmax].to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
else:
df.to_sql('photometry_apcorr', engine, index=False, if_exists='append', method='multi')
def multibeam_to_database(beams_file, engine=None, Rspline=15, force=False, **kwargs):
"""
Send statistics of the beams.fits file to the database
"""
import numpy as np
import pandas as pd
from astropy.time import Time
from .. import multifit, utils
if engine is None:
engine = get_db_engine(echo=False)
mtime = Time(os.stat(beams_file).st_mtime, format='unix').iso
root = beams_file.split('_')[0]
id = int(beams_file.split('_')[1].split('.')[0])
res = pd.read_sql_query("SELECT mtime from multibeam WHERE (root = '{0}' AND id = {1})".format(root, id), engine)
if len(res) == 1:
if (res['mtime'][0] == mtime) & (not force):
print('{0} already in multibeam table'.format(beams_file))
return True
mb = multifit.MultiBeam(beams_file, **kwargs)
print('Update `multibeam` and `beam_geometry` tables for {0}.'.format(beams_file))
# Dummy for loading the templates the same way as for the quasars
# for generating the spline fit
templ_args = {'uv_line_complex': True,
'broad_fwhm': 2800,
'narrow_fwhm': 1000,
'fixed_narrow_lines': True,
'Rspline': Rspline,
'include_reddened_balmer_lines': False}
q0, q1 = utils.load_quasar_templates(**templ_args)
for t in list(q0.keys()):
if 'bspl' not in t:
q0.pop(t)
tfit = mb.template_at_z(0, templates=q0, fitter='lstsq')
sp = tfit['line1d'].wave, tfit['line1d'].flux
m2d = mb.get_flat_model(sp, apply_mask=True, is_cgs=True)
mb.initialize_masked_arrays()
chi0 = (mb.scif_mask**2*mb.ivarf[mb.fit_mask]).sum()
# Percentiles of masked contam, sci, err and contam/sci
pvals = np.arange(5, 96, 5)
mpos = m2d > 0
contam_percentiles = np.percentile(mb.contamf_mask, pvals)
sci_percentiles = np.percentile(mb.scif_mask, pvals)
err_percentiles = np.percentile(1/mb.sivarf[mb.fit_mask], pvals)
sn_percentiles = np.percentile(mb.scif_mask*mb.sivarf[mb.fit_mask], pvals)
fcontam_percentiles = np.percentile(mb.contamf_mask/mb.scif_mask, pvals)
# multibeam dataframe
df = pd.DataFrame()
    float_type = float  # builtin float; the np.float alias is deprecated/removed in recent numpy
df['root'] = [root]
df['id'] = [id]
df['objid'] = [-1]
df['mtime'] = [mtime]
df['status'] = [6]
df['scip'] = [list(sci_percentiles.astype(float_type))]
df['errp'] = [list(err_percentiles.astype(float_type))]
df['snp'] = [list(sn_percentiles.astype(float_type))]
df['snmax'] = [float_type((mb.scif_mask*mb.sivarf[mb.fit_mask]).max())]
df['contamp'] = [list(contam_percentiles.astype(float_type))]
df['fcontamp'] = [list(fcontam_percentiles.astype(float_type))]
df['chi0'] = [np.int32(chi0)]
df['rspline'] = [Rspline]
df['chispl'] = [np.int32(tfit['chi2'])]
df['mb_dof'] = [mb.DoF]
df['wmin'] = [np.int32(mb.wave_mask.min())]
df['wmax'] = [np.int32(mb.wave_mask.max())]
# Input args
for a in ['fcontam', 'sys_err', 'min_sens', 'min_mask']:
df[a] = [getattr(mb, a)]
# Send to DB
res = engine.execute("DELETE from multibeam WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
df.to_sql('multibeam', engine, index=False, if_exists='append', method='multi')
# beams dataframe
d = {}
for k in ['root', 'id', 'objid', 'filter', 'pupil', 'pa', 'instrument', 'fwcpos', 'order', 'parent', 'parent_ext', 'ccdchip', 'sci_extn', 'exptime', 'origin_x', 'origin_y', 'pad', 'nx', 'ny', 'sregion']:
d[k] = []
for beam in mb.beams:
d['root'].append(root)
d['id'].append(id)
d['objid'].append(-1)
for a in ['filter', 'pupil', 'instrument', 'pad',
'fwcpos', 'ccdchip', 'sci_extn', 'exptime']:
d[a].append(getattr(beam.grism, a))
d['order'].append(beam.beam.beam)
parent = beam.grism.parent_file.replace('.fits', '').split('_')
d['parent'].append(parent[0])
d['parent_ext'].append(parent[1])
d['origin_x'].append(beam.grism.origin[1])
d['origin_y'].append(beam.grism.origin[0])
d['nx'].append(beam.sh[1])
d['ny'].append(beam.sh[0])
f = beam.grism.wcs.calc_footprint().flatten()
fs = ','.join(['{0:.6f}'.format(c) for c in f])
d['sregion'].append('POLYGON({0})'.format(fs))
d['pa'].append(int(np.round(beam.get_dispersion_PA())))
df = pd.DataFrame.from_dict(d)
# Send to database
res = engine.execute("DELETE from beam_geometry WHERE (root = '{0}' AND id = {1})".format(mb.group_name, mb.id), engine)
df.to_sql('beam_geometry', engine, index=False, if_exists='append', method='multi')
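# Note: each beam_geometry 'sregion' value is a string like
# 'POLYGON(ra1,dec1,ra2,dec2,ra3,dec3,ra4,dec4)' built from the four corners
# of the beam WCS footprint computed above.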
if False:
# Fix multibeam arrays
import pandas as pd
import numpy as np
from sqlalchemy import types
from grizli.aws import db as grizli_db
engine = grizli_db.get_db_engine()
df = pd.read_sql_query('select id, root, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
c = 'snp'
data = pd.DataFrame()
data['id'] = df['id']
data['root'] = df['root']
dtype = {'root': types.String, 'id': types.Integer}
for c in df.columns:
if c.endswith('p'):
print(c)
dtype[c[:-1]+'_p'] = types.ARRAY(types.FLOAT)
data[c[:-1]+'_p'] = [list(np.cast[float](line.strip()[1:-1].split(','))) for line in df[c]]
data.to_sql('multibeam_tmp', engine, index=False, if_exists='append', method='multi')
from sqlalchemy import types
for c in df.columns:
if c.endswith('p'):
pass
for c in df.columns:
if c.endswith('p'):
sql = "ALTER TABLE multibeam ADD COLUMN {0} real[];".format(c[:-1]+'_p')
print(sql)
sql = "UPDATE multibeam mb SET {new} = tmp.{new} FROM multibeam_tmp tmp WHERE tmp.id = mb.id AND tmp.root = mb.root;".format(new=c[:-1]+'_p')
print(sql)
x = grizli_db.from_sql('select id, scip, errp, snp, contamp, fcontamp from multibeam mb', engine)
def test_join():
import pandas as pd
res = pd.read_sql_query("SELECT root, id, flux_radius, mag_auto, z_map, status, bic_diff, zwidth1, log_pdf_max, chinu FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
res = pd.read_sql_query("SELECT * FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE z_map > 0) z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
# on root
res = pd.read_sql_query("SELECT p.root, p.id, mag_auto, z_map, status FROM photometry_apcorr AS p JOIN (SELECT * FROM redshift_fit WHERE root='{0}') z ON (p.p_root = z.root AND p.p_id = z.id)".format(root), engine)
def column_comments():
from collections import OrderedDict
import yaml
tablename = 'redshift_fit'
cols = pd.read_sql_query('select * from {0} where false'.format(tablename), engine)
d = {} # OrderedDict{}
for c in cols.columns:
d[c] = '---'
if not os.path.exists('{0}_comments.yml'.format(tablename)):
print('Init {0}_comments.yml'.format(tablename))
fp = open('{0}_comments.yml'.format(tablename), 'w')
yaml.dump(d, stream=fp, default_flow_style=False)
fp.close()
# Edit file
    comments = yaml.load(open('{0}_comments.yml'.format(tablename)), Loader=yaml.FullLoader)
SQL = ""
upd = "COMMENT ON COLUMN {0}.{1} IS '{2}';\n"
for col in comments:
if comments[col] != '---':
SQL += upd.format(tablename, col, comments[col])
else:
print('Skip ', col)
def add_spectroscopic_redshifts(xtab, rmatch=1, engine=None, db=None):
"""
Add spectroscopic redshifts to the photometry_apcorr table
Input table needs (at least) columns:
['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']
"""
import glob
import pandas as pd
from astropy.table import vstack
from grizli.aws import db as grizli_db
from grizli import utils
for c in ['ra', 'dec', 'z_spec', 'z_spec_src', 'z_spec_qual_raw', 'z_spec_qual']:
if c not in xtab.colnames:
print('Column {0} not found in input table'.format(c))
return False
if engine is None:
engine = grizli_db.get_db_engine(echo=False)
# Force data types
tab = xtab[xtab['z_spec'] >= 0]
if hasattr(tab['ra'], 'mask'):
tab = tab[~tab['ra'].mask]
tab['z_spec_qual'] = tab['z_spec_qual']*1
tab['z_spec_qual_raw'] = tab['z_spec_qual_raw']*1
if False:
# duplicates
fit = grizli_db.from_sql("select root, ra, dec from redshift_fit", engine)
fit = grizli_db.from_sql("select root, ra, dec from redshift_fit where ra is null", engine)
# Select master table
if db is None:
res = pd.read_sql_query("SELECT p_root, p_id, p_ra, p_dec, z_spec from photometry_apcorr", engine)
db = utils.GTable.from_pandas(res)
for c in ['p_root', 'p_id', 'p_ra', 'p_dec']:
db.rename_column(c, c[2:])
idx, dr = db.match_to_catalog_sky(tab)
hasm = (dr.value < rmatch) & (tab['z_spec'] >= 0)
tab['z_spec_dr'] = dr.value
tab['z_spec_ra'] = tab['ra']
tab['z_spec_dec'] = tab['dec']
tab['db_root'] = db['root'][idx]
tab['db_id'] = db['id'][idx]
tabm = tab[hasm]['db_root', 'db_id', 'z_spec', 'z_spec_src', 'z_spec_dr', 'z_spec_ra', 'z_spec_dec', 'z_spec_qual_raw', 'z_spec_qual']
print('Send zspec to photometry_apcorr (N={0})'.format(hasm.sum()))
df = tabm.to_pandas()
df.to_sql('z_spec_tmp', engine, index=False, if_exists='replace', method='multi')
SQL = """UPDATE photometry_apcorr
SET z_spec = zt.z_spec,
z_spec_src = zt.z_spec_src,
z_spec_dr = zt.z_spec_dr,
z_spec_ra = zt.z_spec_ra,
z_spec_dec = zt.z_spec_dec,
z_spec_qual_raw = zt.z_spec_qual_raw,
z_spec_qual = zt.z_spec_qual
FROM z_spec_tmp as zt
WHERE (zt.db_root = p_root AND zt.db_id = p_id);
"""
engine.execute(SQL)
if False:
# Update redshift_fit ra/dec with photometry_table double prec.
SQL = """UPDATE redshift_fit
             SET ra = p_ra,
dec = p_dec
FROM photometry_apcorr
WHERE (phot_root = p_root AND id = p_id AND root = 'j123556p6221');
"""
def mtime_to_iso(ct):
"""
Convert mtime values to ISO format suitable for sorting, etc.
"""
months = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
spl = ct.split()
iso = '{yr}-{mo:02d}-{dy:02d} {time}'.format(dy=int(spl[2]), mo=int(months.index(spl[1])+1), yr=spl[-1], time=spl[-2])
return iso
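# e.g. mtime_to_iso('Mon Jan  6 12:34:56 2020') returns '2020-01-06 12:34:56'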
def various_selections():
# sdss z_spec
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^sdss-dr15'", table_root='sdss_zspec', sync='s3://grizli-v1/tables/')
# objects with carla redshifts (radio loud)
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND z_spec_src ~ '^carla'", table_root='carla_zspec', sync='s3://grizli-v1/tables/')
# Bright galaxies with q_z flag
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND mag_auto < 22 AND z_map > 1.3", table_root='bright', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
    # High-z compilation
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'q_z', 'h_zphot', 'h_src', 'h_dr'], where="AND status > 4 AND phot_root = h_root AND id = h_id AND h_dr < 1", tables=['highz_2015'], table_root='highz', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# z_spec with dz
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1", table_root='zspec_delta', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# Point sources
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND mag_auto < 24 AND flux_radius < 1.9 AND ((flux_radius < 1.5 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.9 AND flux_radius > 1.0 AND red_bit < 32))", table_root='point_sources', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Reliable redshifts
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', '(flux_radius < 1.7 AND ((flux_radius < 1.4 AND flux_radius > 0.75 AND red_bit > 32) OR (flux_radius < 1.7 AND flux_radius > 1.0 AND red_bit < 32)))::int as is_point', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'sn_siii', 'sn_ha', 'sn_oiii', 'sn_oii', 'ew50_ha', 'd4000', 'd4000_e', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND chinu < 30 AND q_z > -0.7 order by q_z", table_root='reliable_redshifts', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full'], get_sql=False, sort_column=('q_z', -1))
# stellar classification?
# sql = """SELECT root, id, ra, dec, status, z_map, q_z_map, bic_diff,
# bic_diff_star,
# chinu as t_chinu, s_chinu, q_chinu,
# chinu - q_chinu as tq_chinu, q_chinu - s_chinu as qs_chinu,
# chinu - s_chinu as ts_chinu, stellar_template
# FROM redshift_fit,
# (SELECT root as s_root, id as s_id, chinu as s_chinu, bic_diff_star,
# stellar_template
# FROM stellar_fit
# WHERE status = 6
# ) as s,
# (SELECT root as q_root, id as q_id, chinu as q_chinu,
# bic_diff as q_bic_diff, z_map as q_z_map
# FROM redshift_fit_quasar
# WHERE status = 6
# ) as q
# WHERE (root = s_root AND id = s_id) AND (root = q_root AND id = q_id)
# """
#res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='carbon_stars', sync='s3://grizli-v1/tables/', png_ext=['stack','line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
sql = """SELECT root, id, status, ra, dec, t_g800l, t_g102, t_g141,
z_map, q_z_map, bic_diff,
bic_diff_star, (bic_diff_star > 10 AND q_chinu < 20 AND chinu - q_chinu > 0.05 AND q_chinu-s_chinu > 0 AND chinu-s_chinu > 0.1)::int as is_star,
chinu as t_chinu, s_chinu, q_chinu,
bic_qso-bic_gal as bic_gq,
bic_gal-bic_star as bic_gs,
bic_qso-bic_star as bic_qs,
(bic_spl+chimin)-bic_gal as bic_gx,
bic_spl_qso-bic_qso as bic_qx,
q_vel_bl, qso_q_z, qso_zw1, stellar_template
FROM (SELECT *, bic_temp+chimin as bic_gal FROM redshift_fit z,
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, bic_temp+chimin as bic_qso,
bic_spl+chimin as bic_spl_qso,
z_map as qso_z_map,
zwidth1/(1+z_map) as qso_zw1, vel_bl as q_vel_bl,
q_z as qso_q_z
FROM redshift_fit_quasar
WHERE status = 6
) q
WHERE (root = q_root AND id = q_id)) c
LEFT JOIN
(SELECT root as s_root, id as s_id, chinu as s_chinu,
LN(dof)*nk+chi2 as bic_star,
LN(dof)*(nk-1)+chi2_flat as bic_spline,
bic_diff_star,
stellar_template
FROM stellar_fit
WHERE status = 6
) s ON (root = s_root AND id = s_id) WHERE chinu-q_chinu > 0.5
"""
cstar = grizli_db.from_sql(sql, engine)
cstar['is_star'] = cstar['is_star'].filled(-1)
print('N={0}'.format(len(cstar)))
res = grizli_db.make_html_table(engine=engine, res=cstar, table_root='quasars_and_stars', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], sort_column=('bic_diff_star', -1), get_sql=False)
# best-fit as quasar
sql = """SELECT root, id, ra, dec, status, z_map, q_z_map,
q_z, bic_diff, q_bic_diff,
chinu as t_chinu, q_chinu,
chinu - q_chinu as tq_chinu,
(q_bic_temp + q_chimin) - (bic_temp + chimin) as bic_diff_quasar,
q_vel_bl
FROM redshift_fit z JOIN
(SELECT root as q_root, id as q_id, chinu as q_chinu,
bic_diff as q_bic_diff, z_map as q_z_map, vel_bl,
chimin as q_chimin, bic_temp as q_bic_temp, vel_bl as q_vel_bl
FROM redshift_fit_quasar
WHERE status = 6
) as q
WHERE (root = q_root AND id = q_id) AND status = 6 AND q_z > -1
"""
qq = grizli_db.from_sql(sql, engine)
res = grizli_db.make_html_table(engine=engine, res=qq, table_root='quasar_fit', sync='s3://grizli-v1/tables/', png_ext=['stack', 'line', 'full', 'qso.full', 'star'], get_sql=False)
# Strong lines
res = grizli_db.make_html_table(engine=engine, columns=['root', 'id', 'red_bit', 'status', 'p_ra', 'p_dec', 't_g800l', 't_g102', 't_g141', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'z_spec_dr', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'sn_ha', 'sn_oiii', 'sn_oii'], where="AND status > 4 AND mag_auto < 24 AND (sn_ha > 10 OR sn_oiii > 10 OR sn_oii > 10) AND flux_radius >= 1.6", table_root='strong_lines', sync='s3://grizli-v1/tables/', png_ext=['stack', 'full', 'qso.full', 'star'])
# brown dwarf?
tablename = 'spec1d_r30_g141'
wave = pd.read_sql_query("SELECT * from {0}_wave".format(tablename),
engine)[tablename+'_wave'].values
# 1.15, 1.25, 1.4
i0 = 25, 28, 29, 32
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25]/{0}_flux[28] as c1'.format(tablename), '{0}_flux[32]/{0}_flux[28] as c2'.format(tablename)], where="AND status > 4 AND flux_radius < 2 AND flux_radius > 1 AND mag_auto < 25 AND {0}_root = root AND {0}_id = id AND {0}_flux[28] > 0 AND {0}_flux[28]/{0}_err[28] > 5 AND {0}_flux[32] > 0 AND {0}_flux[25] > 0 AND {0}_flux[32]/{0}_flux[28] < 0.5".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_map', 'z_spec', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'q_z', 'zwidth1/(1+z_map) as zw1', 'dlinesn', '{0}_flux[25] as c25'.format(tablename), '{0}_flux[32] as c32'.format(tablename)], where="AND status > 4 AND z_spec = 0".format(tablename), tables=[tablename], table_root='point_sources_colors', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# with line ratios
lstr = 'err_{0} > 0 AND err_{0} < 5e-17'
err_lines = ' AND '.join(lstr.format(li) for li in
['hb', 'oiii', 'ha', 'sii'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'z_spec_src', 'bic_diff', 'chinu', 'log_pdf_max', 'zwidth1/(1+z_map) as zw1', '(z_map-z_spec)/(1+z_spec) as dz', 'dlinesn', 'flux_hb/flux_ha as HbHa', 'flux_hb/flux_oiii as HbO3', 'flux_oiii/flux_ha as O3Ha'], where="AND status > 4 AND z_spec > 0 AND z_spec_qual = 1 AND sn_oiii > 3 AND sn_ha > 2 AND {0}".format(err_lines), table_root='zspec_lines', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
if False:
from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
xti = xt = np.arange(0, 3.6, 0.5)
loc = np.arange(0, 3.6, 0.1)
bins = utils.log_zgrid([0.03, 3.5], 0.01)
fig = plt.figure(figsize=[7, 6])
ax = fig.add_subplot(111)
ax.scatter(np.log(1+res['z_spec']), np.log(1+res['z_map']), alpha=0.2, c=np.log10(res['zw1']), marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
sc = ax.scatter(np.log([1]), np.log([1]), alpha=0.8, c=[0], marker='.', vmin=-3.5, vmax=-0.5, cmap='plasma')
cb = plt.colorbar(sc, shrink=0.6)
cb.set_label(r'$(z_{84}-z_{16})/(1+z_{50})$')
cb.set_ticks([-3, -2, -1])
cb.set_ticklabels([0.001, 0.01, 0.1])
xts = ax.set_xticks(np.log(1+xt))
xtl = ax.set_xticklabels(xti)
xts = ax.set_yticks(np.log(1+xt))
xtl = ax.set_yticklabels(xti)
ax.set_xlim(0, np.log(1+3.5))
ax.set_ylim(0, np.log(1+3.5))
ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.yaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.set_xlabel('z_spec')
ax.set_ylabel('z_MAP')
ax.set_aspect(1)
ax.grid()
ax.text(0.95, 0.05, r'$N={0}$'.format(len(res)), ha='right', va='bottom', transform=ax.transAxes)
ax.plot(ax.get_xlim(), ax.get_xlim(), color='k', alpha=0.2, linewidth=1, zorder=-10)
fig.tight_layout(pad=0.1)
fig.savefig('grizli_v1_literature_zspec.pdf')
# COSMOS test
root = 'cos-grism-j100012p0210'
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND bic_diff > 100 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
# high bic_diff = unambiguous
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e', '-(bic_temp-bic_spl) as bic_diff_spl'], where="AND status > 5 AND (((bic_diff > 50 OR zwidth1/(1+z_map) < 0.01) AND chinu < 2))", table_root='unamb', sync='s3://grizli-v1/tables/')
# with d4000
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status > 5 AND chinu < 3 AND d4000 > 1 AND d4000 < 5 AND d4000_e > 0 AND d4000_e < 0.25 AND bic_diff > 5", table_root='d4000', sync='s3://grizli-v1/tables/')
# LBG?
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '-(bic_temp-bic_spl) as bic_diff_spl', 'splf01/splf02 as r12', 'splf02/splf03 as r23', 'splf02/sple02 as sn02'], where="AND status > 5 AND mag_auto > 23 AND bic_diff > -50 AND splf01/splf02 < 0.3 AND splf02/sple02 > 2 AND splf01 != 0 AND splf02 != 0 AND splf03 != 0 ".format(root), table_root='lbg_g800l', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# stars?
res = make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND bic_diff > 100 AND chinu < 1.5 AND mag_auto < 24 AND sn_Ha > 20", table_root='star', sync='s3://grizli-v1/tables/')
# By root
root = 'j001420m3030'
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max'], where="AND status > 5 AND root = '{0}' AND bic_diff > 5".format(root), table_root=root+'-fit', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
# G800L spec-zs
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '(z_map-z_spec)/(1+z_spec) as delta_z'], where="AND status > 5 AND z_spec > 0 AND z_spec_qual = 1 AND t_g800l > 0", table_root='zspec_g800l', sync='s3://grizli-v1/tables/')
# Large G800L likely mismatch [OIII]/Ha
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 3 AND mag_auto < 23 AND bic_diff > 5", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Potential Ly-a?
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'ew50_oiii/(1+z_map) as ew_oiii_rest', 'sn_oiii'], where="AND status > 5 AND t_g800l > 0 AND sn_oiii > 5 AND sn_ha > 0 AND flux_oiii/flux_ha > 1.8", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/')
# Continuum resid
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND bic_diff > 5 AND splf01 > 0 AND bic_diff > 50".format(root), table_root='xxx', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
res = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 as fresid', 'splf01/sple01 as sn01', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND f814w_tot_1 > 0 AND splf01 != 0 AND splf01/sple01 > 1 AND f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 0 AND (f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 < 0.3 OR f814w_tot_1*3.e18/8140/8140/splf01*1.e-29 > 4)", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'])
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf01', 'sple01', 'f814w_tot_1', 'f850lp_tot_1', 'flux_auto/flux_iso as flux_aper_corr', '23.9-2.5*log(splf01*8140*8140/3.e18*1.e29)-mag_auto as dmag'], where="AND status > 5 AND t_g800l > 0 AND splf01 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf01'], 1.e-22)*8140**2/3.e18*1.e29)
sql = grizli_db.make_html_table(engine=engine, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'a_image', 'flux_radius', 't_g800l', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'err_ha', 'sn_oiii', 'splf03', 'sple03', 'f140w_tot_1', 'f160w_tot_1', 'flux_auto/flux_iso as flux_aper_corr'], where="AND status > 5 AND t_g141 > 0 AND sple03 > 0", table_root='g800l_oiii_mismatch', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], get_sql=True)
res = pd.read_sql_query(sql, engine)
splmag = 23.9-2.5*np.log10(np.maximum(res['splf03'], 1.e-22)*1.2e4**2/3.e18*1.e29)
# Number of matches per field
counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 50 AND mag_auto < 24 group by root;", engine)
def from_sql(query, engine):
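    """Run `query` against the database and return the result as a GTable with display formats applied."""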
import pandas as pd
from grizli import utils
res = pd.read_sql_query(query, engine)
tab = utils.GTable.from_pandas(res)
set_column_formats(tab)
return tab
def render_for_notebook(tab, image_extensions=['stack', 'full', 'line'], bucket='grizli-v1', max_rows=20, link_root=True):
"""
Render images for inline display in a notebook
In [1]: from IPython.display import HTML
In [2]: HTML(tab)
"""
import pandas as pd
pd.set_option('display.max_colwidth', None)
rows = tab[:max_rows].copy()
buckets = [bucket]*len(rows)
for i, r in enumerate(rows['root']):
if r.startswith('cos-g'):
buckets[i] = 'grizli-cosmos-v2'
rows['bucket'] = buckets
rows['ext'] = 'longstring' # longer than the longest extension
s3url = 'https://s3.amazonaws.com/{bucket}/Pipeline/{root}/Extractions/{root}_{id:05d}.{ext}.png'
def href_root(root):
if root.startswith('cos-g'):
bucket_i = 'grizli-cosmos-v2'
else:
bucket_i = bucket
s3 = 'https://s3.amazonaws.com/'+bucket_i+'/Pipeline/{0}/Extractions/{0}.html'
return '<a href={0}>{1}</a>'.format(s3.format(root), root)
def path_to_image_html(path):
return '<a href={0}><img src="{0}"/></a>'.format(path)
# link for root
if link_root:
fmt = {'root': href_root}
else:
fmt = {}
for ext in image_extensions:
rows['ext'] = ext
urls = [s3url.format(**row) for row in rows.to_pandas().to_dict(orient='records')]
rows[ext] = urls
fmt[ext] = path_to_image_html
rows.remove_columns(['bucket', 'ext'])
out = rows.to_pandas().to_html(escape=False, formatters=fmt)
return out
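# Minimal usage sketch for the helper above (query and names are illustrative,
# not part of the original module): render a small query result inline in a notebook.
# tab = from_sql("select * from redshift_fit limit 5", engine)
# from IPython.display import HTML
# HTML(render_for_notebook(tab, max_rows=5))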
def add_to_charge():
engine = grizli_db.get_db_engine()
p = pd.read_sql_query('select distinct p_root from photometry_apcorr', engine)
f = pd.read_sql_query('select distinct field_root from charge_fields', engine)
new_fields = []
for root in p['p_root'].values:
if root not in f['field_root'].values:
print(root)
new_fields.append(root)
df = pd.DataFrame()
df['field_root'] = new_fields
df['comment'] = 'CANDELS'
ix = df['field_root'] == 'j214224m4420'
df.loc[ix, 'comment'] = 'Rafelski UltraDeep'
df.to_sql('charge_fields', engine, index=False, if_exists='append', method='multi')
def overview_table():
"""
Generate a new overview table with the redshift histograms
"""
from grizli.aws import db as grizli_db
import pandas as pd
from grizli import utils
engine = grizli_db.get_db_engine()
ch = from_sql("select * from charge_fields", engine)
by_mag = from_sql("select p_root as root, COUNT(p_root) as nmag from photometry_apcorr where mag_auto < 24 group by p_root;", engine)
by_nz = from_sql("select root, COUNT(root) as nz from redshift_fit where bic_diff > 30 group by root;", engine)
for count in [by_mag, by_nz]:
new_col = count.colnames[1]
ch[new_col] = -1
for r, n in zip(count['root'], count[new_col]):
ix = ch['field_root'] == r
ch[new_col][ix] = n
zhist = ['https://s3.amazonaws.com/grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png'.format(r) for r in ch['field_root']]
ch['zhist'] = ['<a href="{1}"><img src={0} height=300px></a>'.format(zh, zh.replace('_zhist.png', '.html')) for zh in zhist]
cols = ['field_root', 'field_ra', 'field_dec', 'mw_ebv', 'gaia5', 'nassoc', 'nfilt', 'filter', 'target', 'comment', 'proposal_id', 'proposal_pi', 'field_t_g800l', 'field_t_g102', 'field_t_g141', 'mast', 'footprint', 'rgb', 'nmag', 'nz', 'zhist', 'summary', 'log']
sortable = []
for c in cols:
if not hasattr(ch[c][0], 'upper'):
sortable.append(c)
# https://s3.amazonaws.com/grizli-v1/Master/CHArGE-July2019.html
table_root = 'CHArGE-July2019.zhist'
ch[cols].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=True)
os.system('aws s3 sync ./ s3://grizli-v1/Master/ --exclude "*" --include "{1}.html" --include "{1}.json" --acl public-read'.format('', table_root))
def run_all_redshift_fits():
##############
# Run all
from grizli.aws import db as grizli_db
import pandas as pd
engine = grizli_db.get_db_engine()
# By grism
res = pd.read_sql_query("select field_root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (nassoc < 200 AND (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%');", engine)
orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()
count = 0
for i, (root, ta, tb, tr, pi) in enumerate(zip(res['field_root'], res['field_t_g800l'], res['field_t_g102'], res['field_t_g141'], res['proposal_pi'])):
if root in orig_roots:
continue
count += 1
zmax = 1.6
if tb > 0:
zmax = 2.2
if tr > 0:
zmax = 3.2
print('\n\n', i, count, root, ta, tb, tr, pi, zmax, '\n\n')
phot_root = None
try:
grizli_db.run_lambda_fits(root, phot_root=phot_root,
min_status=6, zr=[0.01, zmax])
except:
pass
####
# Redo fits on reprocessed fields
# for i in range(2,11):
# root = 'j214224m4420gr{0:02d}'.format(i)
# print(root)
#
res = engine.execute("DELETE from redshift_fit WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from redshift_fit_quasar WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from stellar_fit WHERE (root = '{0}')".format(root), engine)
res = engine.execute("DELETE from photometry_apcorr WHERE (p_root = '{0}')".format(root), engine)
if False:
# Remove the whole thing
res = engine.execute("DELETE from exposure_log WHERE (parent = '{0}')".format(root), engine)
res = engine.execute("DELETE from charge_fields WHERE (field_root = '{0}')".format(root), engine)
grizli_db.run_lambda_fits(root, phot_root=root, min_status=2, zr=[0.01, zmax], mag_limits=[15, 26], engine=engine)
# for root in "j233844m5528 j105732p3620 j112416p1132 j113812m1134 j113848m1134 j122852p1046 j143200p0959 j152504p0423 j122056m0205 j122816m1132 j131452p2612".split():
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'q_z', 'q_z > -0.69 as q_z_TPR90', 'dlinesn'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'rgb', 'line'], show_hist=True)
grizli_db.aws_rgb_thumbnails(root, engine=engine)
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
def aws_rgb_thumbnails(root, bucket='grizli-v1', engine=None, thumb_args={}, ids=None, verbose=True, res=None):
"""
Make thumbnails for everything that has an entry in the redshift_fit table
"""
from grizli.aws import aws_drizzler, fit_redshift_lambda
if engine is None:
engine = get_db_engine(echo=False)
if res is None:
res = from_sql("SELECT root, id, ra, dec FROM redshift_fit WHERE root = '{0}' AND ra > 0".format(root), engine)
aws_prep_dir = 's3://{0}/Pipeline/{1}/Prep/'.format(bucket, root)
aws_bucket = 's3://{0}/Pipeline/{1}/Thumbnails/'.format(bucket, root)
event = {'make_segmentation_figure': True,
'aws_prep_dir': aws_prep_dir,
'single_output': True,
'combine_similar_filters': True,
'show_filters': ['visb', 'visr', 'y', 'j', 'h'],
'include_ir_psf': False,
'include_saturated': True,
'subtract_median': True,
'sync_fits': True,
'thumb_height': 2.0,
'scale_ab': 21,
'aws_bucket': aws_bucket,
'master': None,
'rgb_params': {'xsize': 4, 'output_dpi': None,
'rgb_min': -0.01, 'add_labels': False,
'output_format': 'png', 'show_ir': False,
'scl': 2, 'suffix': '.rgb', 'mask_empty': False,
'tick_interval': 1, 'pl': 1},
'remove': True,
'filters': ['f160w', 'f140w', 'f125w', 'f105w', 'f110w', 'f098m',
'f850lp', 'f814w', 'f775w', 'f606w', 'f475w',
'f555w', 'f600lp', 'f390w', 'f350lp'],
'half_optical_pixscale': True,
'theta': 0,
'kernel': 'square',
'pixfrac': 0.33,
'wcs': None,
'size': 6,
'pixscale': 0.1}
for k in thumb_args:
event[k] = thumb_args[k]
N = len(res)
for i in range(N):
id = res['id'][i]
ra = res['ra'][i]
dec = res['dec'][i]
root_i = res['root'][i]
if ids is not None:
if id not in ids:
continue
event['ra'] = ra
event['dec'] = dec
event['label'] = '{0}_{1:05d}'.format(root_i, id)
fit_redshift_lambda.send_event_lambda(event, verbose=verbose)
def count_sources_for_bad_persistence():
"""
Count the number of extracted objects for each id and look for fields
with few objects, which are usually problems with the persistence mask
"""
import pandas as pd
from grizli.aws import db as grizli_db
from grizli import utils
engine = grizli_db.get_db_engine(echo=False)
# Number of matches per field
counts = pd.read_sql_query("select root, COUNT(root) as n from redshift_fit, photometry_apcorr where phot_root = p_root AND id = p_id AND bic_diff > 5 AND mag_auto < 24 group by root;", engine)
counts = utils.GTable.from_pandas(counts)
so = np.argsort(counts['n'])
sh = """
BUCKET=grizli-v
root=j113812m1134
aws s3 rm --recursive s3://grizli-v1/Pipeline/${root}/ --include "*"
grism_run_single.sh ${root} --run_fine_alignment=True --extra_filters=g800l --bucket=grizli-v1 --preprocess_args.skip_single_optical_visits=True --mask_spikes=True --persistence_args.err_threshold=1
"""
def add_missing_photometry():
# Add missing photometry
import os
import pandas as pd
from grizli.aws import db as grizli_db
from grizli.pipeline import photoz
from grizli import utils
engine = grizli_db.get_db_engine(echo=False)
res = pd.read_sql_query("select distinct root from redshift_fit where root like 'j%%'", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()
# Missing grism fields?
res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where (field_t_g800l > 0 OR field_t_g141 > 0 OR field_t_g102 > 0) AND log LIKE '%%inish%%';", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct root from redshift_fit', engine)['root'].tolist()
# All photometry
res = pd.read_sql_query("select field_root as root, field_t_g800l, field_t_g102, field_t_g141, proposal_pi from charge_fields where nassoc < 200 AND log LIKE '%%inish%%' AND field_root LIKE 'j%%';", engine)['root'].tolist()
orig_roots = pd.read_sql_query('select distinct p_root as root from photometry_apcorr', engine)['root'].tolist()
count = 0
for root in res:
if root not in orig_roots:
count += 1
print(count, root)
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot_apcorr.fits .'.format(root))
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_phot.fits .'.format(root))
if not os.path.exists('{0}_phot_apcorr.fits'.format(root)):
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot_apcorr.fits .'.format(root))
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Prep/{0}_phot.fits .'.format(root))
if os.path.exists('{0}_phot_apcorr.fits'.format(root)):
grizli_db.add_phot_to_db(root, delete=False, engine=engine)
else:
if os.path.exists('{0}_phot.fits'.format(root)):
# Make the apcorr file
utils.set_warnings()
total_flux = 'flux_auto'
try:
obj = photoz.eazy_photoz(root, object_only=True,
apply_prior=False, beta_prior=True,
aper_ix=1,
force=True,
get_external_photometry=False,
compute_residuals=False,
total_flux=total_flux)
except:
continue
grizli_db.add_phot_to_db(root, delete=False,
engine=engine)
# 3D-HST
copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits --acl public-read
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/egs-mosaic_phot.fits s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot.fits --acl public-read
"""
grizli_db.run_lambda_fits('egs-grism-j141956p5255', min_status=6, zr=[0.01, 3.2])
copy = """
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/uds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('uds-grism-j021732m0512', min_status=6, zr=[0.01, 3.2])
# GDS
copy = """
aws s3 rm s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-grism-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gds-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gds-grism-j033236m2748/Extractions/gds-grism-j033236m2748_phot_apcorr.fits s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/gds-g800l-j033236m2748_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('gds-grism-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('gds-g800l-j033236m2748', phot_root='gds-grism-j033236m2748', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# GDN
copy = """
#aws s3 rm s3://grizli-v1/Pipeline/gds-g800l-j033236m2748/Extractions/ --recursive --exclude "*" --include "gds-g800l-j033236m2748_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-grism-j123656p6215_[0-9]*"
aws s3 rm s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/ --recursive --exclude "*" --include "gdn-g800l-j123656p6215_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/gdn-mosaic_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits --acl public-read
aws s3 cp s3://grizli-v1/Pipeline/gdn-grism-j123656p6215/Extractions/gdn-grism-j123656p6215_phot_apcorr.fits s3://grizli-v1/Pipeline/gdn-g800l-j123656p6215/Extractions/gdn-g800l-j123656p6215_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('gdn-grism-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 3.2], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('gdn-g800l-j123656p6215', phot_root='gdn-grism-j123656p6215', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# 3D-HST G800L
copy = """
aws s3 rm s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/ --recursive --exclude "*" --include "egs-g800l-j141956p5255_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/egs-grism-j141956p5255/Extractions/egs-grism-j141956p5255_phot_apcorr.fits s3://grizli-v1/Pipeline/egs-g800l-j141956p5255/Extractions/egs-g800l-j141956p5255_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
res = grizli_db.wait_on_db_update('egs-g800l-j141956p5255', dt=15, n_iter=120, engine=engine)
res = grizli_db.wait_on_db_update('uds-g800l-j021732m0512', dt=15, n_iter=120, engine=engine)
# UDS
copy = """
aws s3 rm s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/ --recursive --exclude "*" --include "uds-g800l-j021732m0512_[0-9]*"
aws s3 cp s3://grizli-v1/Pipeline/uds-grism-j021732m0512/Extractions/uds-grism-j021732m0512_phot_apcorr.fits s3://grizli-v1/Pipeline/uds-g800l-j021732m0512/Extractions/uds-g800l-j021732m0512_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('uds-g800l-j021732m0512', phot_root='uds-grism-j021732m0512', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
grizli_db.run_lambda_fits('egs-g800l-j141956p5255', phot_root='egs-grism-j141956p5255', min_status=6, zr=[0.01, 1.6], extra={'bad_pa_threshold': 10, 'use_phot_obj': False})
# Cosmos on oliveraws
copy = """
aws s3 rm s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/ --recursive --exclude "*" --include "cos-grism-j100012p0210_[0-9]*"
aws s3 cp /Users/gbrammer/Research/HST/Mosaics/Cosmos/cos-cnd-mosaic_phot_apcorr.fits s3://grizli-cosmos-v2/Pipeline/cos-grism-j100012p0210/Extractions/cos-grism-j100012p0210_phot_apcorr.fits --acl public-read
"""
grizli_db.run_lambda_fits('cos-grism-j100012p0210', min_status=6, zr=[0.01, 3.2], mag_limits=[17, 17.1], bucket='grizli-cosmos-v2')
os.system('sudo halt')
def set_column_formats(info, extra={}):
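    """Set numeric print/display formats on the columns of `info` based on the column names."""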
# Print formats
formats = {}
formats['ra'] = formats['dec'] = '.5f'
formats['mag_auto'] = formats['delta_z'] = '.2f'
formats['chinu'] = formats['chimin'] = formats['chimax'] = '.1f'
formats['bic_diff'] = formats['bic_temp'] = formats['bic_spl'] = '.1f'
formats['bic_poly'] = '.1f'
formats['dlinesn'] = formats['bic_spl'] = '.1f'
formats['flux_radius'] = formats['flux_radius_20'] = '.1f'
formats['flux_radius_90'] = '.1f'
formats['log_pdf_max'] = formats['log_risk'] = '.1f'
formats['d4000'] = formats['d4000_e'] = '.2f'
formats['dn4000'] = formats['dn4000_e'] = '.2f'
formats['z_spec'] = formats['z_map'] = formats['redshift'] = '.3f'
formats['z_spec_dr'] = '.1f'
formats['t_g141'] = formats['t_g102'] = formats['t_g800l'] = '.0f'
formats['zwidth1'] = formats['zw1'] = '.3f'
formats['zwidth2'] = formats['zw2'] = '.3f'
formats['q_z'] = '.2f'
formats['dz'] = '.3f'
for k in extra:
formats[k] = extra[k]
for c in info.colnames:
if c in formats:
info[c].format = formats[c]
elif c.startswith('sn_'):
info[c].format = '.1f'
elif c.startswith('mag_'):
info[c].format = '.2f'
elif '_ujy' in c:
info[c].format = '.2f'
elif c.startswith('ew_'):
info[c].format = '.1f'
elif ('q_z' in c):
info[c].format = '.2f'
elif ('zw' in c) | ('z_map' in c):
info[c].format = '.3f'
elif ('chinu' in c):
info[c].format = '.1f'
elif c.startswith('bic_'):
info[c].format = '.1f'
elif c in ['z02', 'z16', 'z50', 'z84', 'z97']:
info[c].format = '.3f'
elif c[:4] in ['splf', 'sple']:
info[c].format = '.1e'
elif c.startswith('flux_') | c.startswith('err_'):
info[c].format = '.1e'
def query_from_ds9(ds9, radius=5, engine=None, extra_cols=['mag_auto', 'z_map', 'bic_diff', 't_g800l', 't_g102', 't_g141'], extra_query='', table_root='/tmp/ds9_query'):
"""
Make a table by running a query for objects based on a DS9 pan position
"""
from grizli import utils, prep
if engine is None:
engine = get_db_engine(echo=False)
ra, dec = np.cast[float](ds9.get('pan fk5').split())
dd = radius/3600.
dr = dd/np.cos(dec/180*np.pi)
min_cols = ['root', 'id', 'status', 'ra', 'dec']
colstr = ','.join(min_cols + extra_cols)
q = from_sql(f'select {colstr} '
f'from redshift_fit natural join photometry_apcorr '
f'where ra > {ra-dr} AND ra < {ra+dr}'
f' AND dec > {dec-dd} and dec < {dec+dd}' + extra_query,
engine)
tt = utils.GTable()
tt['ra'] = [ra]
tt['dec'] = [dec]
_idx, _dr = tt.match_to_catalog_sky(q)
q['_dr'] = _dr
q['_dr'].format = '.2f'
so = np.argsort(q['_dr'])
make_html_table(sync=None, res=q[so], use_json=False, table_root=table_root, sort_column=('_dr', 1))
comment = [f'{id}' for id in q['id'][so]]
prep.table_to_regions(q[so], table_root+'.reg', comment=comment)
return q[so]
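# Usage sketch (assumes an open DS9 window wrapped by an object that exposes a
# .get('pan fk5') method, e.g. a pyds9.DS9 instance; not part of the original module):
# import pyds9
# ds9 = pyds9.DS9()
# nearby = query_from_ds9(ds9, radius=10, engine=engine, extra_query=' AND status > 4')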
def make_html_table(engine=None, columns=['root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'log_pdf_max', 'd4000', 'd4000_e'], where="AND status >= 5 AND root='j163852p4039'", tables=[], table_root='query', sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], sort_column=('bic_diff', -1), fit_table='redshift_fit', verbose=True, get_sql=False, res=None, show_hist=False, extra_formats={}, use_json=True, use_join=False):
"""
"""
import time
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from grizli import utils
from grizli.aws import db as grizli_db
if engine is None:
engine = get_db_engine(echo=False)
if len(tables) > 0:
extra_tables = ','+','.join(tables)
else:
extra_tables = ''
if use_join:
query = "SELECT {0} FROM {1} NATURAL JOIN photometry_apcorr WHERE {2};".format(','.join(columns), fit_table, where)
query = query.replace('WHERE AND', 'AND')
else:
query = "SELECT {0} FROM photometry_apcorr, {3}{1} WHERE phot_root = p_root AND id = p_id {2};".format(','.join(columns), extra_tables, where, fit_table)
if get_sql:
return query
if res is not None:
info = res
else:
res = pd.read_sql_query(query, engine)
info = utils.GTable.from_pandas(res)
if verbose:
print('Query: {0}\n Results N={1}'.format(query, len(res)))
if 'cdf_z' in info.colnames:
info.remove_column('cdf_z')
for c in info.colnames:
if c.startswith('p_'):
try:
info.rename_column(c, c[2:])
except:
pass
all_columns = info.colnames.copy()
if 'idx' not in info.colnames:
idx = ['<a href="http://vizier.u-strasbg.fr/viz-bin/VizieR?-c={0:.6f}+{1:.6f}&-c.rs=2">#{2:05d}</a>'.format(info['ra'][i], info['dec'][i], info['id'][i]) for i in range(len(info))]
info['idx'] = idx
all_columns.insert(0, 'idx')
all_columns.pop(all_columns.index('id'))
set_column_formats(info, extra=extra_formats)
print('Sort: ', sort_column, sort_column[0] in all_columns)
if sort_column[0] in all_columns:
scol = info[sort_column[0]]
if hasattr(scol, 'mask'):
sdata = scol.filled(fill_value=-np.inf).data
else:
sdata = scol
so = np.argsort(sdata)[::sort_column[1]]
#info = info[so[::sort_column[1]]]
# PNG columns
AWS = 'https://s3.amazonaws.com/grizli-v1/Pipeline'
bucket = ['grizli-cosmos-v2' if r.startswith('cos-') else 'grizli-v1' for r in info['root']]
for ext in png_ext:
if ext == 'thumb':
subdir = 'Thumbnails'
print(ext, subdir)
elif ext == 'rgb':
subdir = 'Thumbnails'
else:
subdir = 'Extractions'
if 'png_{0}'.format(ext) not in info.colnames:
png = ['{0}_{1:05d}.{2}.png'.format(root, id, ext) for root, id in zip(info['root'], info['id'])]
if ext == 'rgb':
js = '<a href={0}/{2}><img src={0}/{1} onmouseover="this.src = this.src.replace(\'rgb.pn\', \'seg.pn\')" onmouseout="this.src = this.src.replace(\'seg.pn\', \'rgb.pn\')" height=200></a>'
paths = ['{0}/{1}/{2}'.format(AWS.replace('grizli-v1', buck),
root, subdir)
for buck, root in zip(bucket, info['root'])]
png_url = [js.format(path, p,
p.replace('.rgb.png', '.thumb.png'))
for path, p in zip(paths, png)]
info['png_{0}'.format('rgb')] = png_url
else:
info['png_{0}'.format(ext)] = ['<a href="{0}/{1}/{2}/{3}"><img src={0}/{1}/{2}/{3} height=200></a>'.format(AWS.replace('grizli-v1', buck), root, subdir, p) for buck, root, p in zip(bucket, info['root'], png)]
all_columns.append('png_{0}'.format(ext))
sortable = []
for c in all_columns:
if not hasattr(info[c][0], 'upper'):
sortable.append(c)
info[all_columns][so].write_sortable_html('{0}.html'.format(table_root), replace_braces=True, localhost=False, max_lines=1e5, table_id=None, table_class='display compact', css=None, filter_columns=sortable, buttons=['csv'], toggle=True, use_json=use_json)
if show_hist:
from matplotlib.ticker import FixedLocator, AutoLocator, MaxNLocator
xti = xt = np.arange(0, 3.6, 0.5)
loc = np.arange(0, 3.6, 0.1)
bins = utils.log_zgrid([0.03, 3.5], 0.01)
fig = plt.figure(figsize=[8, 4])
ax = fig.add_subplot(111)
ax.hist(np.log(1+res['z_map']), bins=np.log(1+bins), color='k',
alpha=0.2, label=table_root, density=False)
clip = res['bic_diff'].values > 30
ax.hist(np.log(1+res['z_map'].values[clip]), bins=np.log(1+bins),
color='r', alpha=0.3, density=False)
xts = ax.set_xticks(np.log(1+xt))
xtl = ax.set_xticklabels(xti)
ax.xaxis.set_minor_locator(FixedLocator(np.log(1+loc)))
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax.set_xlabel('z_map')
ax.set_ylabel(r'$N$')
# Label to show line mis-id
dz_wrong = (6563.-5007)/5007
ax.plot(np.arange(5)*dz_wrong, np.ones(5)*ax.get_ylim()[1], marker='.', markerfacecolor='w', markeredgecolor='w', color='r', markersize=10)
ax.set_xlim(0, np.log(1+3.7))
ax.grid()
ax.legend(loc='upper right')
fig.tight_layout(pad=0.1)
fig.text(1-0.02, 0.02, time.ctime(), ha='right', va='bottom', transform=fig.transFigure, fontsize=5)
fig.savefig('{0}_zhist.png'.format(table_root))
if sync:
os.system('aws s3 sync ./ {0} --exclude "*" --include "{1}.html" --include "{1}.json" --include "{1}_zhist.png" --acl public-read'.format(sync, table_root))
return res
def get_exposure_info():
"""
Get exposure information from the MAST databases
"""
import mastquery.query
master = 'grizli-v1-19.12.04'
tab = utils.read_catalog('{0}_visits.fits'.format(master))
all_visits = np.load('{0}_visits.npy'.format(master), allow_pickle=True)[0]
all_files = []
for v in all_visits:
all_files.extend(v['files'])
prog = [f[1:4] for f in all_files]
_res = np.unique(np.array(prog), return_counts=True)
t = utils.GTable()
t['prog'] = _res[0]
t['count'] = _res[1]
so = np.argsort(t['count'])
t = t[so[::-1]]
for pr in t['prog']:
print(pr)
if os.path.exists('{0}_query.fits'.format(pr)):
continue
try:
_q = mastquery.query.run_query(obs_id='[ij]{0}*'.format(pr))
_p = mastquery.query.get_products_table(_q)
except:
continue
_q.write('{0}_query.fits'.format(pr))
_p.write('{0}_prod.fits'.format(pr))
# Send to AWS
from grizli.aws import db
import pandas as pd
from astropy.table import Table
engine = db.get_db_engine()
files = glob.glob('*query.fits')
files.sort()
cols = ['obs_id', 'target', 'ra', 'dec', 't_min', 't_max', 'exptime', 'wavelength_region', 'filter', 'em_min', 'em_max', 'target_classification', 'obs_title', 't_obs_release', 'instrument_name', 'proposal_pi', 'proposal_id', 'proposal_type', 'footprint', 'dataRights', 'mtFlag', 'obsid', 'objID', 'visit']
for i, file in enumerate(files):
print(file)
_q = Table.read(file, character_as_bytes=False)
_q['proposal_id'] = np.cast[np.int16](_q['proposal_id'])
_q['obsid'] = np.cast[np.int64](_q['obsid'])
_q['objID'] = np.cast[np.int64](_q['objID'])
df = _q[cols].to_pandas()
df.to_sql('mast_query', engine, index=False, if_exists='append', method='multi')
files = glob.glob('*_prod.fits')
files.sort()
cols = ['obsid', 'dataset']
for i, file in enumerate(files):
print(i, file)
_p = Table.read(file, character_as_bytes=False)
_p['obsid'] = np.cast[np.int64](_p['obsid'])
_p['dataset'] = [d[:-1] for d in _p['observation_id']]
df = _p[cols].to_pandas()
df.to_sql('mast_products', engine, index=False, if_exists='append', method='multi')
##########
# Exposure log
# Initialize, adding an array column manually for the footprints
v = all_visits[0]
N = len(v['files'])
fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
df = pd.DataFrame()
df['file'] = [f.split('_')[0] for f in v['files']]
df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
df['extension'] = [f.split('_')[1][:3] for f in v['files']]
df['filter'] = v['filter']
df['parent'] = v['parent']
df['awspath'] = v['awspath']
df['product'] = v['product']
df['filter'] = v['product'].split('-')[-1]
df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
# Make table
engine.execute('drop table exposure_log;')
df.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
engine.execute('alter table exposure_log add column footprint float [];')
engine.execute('delete from exposure_log where True;')
SKIP = 1000
for i, v in enumerate(all_visits):
print(i, v['parent'], v['product'])
N = len(v['files'])
fps = [np.array(fp.convex_hull.boundary.xy)[:, :-1].tolist() for fp in v['footprints']]
if (i % SKIP) == 0:
df0 = df[:0]
df = pd.DataFrame()
df['file'] = [f.split('_')[0] for f in v['files']]
df['dataset'] = [f.split('_')[0][:-1] for f in v['files']]
df['extension'] = [f.split('_')[1][:3] for f in v['files']]
df['filter'] = v['filter']
df['parent'] = v['parent']
df['awspath'] = v['awspath']
df['product'] = v['product']
df['filter'] = v['product'].split('-')[-1]
df['ra'] = [fp.centroid.xy[0][0] for fp in v['footprints']]
df['dec'] = [fp.centroid.xy[1][0] for fp in v['footprints']]
df['area'] = [fp.area*np.cos(df['dec'][i]/180*np.pi)*3600 for i, fp in enumerate(v['footprints'])]
df['footprint'] = fps
if (i % SKIP) > 0:
df0 = df0.append(df)
if (i % SKIP) == SKIP-1:
print('>>> to DB >>> ({0}, {1})'.format(i, len(df0)))
df0.to_sql('exposure_log', engine, index=False, if_exists='append', method='multi')
def get_exposures_at_position(ra, dec, engine, dr=10):
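    """Return exposure_log rows within a box of half-width `dr` degrees around (ra, dec), with the RA range widened by 1/cos(dec)."""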
cosdec = np.cos(dec/180*np.pi)
res = db.from_sql('select * from exposure_log where (ABS(ra - {0}) < {1}) AND (ABS(dec-{2}) < {3})'.format(ra, dr/cosdec, dec, dr), engine)
return res
def add_irac_table():
from scipy.spatial import ConvexHull
os.chdir('/Users/gbrammer/Research/HST/CHArGE/FieldsSummary')
files = glob.glob('*ipac.fits')
files.sort()
bands = ['IRAC 3.6um', 'IRAC 4.5um', 'IRAC 5.8um', 'IRAC 8.0um', 'MIPS 24um']
bkey = {}
for b in bands:
key = b.replace(' ', '').replace('.', '')[:-2].lower()
bkey[key] = b
N = 0
data = {'field_root': []}
aor_data = {'field_root': [], 'reqkey': []}
for k in bkey:
data['exp_'+k] = []
data['n_'+k] = []
data['fp_'+k] = []
for i, file in enumerate(files):
tab = utils.read_catalog(file)
field = file.split('_ipac')[0]
if 'x' in tab.colnames:
data['field_root'].append(field)
for k in bkey:
data['exp_'+k].append(0)
data['n_'+k].append(0)
data['fp_'+k].append([])
continue
N += len(tab)
print(i, file, N)
data['field_root'].append(field)
for k in bkey:
sel = tab['with_hst'] & (tab['wavelength'] == bkey[k])
data['exp_'+k].append(tab['exposuretime'][sel].sum()/3600)
data['n_'+k].append(sel.sum())
if sel.sum() == 0:
data['fp_'+k].append([])
continue
r, d = [], []
for j in range(4):
r.extend(tab['ra{0}'.format(j+1)][sel].data)
d.extend(tab['dec{0}'.format(j+1)][sel].data)
pts = np.array([r, d]).T
vert = ConvexHull(pts).vertices
fp = pts[vert, :]
data['fp_'+k].append(fp.T.tolist())
aors = np.unique(tab['reqkey'])
aor_data['field_root'].extend([field]*len(aors))
aor_data['reqkey'].extend(list(aors))
#
import pandas as pd
df = pd.DataFrame(aor_data)
df.to_sql('spitzer_aors', engine, index=False, if_exists='append', method='multi')
df = pd.DataFrame(data)
# First row to initialize table
first = df[0:1]
for k in bkey:
first.pop('fp_'+k)
engine.execute('drop table if exists spitzer_log;')
first.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
for k in bkey:
cmd = 'alter table spitzer_log add column fp_{0} float [];'.format(k)
engine.execute(cmd)
engine.execute('delete from spitzer_log where True;')
df.to_sql('spitzer_log', engine, index=False, if_exists='append', method='multi')
def show_all_fields():
plt.ioff()
res = pd.read_sql_query("select distinct root from redshift_fit order by root;", engine)
roots = res['root'].tolist()
for root in roots:
print('\n\n', root, '\n\n')
if os.path.exists('{0}_zhist.png'.format(root)):
continue
try:
if False:
res = pd.read_sql_query("select root,id,status from redshift_fit where root = '{0}';".format(root), engine)
res = pd.read_sql_query("select status, count(status) as n from redshift_fit where root = '{0}' group by status;".format(root), engine)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn', 'q_z'], where="AND status > 4 AND root = '{0}'".format(root), table_root=root, sync='s3://grizli-v1/Pipeline/{0}/Extractions/'.format(root), png_ext=['R30', 'stack', 'full', 'line'], show_hist=True)
if False:
grizli_db.set_phot_root(root, phot_root, engine)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND root = '{0}' AND (bic_diff > 20 OR zwidth1/(1+z_map) < 0.01)".format(root), table_root=root, sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line', 'sed'], show_hist=False)
res = grizli_db.make_html_table(engine=engine, columns=['mtime', 'root', 'status', 'id', 'p_ra', 'p_dec', 'mag_auto', 'flux_radius', 't_g800l', 't_g102', 't_g141', 'z_spec', 'z_map', 'bic_diff', 'chinu', 'zwidth1/(1+z_map) as zw1', 'dlinesn'], where="AND status > 4 AND phot_root = '{0}' AND bic_diff > 20".format(phot_root), table_root=phot_root, sync='s3://grizli-v1/tables/', png_ext=['R30', 'stack', 'full', 'line'], show_hist=False)
except:
continue
os.system('aws s3 cp s3://grizli-v1/Pipeline/{0}/Extractions/{0}_zhist.png s3://grizli-v1/tables/'.format(root))
| 43.497122 | 2,914 | 0.6076 |
aceef70e4145f427a39a82811cf0b81bca133615 | 1,239 | py | Python | src/encoded/types/bioreplicate.py | utsw-bicf/pandiseased | ecb2c305a5c4bf468b0964137984d1800c798f01 | [
"MIT"
] | 1 | 2019-07-18T21:57:10.000Z | 2019-07-18T21:57:10.000Z | src/encoded/types/bioreplicate.py | utsw-bicf/pandiseased | ecb2c305a5c4bf468b0964137984d1800c798f01 | [
"MIT"
] | 321 | 2019-08-20T19:32:17.000Z | 2021-10-15T20:00:02.000Z | src/encoded/types/bioreplicate.py | utsw-bicf/pandiseased | ecb2c305a5c4bf468b0964137984d1800c798f01 | [
"MIT"
] | 2 | 2019-09-20T19:58:08.000Z | 2020-01-28T15:04:31.000Z | from snovault import (
calculated_property,
collection,
load_schema,
)
from pyramid.security import (
Allow,
Deny,
Everyone,
)
from .base import (
ALLOW_SUBMITTER_ADD,
Item,
paths_filtered_by_status,
)
import re
@collection(
name='bioreplicates',
acl=ALLOW_SUBMITTER_ADD,
properties={
'title': 'Bioreplicates',
'description': 'Listing of BioReplicates',
})
class Bioreplicate(Item):
item_type = 'bioreplicate'
schema = load_schema('encoded:schemas/bioreplicate.json')
rev = {
'biofile': ('Biofile', 'bioreplicate'),
}
embedded = [
'bioexperiment',
'biolibrary',
'biolibrary.biospecimen',
'biofile'
]
audit_inherit = [
]
set_status_up = [
]
set_status_down = []
STATUS_ACL = {
'released': [(Allow, 'group.verification', ['view_details'])]
}
@calculated_property(
schema={
"title": "Biofile",
"type": "array",
"items": {
"type": 'string',
"linkTo": "Biofile"
},
}
)
def biofile(self, request, biofile):
return paths_filtered_by_status(request, biofile)
| 20.65 | 69 | 0.560936 |
aceef7cd14e6132e8560a0d1f356286ade861bd7 | 6,013 | py | Python | bigtable/snippets/reads/read_snippets.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 1 | 2020-05-07T02:21:17.000Z | 2020-05-07T02:21:17.000Z | bigtable/snippets/reads/read_snippets.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | null | null | null | bigtable/snippets/reads/read_snippets.py | summersab/python-docs-samples | 7c1e9685fe190f7789d8e1dbcfe8c01a20e3dc66 | [
"Apache-2.0"
] | 1 | 2021-10-23T01:22:02.000Z | 2021-10-23T01:22:02.000Z | #!/usr/bin/env python
# Copyright 2020, Google LLC
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# [START bigtable_reads_row]
# [START bigtable_reads_row_partial]
# [START bigtable_reads_rows]
# [START bigtable_reads_row_range]
# [START bigtable_reads_row_ranges]
# [START bigtable_reads_prefix]
# [START bigtable_reads_filter]
from google.cloud import bigtable
# [END bigtable_reads_row]
# [END bigtable_reads_row_partial]
# [END bigtable_reads_rows]
# [END bigtable_reads_row_range]
# [END bigtable_reads_row_ranges]
# [END bigtable_reads_prefix]
# [END bigtable_reads_filter]
# [START bigtable_reads_row_partial]
# [START bigtable_reads_filter]
import google.cloud.bigtable.row_filters as row_filters
# [END bigtable_reads_row_partial]
# [END bigtable_reads_filter]
# [START bigtable_reads_rows]
# [START bigtable_reads_row_range]
# [START bigtable_reads_row_ranges]
# [START bigtable_reads_prefix]
from google.cloud.bigtable.row_set import RowSet
# [END bigtable_reads_rows]
# [END bigtable_reads_row_range]
# [END bigtable_reads_row_ranges]
# [END bigtable_reads_prefix]
# [START bigtable_reads_row]
def read_row(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
row_key = "phone#4c410523#20190501"
row = table.read_row(row_key)
print_row(row)
# [END bigtable_reads_row]
# [START bigtable_reads_row_partial]
def read_row_partial(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
row_key = "phone#4c410523#20190501"
col_filter = row_filters.ColumnQualifierRegexFilter(b'os_build')
row = table.read_row(row_key, filter_=col_filter)
print_row(row)
# [END bigtable_reads_row_partial]
# [START bigtable_reads_rows]
def read_rows(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
row_set = RowSet()
row_set.add_row_key(b"phone#4c410523#20190501")
row_set.add_row_key(b"phone#4c410523#20190502")
rows = table.read_rows(row_set=row_set)
for row in rows:
print_row(row)
# [END bigtable_reads_rows]
# [START bigtable_reads_row_range]
def read_row_range(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
row_set = RowSet()
row_set.add_row_range_from_keys(
start_key=b"phone#4c410523#20190501",
end_key=b"phone#4c410523#201906201")
rows = table.read_rows(row_set=row_set)
for row in rows:
print_row(row)
# [END bigtable_reads_row_range]
# [START bigtable_reads_row_ranges]
def read_row_ranges(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
row_set = RowSet()
row_set.add_row_range_from_keys(
start_key=b"phone#4c410523#20190501",
end_key=b"phone#4c410523#201906201")
row_set.add_row_range_from_keys(
start_key=b"phone#5c10102#20190501",
end_key=b"phone#5c10102#201906201")
rows = table.read_rows(row_set=row_set)
for row in rows:
print_row(row)
# [END bigtable_reads_row_ranges]
# [START bigtable_reads_prefix]
def read_prefix(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
prefix = "phone#"
end_key = prefix[:-1] + chr(ord(prefix[-1]) + 1)
row_set = RowSet()
row_set.add_row_range_from_keys(prefix.encode("utf-8"),
end_key.encode("utf-8"))
rows = table.read_rows(row_set=row_set)
for row in rows:
print_row(row)
# [END bigtable_reads_prefix]
# [START bigtable_reads_filter]
def read_filter(project_id, instance_id, table_id):
client = bigtable.Client(project=project_id, admin=True)
instance = client.instance(instance_id)
table = instance.table(table_id)
rows = table.read_rows(filter_=row_filters.ValueRegexFilter(b"PQ2A.*$"))
for row in rows:
print_row(row)
# [END bigtable_reads_filter]
# [START bigtable_reads_row]
# [START bigtable_reads_row_partial]
# [START bigtable_reads_rows]
# [START bigtable_reads_row_range]
# [START bigtable_reads_row_ranges]
# [START bigtable_reads_prefix]
# [START bigtable_reads_filter]
def print_row(row):
print("Reading data for {}:".format(row.row_key.decode('utf-8')))
for cf, cols in sorted(row.cells.items()):
print("Column Family {}".format(cf))
for col, cells in sorted(cols.items()):
for cell in cells:
labels = " [{}]".format(",".join(cell.labels)) \
if len(cell.labels) else ""
print(
"\t{}: {} @{}{}".format(col.decode('utf-8'),
cell.value.decode('utf-8'),
cell.timestamp, labels))
print("")
# [END bigtable_reads_row]
# [END bigtable_reads_row_partial]
# [END bigtable_reads_rows]
# [END bigtable_reads_row_range]
# [END bigtable_reads_row_ranges]
# [END bigtable_reads_prefix]
# [END bigtable_reads_filter]
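# Usage sketch for the snippets above (identifiers are placeholders, not real resources):
# read_row("my-project", "my-instance", "my-table")
# read_prefix("my-project", "my-instance", "my-table")
# read_filter("my-project", "my-instance", "my-table")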
| 31.15544 | 76 | 0.716281 |
aceef9447402b0faefdb3491cedbbcbf5422ff6b | 2,064 | py | Python | anvil/plugins/maya/load/ava_rig.py | icyvapor/config | 0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018 | [
"MIT"
] | null | null | null | anvil/plugins/maya/load/ava_rig.py | icyvapor/config | 0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018 | [
"MIT"
] | null | null | null | anvil/plugins/maya/load/ava_rig.py | icyvapor/config | 0b367f02da9cf5c7dcfb5253d99da2f1dfc5e018 | [
"MIT"
] | null | null | null | import avalon.maya
class RigLoader(avalon.maya.Loader):
"""Specific loader for rigs
This automatically creates an instance for animators upon load.
"""
families = ["anvil.rig"]
representations = ["ma"]
def process(self, name, namespace, context, data):
from maya import cmds
from avalon import api
nodes = cmds.file(self.fname,
namespace=namespace,
reference=True,
returnNewNodes=True,
groupReference=True,
groupName=namespace + ":" + name)
# Store for post-process
self[:] = nodes
# Trigger post process only if it's not been set to disabled
if data.get("post_process", True):
# TODO(marcus): We are hardcoding the name "out_SET" here.
# Better register this keyword, so that it can be used
# elsewhere, such as in the Integrator plug-in,
# without duplication.
output = next(
(node for node in self
if node.endswith("out_SET")), None)
controls = next(
(node for node in self
if node.endswith("controls_SET")), None)
assert output, "No out_SET in rig, this is a bug."
assert controls, "No controls_SET in rig, this is a bug."
cmds.select([output, controls], noExpand=True)
dependencies = [context["representation"]["_id"]]
asset = context["asset"]["name"] + "_"
avalon.maya.create(
name=avalon.maya.unique_name(asset),
# Publish to the currently set asset, and not the
# asset from which the Rig was produced.
asset=api.Session["AVALON_ASSET"],
family="anvil.animation",
options={"useSelection": True},
data={
"dependencies": " ".join(str(d) for d in dependencies)
})
| 33.836066 | 74 | 0.528101 |
aceef95125d6d2093c88299933965e7da376694a | 3,148 | py | Python | notebooks/109.0-BDP-hypergraph-cascade.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | notebooks/109.0-BDP-hypergraph-cascade.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | notebooks/109.0-BDP-hypergraph-cascade.py | zeou1/maggot_models | 4e1b518c2981ab1ca9607099c3813e8429d94ca4 | [
"BSD-3-Clause"
] | null | null | null | # %% [markdown]
# #
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from ast import literal_eval
import pymaid
from src.data import load_metagraph
from src.io import savecsv, savefig, saveskels
from src.pymaid import start_instance
from itertools import chain
FNAME = os.path.basename(__file__)[:-3]
print(FNAME)
def stashfig(name, **kws):
savefig(name, foldername=FNAME, save_on=True, **kws)
VERSION = "2020-03-09"
print(f"Using version {VERSION}")
mg = load_metagraph("G", version=VERSION)
connectors = pd.read_csv(
"maggot_models/data/processed/2020-03-09/connectors.csv",
index_col=0,
dtype={"presynaptic_to": int, "presynaptic_to_node": int},
converters={"postsynaptic_to": literal_eval, "postsynaptic_to_node": literal_eval},
)
# %% [markdown]
# #
class HypergraphCascade:
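    """Probabilistic cascade on the connector (synapse) hypergraph.

    Starting from a set of active skeleton IDs, each step keeps every outgoing
    synapse of the active cells with probability `p` and activates the union of
    their postsynaptic partners.
    """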
def __init__(self, connectors, store_fmt="hist", max_depth=10, p=0.01):
self.connectors = connectors
self.store_fmt = store_fmt
self.max_depth = max_depth
self.p = p
def start_cascade(self, start_ids):
self._active_ids = [start_ids]
self.hops = 0
def step_cascade(self):
self.hops += 1
connectors = self.connectors
# TODO this should be a dictionary keyed by cell ID or index
candidate_syns = connectors[connectors["presynaptic_to"].isin(self._active_ids)]
n_syns = len(candidate_syns)
# simulate a bernoulli for each synapse
outcomes = np.random.uniform(size=n_syns)
inds = np.zeros(n_syns, dtype=bool)
inds[outcomes < self.p] = True
self._active_syns = candidate_syns.iloc[inds]
next_ids = self._active_syns["postsynaptic_to"]
next_ids = list(chain.from_iterable(next_ids))
next_ids = np.unique(next_ids)
self._active_ids = next_ids
import seaborn as sns
def plot_syns(connector_df):
pg = sns.PairGrid(data=connector_df, x_vars=["x", "y", "z"], y_vars=["x", "y", "z"])
pg.map(sns.scatterplot, alpha=0.02, linewidth=0, s=1)
n_sims = 20
syn_lists = []
for i in range(n_sims):
syns = []
hgc = HypergraphCascade(connectors)
hgc.start_cascade(3299214)
hgc.step_cascade()
print(hgc.hops)
print(hgc._active_ids)
syns.append(hgc._active_syns)
hgc.step_cascade()
print(hgc.hops)
print(hgc._active_ids)
syns.append(hgc._active_syns)
hgc.step_cascade()
print(hgc.hops)
print(hgc._active_ids)
syns.append(hgc._active_syns)
syn_lists.append(syns)
hgc.step_cascade()
print(hgc.hops)
print(hgc._active_ids)
syns.append(hgc._active_syns)
syn_lists.append(syns)
new_syns = []
for syns in syn_lists:
for hops, syn in enumerate(syns):
syn["hops"] = hops + 1
new_syns.append(syn)
syn_df = pd.concat(new_syns, axis=0, ignore_index=True)
pg = sns.PairGrid(
data=syn_df,
x_vars=["x", "y", "z"],
y_vars=["x", "y", "z"],
hue="hops",
palette="plasma",
)
pg.map_offdiag(sns.scatterplot, alpha=1, linewidth=0, s=10)
# %% [markdown]
# # The plan - general cascade model
# base class
| 24.59375 | 88 | 0.670902 |
aceef9c61ab029bce2d4ede1b4f7004cdac35957 | 53,457 | py | Python | awswrangler/redshift.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | awswrangler/redshift.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | awswrangler/redshift.py | njdanielsen/aws-data-wrangler | 5cdb316224370e952dfb3a701825e1b1ab331105 | [
"Apache-2.0"
] | null | null | null | """Amazon Redshift Module."""
import logging
import uuid
from typing import Any, Dict, Iterator, List, Optional, Tuple, Union
import boto3
import botocore
import pandas as pd
import pyarrow as pa
import redshift_connector
from awswrangler import _data_types
from awswrangler import _databases as _db_utils
from awswrangler import _utils, exceptions, s3
_logger: logging.Logger = logging.getLogger(__name__)
_RS_DISTSTYLES: List[str] = ["AUTO", "EVEN", "ALL", "KEY"]
_RS_SORTSTYLES: List[str] = ["COMPOUND", "INTERLEAVED"]
def _validate_connection(con: redshift_connector.Connection) -> None:
if not isinstance(con, redshift_connector.Connection):
raise exceptions.InvalidConnection(
"Invalid 'conn' argument, please pass a "
"redshift_connector.Connection object. Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog."
)
def _drop_table(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> None:
schema_str = f'"{schema}".' if schema else ""
sql = f'DROP TABLE IF EXISTS {schema_str}"{table}"'
_logger.debug("Drop table query:\n%s", sql)
cursor.execute(sql)
def _get_primary_keys(cursor: redshift_connector.Cursor, schema: str, table: str) -> List[str]:
cursor.execute(f"SELECT indexdef FROM pg_indexes WHERE schemaname = '{schema}' AND tablename = '{table}'")
result: str = cursor.fetchall()[0][0]
rfields: List[str] = result.split("(")[1].strip(")").split(",")
fields: List[str] = [field.strip().strip('"') for field in rfields]
return fields
def _does_table_exist(cursor: redshift_connector.Cursor, schema: Optional[str], table: str) -> bool:
schema_str = f"TABLE_SCHEMA = '{schema}' AND" if schema else ""
cursor.execute(
f"SELECT true WHERE EXISTS ("
f"SELECT * FROM INFORMATION_SCHEMA.TABLES WHERE "
f"{schema_str} TABLE_NAME = '{table}'"
f");"
)
return len(cursor.fetchall()) > 0
def _make_s3_auth_string(
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
iam_role: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
) -> str:
if aws_access_key_id is not None and aws_secret_access_key is not None:
auth_str: str = f"ACCESS_KEY_ID '{aws_access_key_id}'\nSECRET_ACCESS_KEY '{aws_secret_access_key}'\n"
if aws_session_token is not None:
auth_str += f"SESSION_TOKEN '{aws_session_token}'\n"
elif iam_role is not None:
auth_str = f"IAM_ROLE '{iam_role}'\n"
else:
_logger.debug("Attempting to get S3 authorization credentials from boto3 session.")
credentials: botocore.credentials.ReadOnlyCredentials
credentials = _utils.get_credentials_from_session(boto3_session=boto3_session)
if credentials.access_key is None or credentials.secret_key is None:
raise exceptions.InvalidArgument(
"One of IAM Role or AWS ACCESS_KEY_ID and SECRET_ACCESS_KEY must be "
"given. Unable to find ACCESS_KEY_ID and SECRET_ACCESS_KEY in boto3 "
"session."
)
auth_str = f"ACCESS_KEY_ID '{credentials.access_key}'\nSECRET_ACCESS_KEY '{credentials.secret_key}'\n"
if credentials.token is not None:
auth_str += f"SESSION_TOKEN '{credentials.token}'\n"
return auth_str
def _copy(
cursor: redshift_connector.Cursor,
path: str,
table: str,
serialize_to_json: bool,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
boto3_session: Optional[str] = None,
schema: Optional[str] = None,
) -> None:
if schema is None:
table_name: str = f'"{table}"'
else:
table_name = f'"{schema}"."{table}"'
auth_str: str = _make_s3_auth_string(
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
boto3_session=boto3_session,
)
ser_json_str: str = " SERIALIZETOJSON" if serialize_to_json else ""
sql: str = f"COPY {table_name}\nFROM '{path}' {auth_str}\nFORMAT AS PARQUET{ser_json_str}"
_logger.debug("copy query:\n%s", sql)
cursor.execute(sql)
def _upsert(
cursor: redshift_connector.Cursor,
table: str,
temp_table: str,
schema: str,
primary_keys: Optional[List[str]] = None,
) -> None:
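    """Emulate an upsert: delete target rows that match the staging table on the
    primary keys, insert everything from the staging table, then drop it."""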
if not primary_keys:
primary_keys = _get_primary_keys(cursor=cursor, schema=schema, table=table)
_logger.debug("primary_keys: %s", primary_keys)
if not primary_keys:
raise exceptions.InvalidRedshiftPrimaryKeys()
equals_clause: str = f"{table}.%s = {temp_table}.%s"
join_clause: str = " AND ".join([equals_clause % (pk, pk) for pk in primary_keys])
sql: str = f'DELETE FROM "{schema}"."{table}" USING {temp_table} WHERE {join_clause}'
_logger.debug(sql)
cursor.execute(sql)
sql = f"INSERT INTO {schema}.{table} SELECT * FROM {temp_table}"
_logger.debug(sql)
cursor.execute(sql)
_drop_table(cursor=cursor, schema=schema, table=temp_table)
def _validate_parameters(
redshift_types: Dict[str, str],
diststyle: str,
distkey: Optional[str],
sortstyle: str,
sortkey: Optional[List[str]],
) -> None:
if diststyle not in _RS_DISTSTYLES:
raise exceptions.InvalidRedshiftDiststyle(f"diststyle must be in {_RS_DISTSTYLES}")
cols = list(redshift_types.keys())
_logger.debug("Redshift columns: %s", cols)
if (diststyle == "KEY") and (not distkey):
raise exceptions.InvalidRedshiftDistkey("You must pass a distkey if you intend to use KEY diststyle")
if distkey and distkey not in cols:
raise exceptions.InvalidRedshiftDistkey(f"distkey ({distkey}) must be in the columns list: {cols})")
if sortstyle and sortstyle not in _RS_SORTSTYLES:
raise exceptions.InvalidRedshiftSortstyle(f"sortstyle must be in {_RS_SORTSTYLES}")
if sortkey:
if not isinstance(sortkey, list):
raise exceptions.InvalidRedshiftSortkey(
f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {sortkey}"
)
for key in sortkey:
if key not in cols:
raise exceptions.InvalidRedshiftSortkey(
f"sortkey must be a List of items in the columns list: {cols}. " f"Currently value: {key}"
)
def _redshift_types_from_path(
path: Optional[Union[str, List[str]]],
varchar_lengths_default: int,
varchar_lengths: Optional[Dict[str, int]],
parquet_infer_sampling: float,
path_suffix: Optional[str],
path_ignore_suffix: Optional[str],
use_threads: bool,
boto3_session: Optional[boto3.Session],
s3_additional_kwargs: Optional[Dict[str, str]],
) -> Dict[str, str]:
"""Extract Redshift data types from a Pandas DataFrame."""
_varchar_lengths: Dict[str, int] = {} if varchar_lengths is None else varchar_lengths
session: boto3.Session = _utils.ensure_session(session=boto3_session)
_logger.debug("Scanning parquet schemas on s3...")
athena_types, _ = s3.read_parquet_metadata(
path=path,
sampling=parquet_infer_sampling,
path_suffix=path_suffix,
path_ignore_suffix=path_ignore_suffix,
dataset=False,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
_logger.debug("athena_types: %s", athena_types)
redshift_types: Dict[str, str] = {}
for col_name, col_type in athena_types.items():
length: int = _varchar_lengths[col_name] if col_name in _varchar_lengths else varchar_lengths_default
redshift_types[col_name] = _data_types.athena2redshift(dtype=col_type, varchar_length=length)
return redshift_types
def _create_table(
df: Optional[pd.DataFrame],
path: Optional[Union[str, List[str]]],
cursor: redshift_connector.Cursor,
table: str,
schema: str,
mode: str,
index: bool,
dtype: Optional[Dict[str, str]],
diststyle: str,
sortstyle: str,
distkey: Optional[str],
sortkey: Optional[List[str]],
primary_keys: Optional[List[str]],
varchar_lengths_default: int,
varchar_lengths: Optional[Dict[str, int]],
parquet_infer_sampling: float = 1.0,
path_suffix: Optional[str] = None,
path_ignore_suffix: Optional[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Tuple[str, Optional[str]]:
if mode == "overwrite":
_drop_table(cursor=cursor, schema=schema, table=table)
elif _does_table_exist(cursor=cursor, schema=schema, table=table) is True:
if mode == "upsert":
guid: str = uuid.uuid4().hex
temp_table: str = f"temp_redshift_{guid}"
sql: str = f'CREATE TEMPORARY TABLE {temp_table} (LIKE "{schema}"."{table}")'
_logger.debug(sql)
cursor.execute(sql)
return temp_table, None
return table, schema
diststyle = diststyle.upper() if diststyle else "AUTO"
sortstyle = sortstyle.upper() if sortstyle else "COMPOUND"
if df is not None:
redshift_types: Dict[str, str] = _data_types.database_types_from_pandas(
df=df,
index=index,
dtype=dtype,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
converter_func=_data_types.pyarrow2redshift,
)
elif path is not None:
redshift_types = _redshift_types_from_path(
path=path,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
parquet_infer_sampling=parquet_infer_sampling,
path_suffix=path_suffix,
path_ignore_suffix=path_ignore_suffix,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
else:
raise ValueError("df and path are None.You MUST pass at least one.")
_validate_parameters(
redshift_types=redshift_types,
diststyle=diststyle,
distkey=distkey,
sortstyle=sortstyle,
sortkey=sortkey,
)
cols_str: str = "".join([f"{k} {v},\n" for k, v in redshift_types.items()])[:-2]
primary_keys_str: str = f",\nPRIMARY KEY ({', '.join(primary_keys)})" if primary_keys else ""
distkey_str: str = f"\nDISTKEY({distkey})" if distkey and diststyle == "KEY" else ""
sortkey_str: str = f"\n{sortstyle} SORTKEY({','.join(sortkey)})" if sortkey else ""
sql = (
f'CREATE TABLE IF NOT EXISTS "{schema}"."{table}" (\n'
f"{cols_str}"
f"{primary_keys_str}"
f")\nDISTSTYLE {diststyle}"
f"{distkey_str}"
f"{sortkey_str}"
)
_logger.debug("Create table query:\n%s", sql)
cursor.execute(sql)
return table, schema
def _read_parquet_iterator(
path: str,
keep_files: bool,
use_threads: bool,
categories: Optional[List[str]],
chunked: Union[bool, int],
boto3_session: Optional[boto3.Session],
s3_additional_kwargs: Optional[Dict[str, str]],
) -> Iterator[pd.DataFrame]:
dfs: Iterator[pd.DataFrame] = s3.read_parquet(
path=path,
categories=categories,
chunked=chunked,
dataset=False,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
yield from dfs
if keep_files is False:
s3.delete_objects(
path=path, use_threads=use_threads, boto3_session=boto3_session, s3_additional_kwargs=s3_additional_kwargs
)
def connect(
connection: Optional[str] = None,
secret_id: Optional[str] = None,
catalog_id: Optional[str] = None,
dbname: Optional[str] = None,
boto3_session: Optional[boto3.Session] = None,
ssl: bool = True,
timeout: Optional[int] = None,
max_prepared_statements: int = 1000,
tcp_keepalive: bool = True,
) -> redshift_connector.Connection:
"""Return a redshift_connector connection from a Glue Catalog or Secret Manager.
Note
----
You MUST pass a `connection` OR `secret_id`
https://github.com/aws/amazon-redshift-python-driver
Parameters
----------
connection : Optional[str]
Glue Catalog Connection name.
secret_id: Optional[str]:
Specifies the secret containing the version that you want to retrieve.
You can specify either the Amazon Resource Name (ARN) or the friendly name of the secret.
catalog_id : str, optional
The ID of the Data Catalog.
If none is provided, the AWS account ID is used by default.
dbname: Optional[str]
Optional database name to overwrite the stored one.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    ssl: bool
        This governs SSL encryption for TCP/IP sockets.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    timeout: Optional[int]
        This is the time in seconds before the connection to the server will time out.
        The default is None which means no timeout.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    max_prepared_statements: int
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    tcp_keepalive: bool
        If True then use TCP keepalive. The default is True.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
Returns
-------
redshift_connector.Connection
redshift_connector connection.
Examples
--------
Fetching Redshift connection from Glue Catalog
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> with con.cursor() as cursor:
>>> cursor.execute("SELECT 1")
>>> print(cursor.fetchall())
>>> con.close()
Fetching Redshift connection from Secrets Manager
>>> import awswrangler as wr
>>> con = wr.redshift.connect(secret_id="MY_SECRET")
>>> with con.cursor() as cursor:
>>> cursor.execute("SELECT 1")
>>> print(cursor.fetchall())
>>> con.close()
"""
attrs: _db_utils.ConnectionAttributes = _db_utils.get_connection_attributes(
connection=connection, secret_id=secret_id, catalog_id=catalog_id, dbname=dbname, boto3_session=boto3_session
)
if attrs.kind != "redshift":
raise exceptions.InvalidDatabaseType(
f"Invalid connection type ({attrs.kind}. It must be a redshift connection.)"
)
return redshift_connector.connect(
user=attrs.user,
database=attrs.database,
password=attrs.password,
port=int(attrs.port),
host=attrs.host,
ssl=ssl,
timeout=timeout,
max_prepared_statements=max_prepared_statements,
tcp_keepalive=tcp_keepalive,
)
def connect_temp(
cluster_identifier: str,
user: str,
database: Optional[str] = None,
duration: int = 900,
auto_create: bool = True,
db_groups: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
ssl: bool = True,
timeout: Optional[int] = None,
max_prepared_statements: int = 1000,
tcp_keepalive: bool = True,
) -> redshift_connector.Connection:
"""Return a redshift_connector temporary connection (No password required).
https://github.com/aws/amazon-redshift-python-driver
Parameters
----------
cluster_identifier : str
The unique identifier of a cluster.
This parameter is case sensitive.
user : str, optional
The name of a database user.
database : str, optional
Database name. If None, the default Database is used.
duration : int, optional
The number of seconds until the returned temporary password expires.
Constraint: minimum 900, maximum 3600.
Default: 900
auto_create : bool
Create a database user with the name specified for the user named in user if one does not exist.
db_groups : List[str], optional
A list of the names of existing database groups that the user named in user will join for the current session,
in addition to any group memberships for an existing user. If not specified, a new user is added only to PUBLIC.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
    ssl: bool
        This governs SSL encryption for TCP/IP sockets.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    timeout: Optional[int]
        This is the time in seconds before the connection to the server will time out.
        The default is None which means no timeout.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    max_prepared_statements: int
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
    tcp_keepalive: bool
        If True then use TCP keepalive. The default is True.
        This parameter is forwarded to redshift_connector.
        https://github.com/aws/amazon-redshift-python-driver
Returns
-------
redshift_connector.Connection
redshift_connector connection.
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> with con.cursor() as cursor:
>>> cursor.execute("SELECT 1")
>>> print(cursor.fetchall())
>>> con.close()
"""
client_redshift: boto3.client = _utils.client(service_name="redshift", session=boto3_session)
args: Dict[str, Any] = {
"DbUser": user,
"ClusterIdentifier": cluster_identifier,
"DurationSeconds": duration,
"AutoCreate": auto_create,
}
if db_groups is not None:
args["DbGroups"] = db_groups
else:
db_groups = []
res: Dict[str, Any] = client_redshift.get_cluster_credentials(**args)
cluster: Dict[str, Any] = client_redshift.describe_clusters(ClusterIdentifier=cluster_identifier)["Clusters"][0]
return redshift_connector.connect(
user=res["DbUser"],
database=database if database else cluster["DBName"],
password=res["DbPassword"],
port=cluster["Endpoint"]["Port"],
host=cluster["Endpoint"]["Address"],
ssl=ssl,
timeout=timeout,
max_prepared_statements=max_prepared_statements,
tcp_keepalive=tcp_keepalive,
db_groups=db_groups,
)
def read_sql_query(
sql: str,
con: redshift_connector.Connection,
index_col: Optional[Union[str, List[str]]] = None,
params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None,
chunksize: Optional[int] = None,
dtype: Optional[Dict[str, pa.DataType]] = None,
safe: bool = True,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Return a DataFrame corresponding to the result set of the query string.
Note
----
For large extractions (1K+ rows) consider the function **wr.redshift.unload()**.
Parameters
----------
sql : str
SQL query.
con : redshift_connector.Connection
Use redshift_connector.connect() to use "
"credentials directly or wr.redshift.connect() to fetch it from the Glue Catalog.
index_col : Union[str, List[str]], optional
Column(s) to set as index(MultiIndex).
params : Union[List, Tuple, Dict], optional
List of parameters to pass to execute method.
The syntax used to pass parameters is database driver dependent.
Check your database driver documentation for which of the five syntax styles,
described in PEP 249’s paramstyle, is supported.
chunksize : int, optional
If specified, return an iterator where chunksize is the number of rows to include in each chunk.
dtype : Dict[str, pyarrow.DataType], optional
Specifying the datatype for columns.
The keys should be the column names and the values should be the PyArrow types.
safe : bool
Check for overflows or other unsafe data type conversions.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
    Reading from Redshift using a Glue Catalog Connection
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> df = wr.redshift.read_sql_query(
... sql="SELECT * FROM public.my_table",
... con=con
... )
>>> con.close()
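    Reading in chunks (illustrative sketch; connection and table names are placeholders)
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> dfs = wr.redshift.read_sql_query(
    ...     sql="SELECT * FROM public.my_table",
    ...     con=con,
    ...     chunksize=500
    ... )
    >>> for df in dfs:
    >>>     print(df.shape)
    >>> con.close()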
"""
_validate_connection(con=con)
return _db_utils.read_sql_query(
sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe
)
def read_sql_table(
table: str,
con: redshift_connector.Connection,
schema: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
params: Optional[Union[List[Any], Tuple[Any, ...], Dict[Any, Any]]] = None,
chunksize: Optional[int] = None,
dtype: Optional[Dict[str, pa.DataType]] = None,
safe: bool = True,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Return a DataFrame corresponding the table.
Note
----
For large extractions (1K+ rows) consider the function **wr.redshift.unload()**.
Parameters
----------
table : str
Table name.
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
schema : str, optional
Name of SQL schema in database to query (if database flavor supports this).
Uses default schema if None (default).
index_col : Union[str, List[str]], optional
Column(s) to set as index(MultiIndex).
params : Union[List, Tuple, Dict], optional
List of parameters to pass to execute method.
The syntax used to pass parameters is database driver dependent.
Check your database driver documentation for which of the five syntax styles,
described in PEP 249’s paramstyle, is supported.
chunksize : int, optional
If specified, return an iterator where chunksize is the number of rows to include in each chunk.
dtype : Dict[str, pyarrow.DataType], optional
Specifying the datatype for columns.
The keys should be the column names and the values should be the PyArrow types.
safe : bool
Check for overflows or other unsafe data type conversions.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
    Reading from Redshift using a Glue Catalog Connection
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> df = wr.redshift.read_sql_table(
... table="my_table",
... schema="public",
... con=con
... )
>>> con.close()
"""
sql: str = f'SELECT * FROM "{table}"' if schema is None else f'SELECT * FROM "{schema}"."{table}"'
return read_sql_query(
sql=sql, con=con, index_col=index_col, params=params, chunksize=chunksize, dtype=dtype, safe=safe
)
def to_sql(
df: pd.DataFrame,
con: redshift_connector.Connection,
table: str,
schema: str,
mode: str = "append",
index: bool = False,
dtype: Optional[Dict[str, str]] = None,
diststyle: str = "AUTO",
distkey: Optional[str] = None,
sortstyle: str = "COMPOUND",
sortkey: Optional[List[str]] = None,
primary_keys: Optional[List[str]] = None,
varchar_lengths_default: int = 256,
varchar_lengths: Optional[Dict[str, int]] = None,
) -> None:
"""Write records stored in a DataFrame into Redshift.
Note
----
For large DataFrames (1K+ rows) consider the function **wr.redshift.copy()**.
Parameters
----------
df : pandas.DataFrame
Pandas DataFrame https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
table : str
Table name
schema : str
Schema name
mode : str
Append, overwrite or upsert.
index : bool
True to store the DataFrame index as a column in the table,
otherwise False to ignore it.
dtype: Dict[str, str], optional
Dictionary of columns names and Redshift types to be casted.
Useful when you have columns with undetermined or mixed data types.
(e.g. {'col name': 'VARCHAR(10)', 'col2 name': 'FLOAT'})
diststyle : str
Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
distkey : str, optional
Specifies a column name or positional number for the distribution key.
sortstyle : str
Sorting can be "COMPOUND" or "INTERLEAVED".
https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
sortkey : List[str], optional
List of columns to be sorted.
primary_keys : List[str], optional
Primary keys.
varchar_lengths_default : int
The size that will be set for all VARCHAR columns not specified with varchar_lengths.
varchar_lengths : Dict[str, int], optional
Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
Returns
-------
None
None.
Examples
--------
    Writing to Redshift using a Glue Catalog Connection
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> wr.redshift.to_sql(
    ...     df=df,
... table="my_table",
... schema="public",
... con=con
... )
>>> con.close()
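    Upserting on a primary key (illustrative sketch; table and key names are placeholders)
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.to_sql(
    ...     df=df,
    ...     table="my_table",
    ...     schema="public",
    ...     con=con,
    ...     mode="upsert",
    ...     primary_keys=["id"]
    ... )
    >>> con.close()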
"""
if df.empty is True:
raise exceptions.EmptyDataFrame()
_validate_connection(con=con)
autocommit_temp: bool = con.autocommit
con.autocommit = False
try:
with con.cursor() as cursor:
created_table, created_schema = _create_table(
df=df,
path=None,
cursor=cursor,
table=table,
schema=schema,
mode=mode,
index=index,
dtype=dtype,
diststyle=diststyle,
sortstyle=sortstyle,
distkey=distkey,
sortkey=sortkey,
primary_keys=primary_keys,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
)
if index:
df.reset_index(level=df.index.names, inplace=True)
placeholders: str = ", ".join(["%s"] * len(df.columns))
schema_str = f'"{created_schema}".' if created_schema else ""
sql: str = f'INSERT INTO {schema_str}"{created_table}" VALUES ({placeholders})'
_logger.debug("sql: %s", sql)
parameters: List[List[Any]] = _db_utils.extract_parameters(df=df)
cursor.executemany(sql, parameters)
if table != created_table: # upsert
_upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys)
con.commit()
except Exception as ex:
con.rollback()
_logger.error(ex)
raise
finally:
con.autocommit = autocommit_temp
def unload_to_files(
sql: str,
path: str,
con: redshift_connector.Connection,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region: Optional[str] = None,
max_file_size: Optional[float] = None,
kms_key_id: Optional[str] = None,
manifest: bool = False,
use_threads: bool = True,
partition_cols: Optional[List[str]] = None,
boto3_session: Optional[boto3.Session] = None,
) -> None:
"""Unload Parquet files on s3 from a Redshift query result (Through the UNLOAD command).
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
sql: str
SQL query.
path : Union[str, List[str]]
S3 path to write stage files (e.g. s3://bucket_name/any_name/)
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
region : str, optional
Specifies the AWS Region where the target Amazon S3 bucket is located.
REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
assumes that the target Amazon S3 bucket is located in the same AWS
Region as the Amazon Redshift cluster.
max_file_size : float, optional
Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default
maximum file size is 6200.0 MB.
kms_key_id : str, optional
Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
used to encrypt data files on Amazon S3.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
manifest : bool
Unload a manifest file on S3.
partition_cols: List[str], optional
Specifies the partition keys for the unload operation.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
Returns
-------
None
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
>>> wr.redshift.unload_to_files(
... sql="SELECT * FROM public.mytable",
... path="s3://bucket/extracted_parquet_files/",
... con=con,
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
path = path if path.endswith("/") else f"{path}/"
session: boto3.Session = _utils.ensure_session(session=boto3_session)
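    # Clear the stage prefix first so only the freshly unloaded files remain under it.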
s3.delete_objects(path=path, use_threads=use_threads, boto3_session=session)
with con.cursor() as cursor:
partition_str: str = f"\nPARTITION BY ({','.join(partition_cols)})" if partition_cols else ""
manifest_str: str = "\nmanifest" if manifest is True else ""
region_str: str = f"\nREGION AS '{region}'" if region is not None else ""
max_file_size_str: str = f"\nMAXFILESIZE AS {max_file_size} MB" if max_file_size is not None else ""
kms_key_id_str: str = f"\nKMS_KEY_ID '{kms_key_id}'" if kms_key_id is not None else ""
auth_str: str = _make_s3_auth_string(
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
boto3_session=boto3_session,
)
sql = (
f"UNLOAD ('{sql}')\n"
f"TO '{path}'\n"
f"{auth_str}"
"ALLOWOVERWRITE\n"
"PARALLEL ON\n"
"FORMAT PARQUET\n"
"ENCRYPTED"
f"{kms_key_id_str}"
f"{partition_str}"
f"{region_str}"
f"{max_file_size_str}"
f"{manifest_str};"
)
_logger.debug("sql: \n%s", sql)
cursor.execute(sql)
def unload(
sql: str,
path: str,
con: redshift_connector.Connection,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
region: Optional[str] = None,
max_file_size: Optional[float] = None,
kms_key_id: Optional[str] = None,
categories: Optional[List[str]] = None,
chunked: Union[bool, int] = False,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> Union[pd.DataFrame, Iterator[pd.DataFrame]]:
"""Load Pandas DataFrame from a Amazon Redshift query result using Parquet files on s3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to
`wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` to extract large
Amazon Redshift data into a Pandas DataFrames through the **UNLOAD command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.read_sql_query()`/`wr.redshift.read_sql_table()` function,
    so it is only recommended for fetching 1K+ rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_UNLOAD.html
Note
----
``Batching`` (`chunked` argument) (Memory Friendly):
    Will enable the function to return an Iterable of DataFrames instead of a regular DataFrame.
There are two batching strategies on Wrangler:
- If **chunked=True**, a new DataFrame will be returned for each file in your path/dataset.
    - If **chunked=INTEGER**, Wrangler will iterate on the data by number of rows equal to the received INTEGER.
    `P.S.` `chunked=True` is faster and uses less memory while `chunked=INTEGER` is more precise
    in number of rows for each DataFrame.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
sql: str
SQL query.
path : Union[str, List[str]]
S3 path to write stage files (e.g. s3://bucket_name/any_name/)
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
region : str, optional
Specifies the AWS Region where the target Amazon S3 bucket is located.
REGION is required for UNLOAD to an Amazon S3 bucket that isn't in the
same AWS Region as the Amazon Redshift cluster. By default, UNLOAD
assumes that the target Amazon S3 bucket is located in the same AWS
Region as the Amazon Redshift cluster.
max_file_size : float, optional
Specifies the maximum size (MB) of files that UNLOAD creates in Amazon S3.
Specify a decimal value between 5.0 MB and 6200.0 MB. If None, the default
maximum file size is 6200.0 MB.
kms_key_id : str, optional
Specifies the key ID for an AWS Key Management Service (AWS KMS) key to be
used to encrypt data files on Amazon S3.
categories: List[str], optional
List of columns names that should be returned as pandas.Categorical.
Recommended for memory restricted environments.
keep_files : bool
        Whether to keep the stage files on S3 after reading them.
chunked : Union[int, bool]
If passed will split the data in a Iterable of DataFrames (Memory friendly).
If `True` wrangler will iterate on the data by files in the most efficient way without guarantee of chunksize.
        If an `INTEGER` is passed Wrangler will iterate on the data by number of rows equal to the received INTEGER.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to botocore requests, only "SSECustomerAlgorithm" and "SSECustomerKey" arguments will be considered.
Returns
-------
Union[pandas.DataFrame, Iterator[pandas.DataFrame]]
Result as Pandas DataFrame(s).
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> df = wr.redshift.unload(
... sql="SELECT * FROM public.mytable",
... path="s3://bucket/extracted_parquet_files/",
... con=con,
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
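    Unloading in chunks (illustrative sketch; bucket, role and table names are placeholders)
    >>> import awswrangler as wr
    >>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> dfs = wr.redshift.unload(
    ...     sql="SELECT * FROM public.mytable",
    ...     path="s3://bucket/extracted_parquet_files/",
    ...     con=con,
    ...     iam_role="arn:aws:iam::XXX:role/XXX",
    ...     chunked=True
    ... )
    >>> for df in dfs:
    >>>     print(df.shape)
    >>> con.close()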
"""
session: boto3.Session = _utils.ensure_session(session=boto3_session)
unload_to_files(
sql=sql,
path=path,
con=con,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
region=region,
max_file_size=max_file_size,
kms_key_id=kms_key_id,
manifest=False,
use_threads=use_threads,
boto3_session=session,
)
if chunked is False:
df: pd.DataFrame = s3.read_parquet(
path=path,
categories=categories,
chunked=chunked,
dataset=False,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
if keep_files is False:
s3.delete_objects(
path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs
)
return df
return _read_parquet_iterator(
path=path,
categories=categories,
chunked=chunked,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
keep_files=keep_files,
)
def copy_from_files( # pylint: disable=too-many-locals,too-many-arguments
path: str,
con: redshift_connector.Connection,
table: str,
schema: str,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
parquet_infer_sampling: float = 1.0,
mode: str = "append",
diststyle: str = "AUTO",
distkey: Optional[str] = None,
sortstyle: str = "COMPOUND",
sortkey: Optional[List[str]] = None,
primary_keys: Optional[List[str]] = None,
varchar_lengths_default: int = 256,
varchar_lengths: Optional[Dict[str, int]] = None,
serialize_to_json: bool = False,
path_suffix: Optional[str] = None,
path_ignore_suffix: Optional[str] = None,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
) -> None:
"""Load Parquet files from S3 to a Table on Amazon Redshift (Through COPY command).
https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
Note
----
If the table does not exist yet,
it will be automatically created for you
using the Parquet metadata to
infer the columns data types.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
path : str
S3 prefix (e.g. s3://bucket/prefix/)
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
table : str
Table name
schema : str
Schema name
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
parquet_infer_sampling : float
Random sample ratio of files that will have the metadata inspected.
Must be `0.0 < sampling <= 1.0`.
The higher, the more accurate.
The lower, the faster.
mode : str
Append, overwrite or upsert.
diststyle : str
Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
distkey : str, optional
Specifies a column name or positional number for the distribution key.
sortstyle : str
Sorting can be "COMPOUND" or "INTERLEAVED".
https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
sortkey : List[str], optional
List of columns to be sorted.
primary_keys : List[str], optional
Primary keys.
varchar_lengths_default : int
The size that will be set for all VARCHAR columns not specified with varchar_lengths.
varchar_lengths : Dict[str, int], optional
Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
serialize_to_json : bool
Should Wrangler add SERIALIZETOJSON parameter into the COPY command?
SERIALIZETOJSON is necessary to load nested data
https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json
path_suffix: Union[str, List[str], None]
Suffix or List of suffixes to be scanned on s3 for the schema extraction
(e.g. [".gz.parquet", ".snappy.parquet"]).
Only has effect during the table creation.
If None, will try to read all files. (default)
path_ignore_suffix: Union[str, List[str], None]
Suffix or List of suffixes for S3 keys to be ignored during the schema extraction.
(e.g. [".csv", "_SUCCESS"]).
Only has effect during the table creation.
If None, will try to read all files. (default)
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.copy_from_files(
... path="s3://bucket/my_parquet_files/",
... con=con,
... table="my_table",
... schema="public"
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
autocommit_temp: bool = con.autocommit
con.autocommit = False
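    # Run table creation, COPY and the optional upsert in one transaction so a
    # failure rolls back and leaves the target table untouched.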
try:
with con.cursor() as cursor:
created_table, created_schema = _create_table(
df=None,
path=path,
parquet_infer_sampling=parquet_infer_sampling,
path_suffix=path_suffix,
path_ignore_suffix=path_ignore_suffix,
cursor=cursor,
table=table,
schema=schema,
mode=mode,
diststyle=diststyle,
sortstyle=sortstyle,
distkey=distkey,
sortkey=sortkey,
primary_keys=primary_keys,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
index=False,
dtype=None,
use_threads=use_threads,
boto3_session=boto3_session,
s3_additional_kwargs=s3_additional_kwargs,
)
_copy(
cursor=cursor,
path=path,
table=created_table,
schema=created_schema,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
boto3_session=boto3_session,
serialize_to_json=serialize_to_json,
)
if table != created_table: # upsert
_upsert(cursor=cursor, schema=schema, table=table, temp_table=created_table, primary_keys=primary_keys)
con.commit()
except Exception as ex:
con.rollback()
_logger.error(ex)
raise
finally:
con.autocommit = autocommit_temp
def copy( # pylint: disable=too-many-arguments
df: pd.DataFrame,
path: str,
con: redshift_connector.Connection,
table: str,
schema: str,
iam_role: Optional[str] = None,
aws_access_key_id: Optional[str] = None,
aws_secret_access_key: Optional[str] = None,
aws_session_token: Optional[str] = None,
index: bool = False,
dtype: Optional[Dict[str, str]] = None,
mode: str = "append",
diststyle: str = "AUTO",
distkey: Optional[str] = None,
sortstyle: str = "COMPOUND",
sortkey: Optional[List[str]] = None,
primary_keys: Optional[List[str]] = None,
varchar_lengths_default: int = 256,
varchar_lengths: Optional[Dict[str, int]] = None,
serialize_to_json: bool = False,
keep_files: bool = False,
use_threads: bool = True,
boto3_session: Optional[boto3.Session] = None,
s3_additional_kwargs: Optional[Dict[str, str]] = None,
max_rows_by_file: Optional[int] = 10_000_000,
) -> None:
"""Load Pandas DataFrame as a Table on Amazon Redshift using parquet files on S3 as stage.
This is a **HIGH** latency and **HIGH** throughput alternative to `wr.redshift.to_sql()` to load large
DataFrames into Amazon Redshift through the ** SQL COPY command**.
This strategy has more overhead and requires more IAM privileges
than the regular `wr.redshift.to_sql()` function, so it is only recommended
    for inserting 1K+ rows at once.
https://docs.aws.amazon.com/redshift/latest/dg/r_COPY.html
Note
----
If the table does not exist yet,
it will be automatically created for you
using the Parquet metadata to
infer the columns data types.
Note
----
In case of `use_threads=True` the number of threads
that will be spawned will be gotten from os.cpu_count().
Parameters
----------
df: pandas.DataFrame
Pandas DataFrame.
path : str
S3 path to write stage files (e.g. s3://bucket_name/any_name/).
Note: This path must be empty.
con : redshift_connector.Connection
        Use redshift_connector.connect() to use credentials directly or
        wr.redshift.connect() to fetch it from the Glue Catalog.
table : str
Table name
schema : str
Schema name
iam_role : str, optional
AWS IAM role with the related permissions.
aws_access_key_id : str, optional
The access key for your AWS account.
aws_secret_access_key : str, optional
The secret key for your AWS account.
aws_session_token : str, optional
The session key for your AWS account. This is only needed when you are using temporary credentials.
index : bool
True to store the DataFrame index in file, otherwise False to ignore it.
dtype: Dict[str, str], optional
Dictionary of columns names and Athena/Glue types to be casted.
Useful when you have columns with undetermined or mixed data types.
Only takes effect if dataset=True.
(e.g. {'col name': 'bigint', 'col2 name': 'int'})
mode : str
Append, overwrite or upsert.
diststyle : str
Redshift distribution styles. Must be in ["AUTO", "EVEN", "ALL", "KEY"].
https://docs.aws.amazon.com/redshift/latest/dg/t_Distributing_data.html
distkey : str, optional
Specifies a column name or positional number for the distribution key.
sortstyle : str
Sorting can be "COMPOUND" or "INTERLEAVED".
https://docs.aws.amazon.com/redshift/latest/dg/t_Sorting_data.html
sortkey : List[str], optional
List of columns to be sorted.
primary_keys : List[str], optional
Primary keys.
varchar_lengths_default : int
The size that will be set for all VARCHAR columns not specified with varchar_lengths.
varchar_lengths : Dict[str, int], optional
        Dict of VARCHAR length by columns. (e.g. {"col1": 10, "col5": 200}).
    serialize_to_json : bool
        Should Wrangler add SERIALIZETOJSON parameter into the COPY command?
        SERIALIZETOJSON is necessary to load nested data.
        https://docs.aws.amazon.com/redshift/latest/dg/ingest-super.html#copy_json
    keep_files : bool
        Whether to keep the stage files on S3 after the load.
use_threads : bool
True to enable concurrent requests, False to disable multiple threads.
If enabled os.cpu_count() will be used as the max number of threads.
boto3_session : boto3.Session(), optional
        Boto3 Session. The default boto3 session will be used if boto3_session receives None.
s3_additional_kwargs:
Forward to botocore requests. Valid parameters: "ACL", "Metadata", "ServerSideEncryption", "StorageClass",
"SSECustomerAlgorithm", "SSECustomerKey", "SSEKMSKeyId", "SSEKMSEncryptionContext", "Tagging", "RequestPayer".
e.g. s3_additional_kwargs={'ServerSideEncryption': 'aws:kms', 'SSEKMSKeyId': 'YOUR_KMS_KEY_ARN'}
max_rows_by_file : int
Max number of rows in each file.
        The default is 10,000,000; pass None to avoid splitting the files.
(e.g. 33554432, 268435456)
Returns
-------
None
None.
Examples
--------
>>> import awswrangler as wr
>>> import pandas as pd
>>> con = wr.redshift.connect("MY_GLUE_CONNECTION")
    >>> wr.redshift.copy(
... df=pd.DataFrame({'col': [1, 2, 3]}),
... path="s3://bucket/my_parquet_files/",
... con=con,
... table="my_table",
... schema="public"
... iam_role="arn:aws:iam::XXX:role/XXX"
... )
>>> con.close()
"""
path = path[:-1] if path.endswith("*") else path
path = path if path.endswith("/") else f"{path}/"
session: boto3.Session = _utils.ensure_session(session=boto3_session)
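    # The staging prefix must be empty: COPY ingests every Parquet file under it,
    # so leftover objects would end up loaded into the table as well.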
if s3.list_objects(path=path, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs):
raise exceptions.InvalidArgument(
f"The received S3 path ({path}) is not empty. "
"Please, provide a different path or use wr.s3.delete_objects() to clean up the current one."
)
try:
s3.to_parquet(
df=df,
path=path,
index=index,
dataset=True,
mode="append",
dtype=dtype,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
max_rows_by_file=max_rows_by_file,
)
copy_from_files(
path=path,
con=con,
table=table,
schema=schema,
iam_role=iam_role,
aws_access_key_id=aws_access_key_id,
aws_secret_access_key=aws_secret_access_key,
aws_session_token=aws_session_token,
mode=mode,
diststyle=diststyle,
distkey=distkey,
sortstyle=sortstyle,
sortkey=sortkey,
primary_keys=primary_keys,
varchar_lengths_default=varchar_lengths_default,
varchar_lengths=varchar_lengths,
serialize_to_json=serialize_to_json,
use_threads=use_threads,
boto3_session=session,
s3_additional_kwargs=s3_additional_kwargs,
)
finally:
if keep_files is False:
s3.delete_objects(
path=path, use_threads=use_threads, boto3_session=session, s3_additional_kwargs=s3_additional_kwargs
)
| 38.458273 | 120 | 0.653516 |
aceefa852c3877ee87f79c5168e4487de19690ce | 2,819 | py | Python | benchmarks_FEM_active/T/TDiff/TDiff.py | GeoStat-Framework/ogs5py_benchmarks | 0b6db19b87cfad36459757f99ce2458f8e12b20b | [
"BSD-4-Clause"
] | 3 | 2019-01-15T17:38:11.000Z | 2020-01-07T23:44:12.000Z | benchmarks_FEM_active/T/TDiff/TDiff.py | GeoStat-Framework/ogs5py_benchmarks | 0b6db19b87cfad36459757f99ce2458f8e12b20b | [
"BSD-4-Clause"
] | 1 | 2020-05-12T09:18:09.000Z | 2020-05-12T10:48:32.000Z | benchmarks_FEM_active/T/TDiff/TDiff.py | GeoStat-Framework/ogs5py_benchmarks | 0b6db19b87cfad36459757f99ce2458f8e12b20b | [
"BSD-4-Clause"
] | 1 | 2020-01-08T13:28:50.000Z | 2020-01-08T13:28:50.000Z | # -*- coding: utf-8 -*-
from ogs5py import OGS
model = OGS(
task_root='TDiff_root',
task_id='TDiff',
output_dir='out',
)
model.msh.read_file('TDiff.msh')
model.gli.read_file('TDiff.gli')
model.pcs.add_block(
main_key='PROCESS',
PCS_TYPE='HEAT_TRANSPORT',
TEMPERATURE_UNIT='KELVIN',
)
model.bc.add_block(
main_key='BOUNDARY_CONDITION',
PCS_TYPE='HEAT_TRANSPORT',
PRIMARY_VARIABLE='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT0'],
DIS_TYPE=['CONSTANT', 1],
)
model.ic.add_block(
main_key='INITIAL_CONDITION',
PCS_TYPE='HEAT_TRANSPORT',
PRIMARY_VARIABLE='TEMPERATURE1',
GEO_TYPE='DOMAIN',
DIS_TYPE=['CONSTANT', 0],
)
model.ic.add_block(
main_key='INITIAL_CONDITION',
PCS_TYPE='HEAT_TRANSPORT',
PRIMARY_VARIABLE='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT0'],
DIS_TYPE=['CONSTANT', 1],
)
model.mmp.add_block(
main_key='MEDIUM_PROPERTIES',
GEOMETRY_DIMENSION=1,
GEOMETRY_AREA=1.0,
POROSITY=[1, 0.1],
STORAGE=[1, 0.0],
TORTUOSITY=[1, 1.0],
PERMEABILITY_TENSOR=['ISOTROPIC', 1e-15],
HEAT_DISPERSION=[1, 0.0, 0.0],
)
model.msp.add_block(
main_key='SOLID_PROPERTIES',
DENSITY=[1, 2500],
THERMAL=[
['EXPANSION:'],
[1e-05],
['CAPACITY:'],
[1, 1000],
['CONDUCTIVITY:'],
[1, 3.2],
],
)
model.mfp.add_block(
main_key='FLUID_PROPERTIES',
FLUID_TYPE='LIQUID',
PCS_TYPE='PRESSURE1',
DENSITY=[1, 1000.0],
VISCOSITY=[1, 0.0],
SPECIFIC_HEAT_CAPACITY=[1, 0.0],
HEAT_CONDUCTIVITY=[1, 0.0],
)
model.num.add_block(
main_key='NUMERICS',
PCS_TYPE='HEAT_TRANSPORT',
LINEAR_SOLVER=[2, 0, 1e-12, 1000, 1.0, 100, 4],
ELE_GAUSS_POINTS=2,
NON_LINEAR_SOLVER=['PICARD', 0.001, 25, 0.0],
)
model.tim.add_block(
main_key='TIME_STEPPING',
PCS_TYPE='HEAT_TRANSPORT',
TIME_STEPS=[1000, 390625.0],
TIME_END=1e+99,
TIME_START=0.0,
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='HEAT_TRANSPORT',
NOD_VALUES='TEMPERATURE1',
GEO_TYPE=['POLYLINE', 'ROCK'],
TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='HEAT_TRANSPORT',
NOD_VALUES='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT2'],
TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='HEAT_TRANSPORT',
NOD_VALUES='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT3'],
TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='HEAT_TRANSPORT',
NOD_VALUES='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT4'],
TIM_TYPE=['STEPS', 1],
)
model.out.add_block(
main_key='OUTPUT',
PCS_TYPE='HEAT_TRANSPORT',
NOD_VALUES='TEMPERATURE1',
GEO_TYPE=['POINT', 'POINT5'],
TIM_TYPE=['STEPS', 1],
)
model.write_input()
model.run_model()
| 23.689076 | 51 | 0.63817 |
aceefb86a4e5ecf08f3982f553c0a242f41c7268 | 52 | py | Python | pyunc/__main__.py | jstutters/PyUNC | 15f50d868ce2cfc29eeebb3b932cd5834b9e8cee | [
"MIT"
] | null | null | null | pyunc/__main__.py | jstutters/PyUNC | 15f50d868ce2cfc29eeebb3b932cd5834b9e8cee | [
"MIT"
] | 3 | 2018-08-22T10:02:48.000Z | 2020-03-13T15:25:13.000Z | pyunc/__main__.py | jstutters/PyUNC | 15f50d868ce2cfc29eeebb3b932cd5834b9e8cee | [
"MIT"
] | null | null | null | from pyunc.cli import unc_to_nifti
unc_to_nifti()
| 10.4 | 34 | 0.807692 |
aceefc44e716f7fafc88c27ca210aaf54651f2d0 | 11,957 | py | Python | model_zoo/ernie-doc/metrics.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | model_zoo/ernie-doc/metrics.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | model_zoo/ernie-doc/metrics.py | mukaiu/PaddleNLP | 0315365dbafa6e3b1c7147121ba85e05884125a5 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import collections
import sys
import paddle
from paddle.utils import try_import
from paddle.metric import Metric
from paddlenlp.metrics.dureader import get_final_text, _compute_softmax, _get_best_indexes
# Metric for ERNIE-DOCs
class F1(object):
def __init__(self, positive_label=1):
self.positive_label = positive_label
self.reset()
def compute(self, preds, labels):
if isinstance(preds, paddle.Tensor):
preds = preds.numpy()
elif isinstance(preds, list):
preds = np.array(preds, dtype='float32')
if isinstance(labels, list):
labels = np.array(labels, dtype='int64')
elif isinstance(labels, paddle.Tensor):
labels = labels.numpy()
preds = np.argmax(preds, axis=1)
tp = ((preds == labels) & (labels == self.positive_label)).sum()
fn = ((preds != labels) & (labels == self.positive_label)).sum()
fp = ((preds != labels) & (preds == self.positive_label)).sum()
return tp, fp, fn
def update(self, statistic):
tp, fp, fn = statistic
self.tp += tp
self.fp += fp
self.fn += fn
def accumulate(self):
recall = self.tp / (self.tp + self.fn)
precision = self.tp / (self.tp + self.fp)
f1 = 2 * recall * precision / (recall + precision)
return f1
def reset(self):
self.tp = 0
self.fp = 0
self.fn = 0
class EM_AND_F1(object):
def __init__(self):
self.nltk = try_import('nltk')
self.re = try_import('re')
def _mixed_segmentation(self, in_str, rm_punc=False):
"""mixed_segmentation"""
in_str = in_str.lower().strip()
segs_out = []
temp_str = ""
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。',
':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「',
'」', '(', ')', '-', '~', '『', '』'
]
for char in in_str:
if rm_punc and char in sp_char:
continue
pattern = '[\\u4e00-\\u9fa5]'
if self.re.search(pattern, char) or char in sp_char:
if temp_str != "":
ss = self.nltk.word_tokenize(temp_str)
segs_out.extend(ss)
temp_str = ""
segs_out.append(char)
else:
temp_str += char
        # Tokenize any trailing non-CJK text
if temp_str != "":
ss = self.nltk.word_tokenize(temp_str)
segs_out.extend(ss)
return segs_out
# Remove punctuation
def _remove_punctuation(self, in_str):
"""remove_punctuation"""
in_str = in_str.lower().strip()
sp_char = [
'-', ':', '_', '*', '^', '/', '\\', '~', '`', '+', '=', ',', '。',
':', '?', '!', '“', '”', ';', '’', '《', '》', '……', '·', '、', '「',
'」', '(', ')', '-', '~', '『', '』'
]
out_segs = []
for char in in_str:
if char in sp_char:
continue
else:
out_segs.append(char)
return ''.join(out_segs)
# Find longest common string
def _find_lcs(self, s1, s2):
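        # Dynamic-programming longest common substring: m[i + 1][j + 1] holds the
        # length of the common substring ending at s1[i] and s2[j].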
m = [[0 for i in range(len(s2) + 1)] for j in range(len(s1) + 1)]
mmax = 0
p = 0
for i in range(len(s1)):
for j in range(len(s2)):
if s1[i] == s2[j]:
m[i + 1][j + 1] = m[i][j] + 1
if m[i + 1][j + 1] > mmax:
mmax = m[i + 1][j + 1]
p = i + 1
return s1[p - mmax:p], mmax
def _calc_f1_score(self, answers, prediction):
f1_scores = []
for ans in answers:
ans_segs = self._mixed_segmentation(ans, rm_punc=True)
prediction_segs = self._mixed_segmentation(prediction, rm_punc=True)
lcs, lcs_len = self._find_lcs(ans_segs, prediction_segs)
if lcs_len == 0:
f1_scores.append(0)
continue
precision = 1.0 * lcs_len / len(prediction_segs)
recall = 1.0 * lcs_len / len(ans_segs)
f1 = (2 * precision * recall) / (precision + recall)
f1_scores.append(f1)
return max(f1_scores)
def _calc_em_score(self, answers, prediction):
em = 0
for ans in answers:
ans_ = self._remove_punctuation(ans)
prediction_ = self._remove_punctuation(prediction)
if ans_ == prediction_:
em = 1
break
return em
def __call__(self, prediction, ground_truth):
f1 = 0
em = 0
total_count = 0
skip_count = 0
for instance in ground_truth:
total_count += 1
query_id = instance['id']
query_text = instance['question'].strip()
answers = instance["answers"]
if query_id not in prediction:
sys.stderr.write('Unanswered question: {}\n'.format(query_id))
skip_count += 1
continue
preds = str(prediction[query_id])
f1 += self._calc_f1_score(answers, preds)
em += self._calc_em_score(answers, preds)
f1_score = 100.0 * f1 / total_count
em_score = 100.0 * em / total_count
avg_score = (f1_score + em_score) * 0.5
return em_score, f1_score, avg_score, total_count
def compute_qa_predictions(all_examples, all_features, all_results, n_best_size,
max_answer_length, do_lower_case, tokenizer,
verbose):
"""Write final predictions to the json file and log-odds of null if needed."""
example_index_to_features = collections.defaultdict(list)
for feature in all_features:
example_index_to_features[feature.example_index].append(feature)
unique_id_to_result = {}
for result in all_results:
unique_id_to_result[result.unique_id] = result
_PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name
"PrelimPrediction", [
"feature_index", "start_index", "end_index", "start_logit",
"end_logit"
])
all_predictions = collections.OrderedDict()
all_nbest_json = collections.OrderedDict()
for (example_index, example) in enumerate(all_examples):
features = example_index_to_features[example_index]
prelim_predictions = []
        # Gather preliminary start/end index candidates from every feature of this example.
for (feature_index, feature) in enumerate(features):
result = unique_id_to_result[feature.qid]
start_indexes = _get_best_indexes(result.start_logits, n_best_size)
end_indexes = _get_best_indexes(result.end_logits, n_best_size)
for start_index in start_indexes:
for end_index in end_indexes:
# We could hypothetically create invalid predictions, e.g., predict
# that the start of the span is in the question. We throw out all
# invalid predictions.
if start_index >= len(feature.tokens):
continue
if end_index >= len(feature.tokens):
continue
if start_index not in feature.token_to_orig_map:
continue
if end_index not in feature.token_to_orig_map:
continue
if not feature.token_is_max_context.get(start_index, False):
continue
if end_index < start_index:
continue
length = end_index - start_index + 1
if length > max_answer_length:
continue
prelim_predictions.append(
_PrelimPrediction(
feature_index=feature_index,
start_index=start_index,
end_index=end_index,
start_logit=result.start_logits[start_index],
end_logit=result.end_logits[end_index]))
prelim_predictions = sorted(prelim_predictions,
key=lambda x: (x.start_logit + x.end_logit),
reverse=True)
_NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name
"NbestPrediction", ["text", "start_logit", "end_logit"])
seen_predictions = {}
nbest = []
for pred in prelim_predictions:
if len(nbest) >= n_best_size:
break
feature = features[pred.feature_index]
if pred.start_index > 0: # this is a non-null prediction
tok_tokens = feature.tokens[pred.start_index:(pred.end_index +
1)]
orig_doc_start = feature.token_to_orig_map[pred.start_index]
orig_doc_end = feature.token_to_orig_map[pred.end_index]
orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end +
1)]
tok_text = " ".join(tok_tokens)
# De-tokenize WordPieces that have been split off.
tok_text = tok_text.replace(" ##", "")
tok_text = tok_text.replace("##", "")
# Clean whitespace
tok_text = tok_text.strip()
tok_text = " ".join(tok_text.split())
orig_text = "".join(orig_tokens)
final_text = get_final_text(tok_text, orig_text, tokenizer,
verbose)
if final_text in seen_predictions:
continue
seen_predictions[final_text] = True
else:
final_text = ""
seen_predictions[final_text] = True
nbest.append(
_NbestPrediction(text=final_text,
start_logit=pred.start_logit,
end_logit=pred.end_logit))
# In very rare edge cases we could have no valid predictions. So we
# just create a nonce prediction in this case to avoid failure.
if not nbest:
nbest.append(
_NbestPrediction(text="empty", start_logit=0.0, end_logit=0.0))
total_scores = []
best_non_null_entry = None
for entry in nbest:
total_scores.append(entry.start_logit + entry.end_logit)
probs = _compute_softmax(total_scores)
nbest_json = []
for (i, entry) in enumerate(nbest):
output = collections.OrderedDict()
output["text"] = entry.text
output["probability"] = probs[i]
output["start_logit"] = entry.start_logit
output["end_logit"] = entry.end_logit
nbest_json.append(output)
assert len(nbest_json) >= 1
all_predictions[example.qas_id] = nbest_json[0]["text"]
all_nbest_json[example.qas_id] = nbest_json
return all_predictions, all_nbest_json
| 37.95873 | 90 | 0.537342 |
aceefca265f980cf04d4e23da8826a33bc0a9fc1 | 4,393 | py | Python | content/vpx.py | vvspacetime/pure-webrtc-transport | 8da32e4d2df21316fc3cc658c3e995757b3b3e08 | [
"BSD-3-Clause"
] | 11 | 2022-02-06T13:56:02.000Z | 2022-03-10T01:26:50.000Z | content/vpx.py | vvspacetime/pure-webrtc-transport | 8da32e4d2df21316fc3cc658c3e995757b3b3e08 | [
"BSD-3-Clause"
] | null | null | null | content/vpx.py | vvspacetime/pure-webrtc-transport | 8da32e4d2df21316fc3cc658c3e995757b3b3e08 | [
"BSD-3-Clause"
] | 1 | 2022-02-27T14:34:43.000Z | 2022-02-27T14:34:43.000Z | from struct import pack, unpack_from, unpack
from typing import List, Tuple, Type, TypeVar
DESCRIPTOR_T = TypeVar("DESCRIPTOR_T")
class Vp8PayloadDescriptor:
def __init__(
self,
partition_start,
partition_id,
picture_id=None,
tl0picidx=None,
tid=None,
keyidx=None,
) -> None:
self.partition_start = partition_start
self.partition_id = partition_id
self.picture_id = picture_id
self.tl0picidx = tl0picidx
self.tid = tid
self.keyidx = keyidx
def __repr__(self) -> str:
return (
f"VpxPayloadDescriptor(S={self.partition_start}, "
f"PID={self.partition_id}, pic_id={self.picture_id})"
)
@classmethod
def parse(cls: Type[DESCRIPTOR_T], data: bytes) -> Tuple[DESCRIPTOR_T, bytes]:
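        # The descriptor layout follows the VP8 RTP payload format (RFC 7741):
        # a mandatory first octet (X|R|N|S|R|PID), an optional extension octet
        # (I|L|T|K), then optional PictureID, TL0PICIDX and TID/KEYIDX fields.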
if len(data) < 1:
raise ValueError("VPX descriptor is too short")
# first byte
octet = data[0]
extended = octet >> 7
partition_start = (octet >> 4) & 1
partition_id = octet & 0xF
picture_id = None
tl0picidx = None
tid = None
keyidx = None
pos = 1
# extended control bits
if extended:
if len(data) < pos + 1:
raise ValueError("VPX descriptor has truncated extended bits")
octet = data[pos]
ext_I = (octet >> 7) & 1
ext_L = (octet >> 6) & 1
ext_T = (octet >> 5) & 1
ext_K = (octet >> 4) & 1
pos += 1
# picture id
if ext_I:
if len(data) < pos + 1:
raise ValueError("VPX descriptor has truncated PictureID")
if data[pos] & 0x80:
if len(data) < pos + 2:
raise ValueError("VPX descriptor has truncated long PictureID")
picture_id = unpack_from("!H", data, pos)[0] & 0x7FFF
pos += 2
else:
picture_id = data[pos]
pos += 1
            # temporal level zero picture index (TL0PICIDX)
if ext_L:
if len(data) < pos + 1:
raise ValueError("VPX descriptor has truncated TL0PICIDX")
tl0picidx = data[pos]
pos += 1
if ext_T or ext_K:
if len(data) < pos + 1:
raise ValueError("VPX descriptor has truncated T/K")
t_k = data[pos]
if ext_T:
tid = ((t_k >> 6) & 3, (t_k >> 5) & 1)
if ext_K:
keyidx = t_k & 0x1F
pos += 1
obj = cls(
partition_start=partition_start,
partition_id=partition_id,
picture_id=picture_id,
tl0picidx=tl0picidx,
tid=tid,
keyidx=keyidx,
)
return obj, data[pos:]
class Vp9PayloadDescriptor:
def __init__(self, picture_id=None, tid=None, sid=None, keyframe=None):
self.picture_id = picture_id
self.tid = tid
self.sid = sid
self.keyframe = keyframe
@classmethod
def parse(cls: Type[DESCRIPTOR_T], data: bytes) -> DESCRIPTOR_T:
if len(data) < 1:
raise ValueError("VP9 descriptor has truncated extended bits")
picture_id = None
tid = None
sid = None
keyframe = None
offset = 0
ei = data[0] >> 7 & 1
ep = data[0] >> 6 & 1
el = data[0] >> 5 & 1
ef = data[0] >> 4 & 1
eb = data[0] >> 3 & 1
ee = data[0] >> 2 & 1
ev = data[0] >> 1 & 1
offset += 1
if ei:
            if len(data) < offset + 1:
raise ValueError("VP9 descriptor has truncated extended bits")
em = data[offset] >> 7 & 1
high_bytes = data[offset] & 0x7F
if em:
offset += 1
picture_id = (high_bytes << 8) + data[offset]
else:
picture_id = high_bytes
offset += 1
if el:
sid = data[offset] >> 1 & 0x07
tid = data[offset] >> 5 & 0x07
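        # Treat the packet as part of a keyframe when it is not inter-picture
        # predicted (P bit clear), starts a frame (B bit set) and belongs to the
        # lowest spatial layer.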
keyframe = ((not ep) and eb and (sid is None or sid == 0))
return cls(picture_id=picture_id,
tid=tid,
sid=sid,
keyframe=keyframe)
| 29.286667 | 87 | 0.483724 |
aceefca72960bdb9e850be3526226bfc86b3817d | 7,563 | py | Python | imcsdk/mometa/vic/VicImporterAll.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/vic/VicImporterAll.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | null | null | null | imcsdk/mometa/vic/VicImporterAll.py | ragupta-git/ImcSdk | 2e41f2ffe5282d38de85bc4739fa53dd2f0c9bb4 | [
"Apache-2.0"
] | 3 | 2018-11-14T13:02:40.000Z | 2018-11-14T13:49:38.000Z | """This module contains the general information for VicImporterAll ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class VicImporterAllConsts:
ADMIN_STATE_TRIGGER = "trigger"
ADMIN_STATE_TRIGGERED = "triggered"
PROTO_FTP = "ftp"
PROTO_HTTP = "http"
PROTO_NONE = "none"
PROTO_SCP = "scp"
PROTO_SFTP = "sftp"
PROTO_TFTP = "tftp"
class VicImporterAll(ManagedObject):
"""This is VicImporterAll class."""
consts = VicImporterAllConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("VicImporterAll", "vicImporterAll", "vic-all-importconfig", VersionMeta.Version303a, "InputOutput", 0x3ff, [], ["admin", "read-only", "user"], [u'topSystem'], [], ["Get", "Set"]),
"modular": MoMeta("VicImporterAll", "vicImporterAll", "vic-all-importconfig", VersionMeta.Version303a, "InputOutput", 0x3ff, [], ["admin", "read-only", "user"], [u'equipmentChassis'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, r"""(([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:) |((([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?\.)+[a-zA-Z]{2,6})|(([a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?)+)|([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5]))""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
"pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"user": MoPropertyMeta("user", "user", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
"modular": {
"admin_state": MoPropertyMeta("admin_state", "adminState", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x2, None, None, None, ["trigger", "triggered"], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x4, 0, 255, None, [], []),
"hostname": MoPropertyMeta("hostname", "hostname", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x8, 0, 255, r"""([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:([0-9A-Fa-f]{1,4}:[0-9A-Fa-f]{0,4}|:[0-9A-Fa-f]{1,4})?|(:[0-9A-Fa-f]{1,4}){0,2})|(:[0-9A-Fa-f]{1,4}){0,3})|(:[0-9A-Fa-f]{1,4}){0,4})|:(:[0-9A-Fa-f]{1,4}){0,5})((:[0-9A-Fa-f]{1,4}){2}|:(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])(\.(25[0-5]|(2[0-4]|1[0-9]|[1-9])?[0-9])){3})|(([0-9A-Fa-f]{1,4}:){1,6}|:):[0-9A-Fa-f]{0,4}|([0-9A-Fa-f]{1,4}:){7}:""", [], []),
"proto": MoPropertyMeta("proto", "proto", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, ["ftp", "http", "none", "scp", "sftp", "tftp"], []),
"pwd": MoPropertyMeta("pwd", "pwd", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x20, 0, 255, None, [], []),
"remote_file": MoPropertyMeta("remote_file", "remoteFile", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, r"""[^\(\)~`'\?\\"";<>\|&\*\^$%]{1,255}""", [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x80, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x100, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"user": MoPropertyMeta("user", "user", "string", VersionMeta.Version303a, MoPropertyMeta.READ_WRITE, 0x200, 0, 255, None, [], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version303a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"progress": MoPropertyMeta("progress", "progress", "string", VersionMeta.Version303a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"adminState": "admin_state",
"dn": "dn",
"hostname": "hostname",
"proto": "proto",
"pwd": "pwd",
"remoteFile": "remote_file",
"rn": "rn",
"status": "status",
"user": "user",
"childAction": "child_action",
"descr": "descr",
"progress": "progress",
},
"modular": {
"adminState": "admin_state",
"dn": "dn",
"hostname": "hostname",
"proto": "proto",
"pwd": "pwd",
"remoteFile": "remote_file",
"rn": "rn",
"status": "status",
"user": "user",
"childAction": "child_action",
"descr": "descr",
"progress": "progress",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.admin_state = None
self.hostname = None
self.proto = None
self.pwd = None
self.remote_file = None
self.status = None
self.user = None
self.child_action = None
self.descr = None
self.progress = None
ManagedObject.__init__(self, "VicImporterAll", parent_mo_or_dn, **kwargs)
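# Illustrative usage sketch (assumed, not from the original module): building the
# managed object offline. The parent DN "sys" and every property value below are
# placeholders, not values taken from a real CIMC endpoint.
if __name__ == "__main__":
    importer = VicImporterAll(
        parent_mo_or_dn="sys",
        admin_state=VicImporterAllConsts.ADMIN_STATE_TRIGGER,
        proto=VicImporterAllConsts.PROTO_SCP,
        hostname="192.0.2.10",
        remote_file="/backup/vic-all-import.xml",
        user="admin",
        pwd="example-password",
    )
    print(importer.admin_state, importer.proto)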
| 66.342105 | 891 | 0.562872 |
aceefe7947370069c40b9a62fa36410dee41c404 | 848 | py | Python | log/forms.py | gabrielgio/log | 2fd18a322d29579362b94122c362d757a7448197 | ["WTFPL"] | null | null | null | log/forms.py | gabrielgio/log | 2fd18a322d29579362b94122c362d757a7448197 | ["WTFPL"] | null | null | null | log/forms.py | gabrielgio/log | 2fd18a322d29579362b94122c362d757a7448197 | ["WTFPL"] | null | null | null |
from django import forms
from django.contrib.auth.models import User
class UserRegister(forms.Form):
email = forms.CharField(required=True)
name = forms.CharField(required=True)
password = forms.CharField(required=True)
def is_valid(self):
valid = True
if not super(UserRegister, self).is_valid():
            self.add_error(None, 'Please double-check your information.')
valid = False
        user_exists = User.objects.filter(username=self.data['name']).exists()
        if user_exists:
            self.add_error('name', 'Username already exists.')
            valid = False
        user_exists = User.objects.filter(email=self.data['email']).exists()
        if user_exists:
            self.add_error('email', 'Email already exists.')
valid = False
return valid
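# Illustrative usage sketch (assumed, not from the original module): a helper that a
# registration view might call. The function name and the create_user call are
# illustrative assumptions, not part of this app.
def register_user(post_data):
    form = UserRegister(post_data)
    if not form.is_valid():
        return None
    return User.objects.create_user(username=form.data['name'],
                                    email=form.data['email'],
                                    password=form.data['password'])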
| 29.241379 | 79 | 0.635613 |
aceeff779a2945d78e0cad2c8ade0fa8bc47540a | 8805 | py | Python | tests/test_packagerbuddy/test_packagerbuddy.py | cedricduriau/PackagerBuddy | 3eda40cd1b72f030e4f02e38af452e6377b20148 | ["MIT"] | 1 | 2019-01-10T11:15:40.000Z | 2019-01-10T11:15:40.000Z | tests/test_packagerbuddy/test_packagerbuddy.py | cedricduriau/PackagerBuddy | 3eda40cd1b72f030e4f02e38af452e6377b20148 | ["MIT"] | 6 | 2019-01-06T16:56:22.000Z | 2019-01-07T01:43:54.000Z | tests/test_packagerbuddy/test_packagerbuddy.py | cedricduriau/PackagerBuddy | 3eda40cd1b72f030e4f02e38af452e6377b20148 | ["MIT"] | null | null | null |
# stdlib modules
from __future__ import absolute_import
import os
try:
from urllib.request import urlopen
except ImportError:
from urllib2 import urlopen
# tool modules
from packagerbuddy import packagerbuddy
# third party modules
import pytest
def test_get_filename_from_request(patch_url_handler):
"""Test getting the filename of an url from a request object."""
# url with no filename in name, but in request content headers
request = urlopen("http://valid.com")
filename = packagerbuddy._get_filename_from_request(request)
assert filename == "valid.tar"
# url with filename in name, not in request content headers
request = urlopen("http://filename.tar")
filename = packagerbuddy._get_filename_from_request(request)
assert filename == "filename.tar"
def test_normalize_path():
"""Test normalizing paths."""
# ensure tilde is being resolved
assert packagerbuddy._normalize_path("~/") == os.environ["HOME"]
# ensure same level relative is being resolved
assert packagerbuddy._normalize_path("/tmp/./test.txt") == "/tmp/test.txt"
# ensure single level up relative is being resolved
assert packagerbuddy._normalize_path("/tmp/dir/../test.txt") == "/tmp/test.txt"
def test_download():
pass
def test_build_archive_name():
"""Test building the archive name of a specific software release."""
assert packagerbuddy._build_archive_name("software", "version", ".ext") == "software-version.ext"
def test_get_tar_read_mode():
"""Test getting the tar file read modes."""
assert packagerbuddy._get_tar_read_mode("/tmp/test.tar") == "r"
assert packagerbuddy._get_tar_read_mode("/tmp/test.tar.gz") == "r:gz"
assert packagerbuddy._get_tar_read_mode("/tmp/test.tar.bz2") == "r:bz2"
def test_untar():
pass
def test_build_download_url():
"""Test building a download url."""
    assert packagerbuddy._build_download_url("http://valid.com/{version}", "1.0.0") == "http://valid.com/1.0.0"
def test_get_archive(patch_PB_DOWNLOAD):
download_dir = os.environ["PB_DOWNLOAD"]
assert packagerbuddy._get_archive("invalid", "1.0.0") is None
archive = packagerbuddy._get_archive("valid", "1.0.0")
assert archive == os.path.join(download_dir, "valid-1.0.0.tar.gz")
def test_split_ext():
"""Test splitting the extension of paths with supported extensions."""
assert packagerbuddy._split_ext("/tmp/foo.tar") == ("/tmp/foo", ".tar")
assert packagerbuddy._split_ext("/tmp/foo.tar.gz") == ("/tmp/foo", ".tar.gz")
assert packagerbuddy._split_ext("/tmp/foo.tar.gz&response-content-type=application") == ("/tmp/foo", ".tar.gz")
assert packagerbuddy._split_ext("/tmp/foo/1.2.3.tar") == ("/tmp/foo/1.2.3", ".tar")
assert packagerbuddy._split_ext("/tmp/foo/1.2.3.tar.gz") == ("/tmp/foo/1.2.3", ".tar.gz")
def test_split_ext_fail():
"""Test splitting the extension of paths with unsupported extensions."""
# no extension
with pytest.raises(ValueError):
packagerbuddy._split_ext("/tmp/foo/test")
def test_get_config_location(patch_PB_CONFIG):
"""Test getting the software configs location."""
assert packagerbuddy.get_config_location() == os.environ["PB_CONFIG"]
def test_get_download_location(patch_PB_DOWNLOAD):
"""Test getting the software download location."""
current_dir = os.path.dirname(__file__)
expected = os.path.abspath(os.path.join(current_dir, "..", "test_source"))
assert packagerbuddy.get_download_location() == expected
def test_get_install_location(patch_PB_INSTALL):
"""Test getting the software install location."""
current_dir = os.path.dirname(__file__)
expected = os.path.abspath(os.path.join(current_dir, "..", "test_install"))
assert packagerbuddy.get_install_location() == expected
def test_get_scripts_location(patch_PB_SCRIPTS):
"""Test getting the post install scripts location."""
current_dir = os.path.dirname(__file__)
expected = os.path.abspath(os.path.join(current_dir, "..", "test_scripts"))
assert packagerbuddy.get_scripts_location() == expected
def test_get_config(patch_PB_CONFIG):
"""Test getting the config for a valid software."""
config = {"valid": "http://valid.com/{version}.tar"}
assert packagerbuddy.get_config() == config
def test_install():
pass
def test_is_software_installed(patch_PB_INSTALL):
"""Test checking whether a software is installed or not."""
assert packagerbuddy.is_software_installed("valid", "2.0.0") is True
assert packagerbuddy.is_software_installed("valid", "1.0.0") is True
assert packagerbuddy.is_software_installed("valid", "0.0.0") is False
def test_get_installed_software(patch_PB_INSTALL):
"""Test getting the installed software releases."""
install_dir = os.environ["PB_INSTALL"]
assert packagerbuddy.get_installed_software() == [os.path.join(install_dir, "valid-1.0.0"),
os.path.join(install_dir, "valid-2.0.0")]
def test_get_suported_extensions():
"""Test getting the supported software archive extensions."""
assert packagerbuddy.get_suported_extensions() == set([".tar", ".tar.gz", ".tar.bz2", ".tgz"])
def test_validate_config(patch_url_handler):
"""Test validating a valid software config."""
config = {"valid": "http://valid.com/{version}.tar"}
packagerbuddy.validate_config(config, "valid", "1.0.0")
def test_validate_config_fail(patch_url_handler):
"""Test validating invalid software configs."""
version = "1.0.0"
# missing key url
with pytest.raises(KeyError):
packagerbuddy.validate_config({"foo": None}, "valid", version)
# no url value
config = {"valid": None}
with pytest.raises(ValueError):
packagerbuddy.validate_config(config, "valid", version)
# url without version placeholder format
config = {"invalid": "http://invalid.com.tar"}
with pytest.raises(ValueError):
packagerbuddy.validate_config(config, "invalid", version)
# invalid url
config = {"valid": "http://invalid.com/{version}.tar"}
with pytest.raises(ValueError):
packagerbuddy.validate_config(config, "valid", version)
# invalid extension, unsupported
config = {"valid": "http://valid.com/{version}.FOO"}
with pytest.raises(ValueError):
packagerbuddy.validate_config(config, "valid", version)
def test_uninstall():
pass
def test_validate_template_url():
"""Test validating an valid download template url."""
packagerbuddy.validate_template_url("http://test.com/{version}")
def test_validate_template_url_fail():
"""Test validating an invalid download template url."""
with pytest.raises(ValueError):
packagerbuddy.validate_template_url("http://test.com/")
def test_validate_software():
"""Test validating a valid software name."""
packagerbuddy.validate_software("test")
def test_validate_software_fail():
"""Test validating an invalid software name."""
# empty
with pytest.raises(ValueError):
packagerbuddy.validate_software("")
# whitespaces
with pytest.raises(ValueError):
packagerbuddy.validate_software(" ")
def test_add_software(patch_PB_CONFIG):
"""Test adding a software configuration."""
config = packagerbuddy.get_config()
assert "test" not in config
# add twice
packagerbuddy.add_software("test", "http://test.com/{version}")
packagerbuddy.add_software("test", "http://test.com/{version}")
config = packagerbuddy.get_config()
assert "test" in config
assert config["test"] == "http://test.com/{version}"
packagerbuddy.remove_software("test")
def test_remove_software(patch_PB_CONFIG):
"""Test removing a software configuration."""
packagerbuddy.add_software("test", "http://test.com/{version}")
config = packagerbuddy.get_config()
assert "test" in config
# remove twice
packagerbuddy.remove_software("test")
packagerbuddy.remove_software("test")
config = packagerbuddy.get_config()
assert "test" not in config
def test_validate_extension():
"""Test validating a valid extension."""
packagerbuddy.validate_extension(".tar")
packagerbuddy.validate_extension(".tar.gz")
def test_validate_extension_fail():
"""Test validating an invalid extension."""
with pytest.raises(ValueError):
packagerbuddy.validate_extension("")
with pytest.raises(ValueError):
packagerbuddy.validate_extension(".foo")
def test_get_script(patch_PB_SCRIPTS):
"""Test getting the post install scripts of software packages."""
assert packagerbuddy.get_script("invalid") is None
scripts_dir = os.environ["PB_SCRIPTS"]
assert packagerbuddy.get_script("valid") == os.path.join(scripts_dir, "valid")
| 33.865385 | 115 | 0.705963 |
acef00ced5d23127fb48d13d04938a9739cc1bf4 | 1512 | py | Python | river/neural_net/activations.py | fox-ds/river | 9ce947ebfc012ec7059de0a09c765b2da7fc1d25 | ["BSD-3-Clause"] | 2184 | 2020-11-11T12:31:12.000Z | 2022-03-31T16:45:41.000Z | river/neural_net/activations.py | raphaelsty/river | 2e0b25a2ef2d2ba9ec080cf86a491f7465433b18 | ["BSD-3-Clause"] | 328 | 2019-01-25T13:48:43.000Z | 2020-11-11T11:41:44.000Z | river/neural_net/activations.py | raphaelsty/river | 2e0b25a2ef2d2ba9ec080cf86a491f7465433b18 | ["BSD-3-Clause"] | 240 | 2020-11-11T14:25:03.000Z | 2022-03-31T08:25:50.000Z |
import abc
import numpy as np
__all__ = ["ReLU", "Sigmoid", "Identity"]
class Activation:
"""An activation function.
Each activation function is represented by a class and implements two methods. The first method
is `apply`, which evaluates the activation on an array. The second method is `gradient`, which
computes the gradient with respect to the input array. Both methods are intended to be pure
with no side-effects. In other words they do not modify their inputs.
"""
@abc.abstractstaticmethod
def apply(self, z):
"""Apply the activation function to a layer output z."""
@abc.abstractstaticmethod
def gradient(self, z):
"""Return the gradient with respect to a layer output z."""
class ReLU(Activation):
"""Rectified Linear Unit (ReLU) activation function."""
@staticmethod
def apply(z):
a = np.copy(z)
a[a < 0] = 0
return a
@staticmethod
def gradient(z):
a = np.zeros_like(z, dtype=z.dtype)
a[z > 0] = 1
return a
class Sigmoid(Activation):
"""Sigmoid activation function."""
@staticmethod
def apply(z):
return 1 / (1 + np.exp(-z))
@staticmethod
def gradient(z):
s = Sigmoid.apply(z)
return s * (1 - s)
class Identity(Activation):
"""Identity activation function."""
@staticmethod
def apply(z):
return np.copy(z)
@staticmethod
def gradient(z):
return np.ones_like(z, dtype=z.dtype)
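# Illustrative usage sketch (assumed, not from the original module): applying an
# activation and its gradient to a small array.
if __name__ == "__main__":
    z = np.array([-1.0, 0.5, 2.0])
    print(ReLU.apply(z))       # [0.  0.5 2. ]
    print(ReLU.gradient(z))    # [0. 1. 1.]
    print(Sigmoid.apply(z))    # element-wise 1 / (1 + exp(-z))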
| 22.909091 | 99 | 0.62963 |